Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-17 00:48:03 -05:00)
Compare commits
70 Commits
SHA1 list (author and date columns were empty in the captured table):

aeee22c5a4, 7b4e04cd7c, ae4368fabe, df8e39a9e1, 45b43de571, 6d18a72a05, af58a75e97,
fd4c3bd27a, 1f8a60ded2, b1b677997d, f17b43d736, c009a50489, 97a16c455c, a8a07598c8,
23206e22e8, f4aba52b90, d17c273939, aeb5e7d50a, 580ad30832, 6390f7d734, 5ddbfefb6a,
bbf5ed7956, 19cd6eed08, 9c1eb263a8, 75755189a7, a9ab72d27d, 678eb34995, ef7050f560,
9787d9de74, bb4a50bab2, f3554b4e1b, 9dcb025241, ecf646066a, 3fd10b68cd, 6e32c7993c,
8329533848, fc7157b029, a1897f7490, a89b3efd14, 5259693ed1, d77c24206d, c5069557f3,
9b220f61bd, 7fc3af12cc, e2721b46b6, 17118a04bd, 24788e3c83, 056387c981, 8a43d90273,
4f9b9760db, fdaddafa56, 23d59abbd7, cf7fa5bce8, 39e41998bb, c6eff71b74, 6ea4c47757,
91f91aa835, ea7868d076, 7d86f00d82, 7785061e7d, 3370052e54, 325dacd29c, f4981a6ba9,
8c159942eb, deb4dc64af, 1a11437b6f, 04572c94ad, 1e9e78089e, e65f93663d, 2a796fe25e
.github/workflows/build-container.yml (vendored): 8 additions

@@ -45,6 +45,9 @@ jobs:
     steps:
       - name: Free up more disk space on the runner
         # https://github.com/actions/runner-images/issues/2840#issuecomment-1284059930
+        # the /mnt dir has 70GBs of free space
+        # /dev/sda1        74G   28K   70G   1% /mnt
+        # According to some online posts the /mnt is not always there, so checking before setting docker to use it
         run: |
           echo "----- Free space before cleanup"
           df -h
@@ -52,6 +55,11 @@ jobs:
           sudo rm -rf "$AGENT_TOOLSDIRECTORY"
           sudo swapoff /mnt/swapfile
           sudo rm -rf /mnt/swapfile
+          if [ -d /mnt ]; then
+            sudo chmod -R 777 /mnt
+            echo '{"data-root": "/mnt/docker-root"}' | sudo tee /etc/docker/daemon.json
+            sudo systemctl restart docker
+          fi
           echo "----- Free space after cleanup"
           df -h
@@ -265,7 +265,7 @@ If the key is unrecognized, this call raises an

 #### exists(key) -> AnyModelConfig

-Returns True if a model with the given key exists in the databsae.
+Returns True if a model with the given key exists in the database.

 #### search_by_path(path) -> AnyModelConfig
@@ -718,7 +718,7 @@ When downloading remote models is implemented, additional
 configuration information, such as list of trigger terms, will be
 retrieved from the HuggingFace and Civitai model repositories.

-The probed values can be overriden by providing a dictionary in the
+The probed values can be overridden by providing a dictionary in the
 optional `config` argument passed to `import_model()`. You may provide
 overriding values for any of the model's configuration
 attributes. Here is an example of setting the
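The example that follows this sentence in the original documentation is not captured here. As a stand-in, a hypothetical sketch of the pattern being described — every field name below is an illustrative assumption, not the documented schema:

```python
# Hypothetical: override probed values when importing a model.
# "source" and the config keys are assumed names for illustration only.
config = {
    "name": "my-finetune",
    "description": "Custom fine-tune with trigger terms",
    "prediction_type": "v_prediction",  # override the probed prediction type
}
key = installer.import_model(source="/path/to/model.safetensors", config=config)
```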
@@ -841,7 +841,7 @@ variable.

 #### installer.start(invoker)

-The `start` method is called by the API intialization routines when
+The `start` method is called by the API initialization routines when
 the API starts up. Its effect is to call `sync_to_config()` to
 synchronize the model record store database with what's currently on
 disk.
@@ -16,7 +16,7 @@ We thank [all contributors](https://github.com/invoke-ai/InvokeAI/graphs/contrib
 - @psychedelicious (Spencer Mabrito) - Web Team Leader
 - @joshistoast (Josh Corbett) - Web Development
 - @cheerio (Mary Rogers) - Lead Engineer & Web App Development
-- @ebr (Eugene Brodsky) - Cloud/DevOps/Sofware engineer; your friendly neighbourhood cluster-autoscaler
+- @ebr (Eugene Brodsky) - Cloud/DevOps/Software engineer; your friendly neighbourhood cluster-autoscaler
 - @sunija - Standalone version
 - @brandon (Brandon Rising) - Platform, Infrastructure, Backend Systems
 - @ryanjdick (Ryan Dick) - Machine Learning & Training
@@ -41,7 +41,7 @@ Nodes have a "Use Cache" option in their footer. This allows for performance imp

 There are several node grouping concepts that can be examined with a narrow focus. These (and other) groupings can be pieced together to make up functional graph setups, and are important to understanding how groups of nodes work together as part of a whole. Note that the screenshots below aren't examples of complete functioning node graphs (see Examples).

-### Noise
+### Create Latent Noise

 An initial noise tensor is necessary for the latent diffusion process. As a result, the Denoising node requires a noise node input.
@@ -17,6 +17,7 @@ from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.model_manager.load.load_base import LoadedModel
 from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
 from invokeai.backend.util.devices import TorchDevice
+from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_cogview4

 # TODO(ryand): This is effectively a copy of SD3ImageToLatentsInvocation and a subset of ImageToLatentsInvocation. We
 # should refactor to avoid this duplication.
@@ -38,7 +39,11 @@ class CogView4ImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):

     @staticmethod
     def vae_encode(vae_info: LoadedModel, image_tensor: torch.Tensor) -> torch.Tensor:
-        with vae_info as vae:
+        assert isinstance(vae_info.model, AutoencoderKL)
+        estimated_working_memory = estimate_vae_working_memory_cogview4(
+            operation="encode", image_tensor=image_tensor, vae=vae_info.model
+        )
+        with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae):
             assert isinstance(vae, AutoencoderKL)

             vae.disable_tiling()
@@ -62,6 +67,8 @@ class CogView4ImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):
         image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")

         vae_info = context.models.load(self.vae.vae)
+        assert isinstance(vae_info.model, AutoencoderKL)
+
         latents = self.vae_encode(vae_info=vae_info, image_tensor=image_tensor)

         latents = latents.to("cpu")
@@ -6,7 +6,6 @@ from einops import rearrange
 from PIL import Image

 from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
-from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
 from invokeai.app.invocations.fields import (
     FieldDescriptions,
     Input,
@@ -20,6 +19,7 @@ from invokeai.app.invocations.primitives import ImageOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.stable_diffusion.extensions.seamless import SeamlessExt
 from invokeai.backend.util.devices import TorchDevice
+from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_cogview4

 # TODO(ryand): This is effectively a copy of SD3LatentsToImageInvocation and a subset of LatentsToImageInvocation. We
 # should refactor to avoid this duplication.
@@ -39,22 +39,15 @@ class CogView4LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
     latents: LatentsField = InputField(description=FieldDescriptions.latents, input=Input.Connection)
     vae: VAEField = InputField(description=FieldDescriptions.vae, input=Input.Connection)

-    def _estimate_working_memory(self, latents: torch.Tensor, vae: AutoencoderKL) -> int:
-        """Estimate the working memory required by the invocation in bytes."""
-        out_h = LATENT_SCALE_FACTOR * latents.shape[-2]
-        out_w = LATENT_SCALE_FACTOR * latents.shape[-1]
-        element_size = next(vae.parameters()).element_size()
-        scaling_constant = 2200  # Determined experimentally.
-        working_memory = out_h * out_w * element_size * scaling_constant
-        return int(working_memory)
-
     @torch.no_grad()
     def invoke(self, context: InvocationContext) -> ImageOutput:
         latents = context.tensors.load(self.latents.latents_name)

         vae_info = context.models.load(self.vae.vae)
         assert isinstance(vae_info.model, (AutoencoderKL))
-        estimated_working_memory = self._estimate_working_memory(latents, vae_info.model)
+        estimated_working_memory = estimate_vae_working_memory_cogview4(
+            operation="decode", image_tensor=latents, vae=vae_info.model
+        )
         with (
             SeamlessExt.static_patch_model(vae_info.model, self.vae.seamless_axes),
             vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae),
@@ -328,6 +328,21 @@ class FluxDenoiseInvocation(BaseInvocation):
             cfg_scale_end_step=self.cfg_scale_end_step,
         )

+        kontext_extension = None
+        if self.kontext_conditioning:
+            if not self.controlnet_vae:
+                raise ValueError("A VAE (e.g., controlnet_vae) must be provided to use Kontext conditioning.")
+
+            kontext_extension = KontextExtension(
+                context=context,
+                kontext_conditioning=self.kontext_conditioning
+                if isinstance(self.kontext_conditioning, list)
+                else [self.kontext_conditioning],
+                vae_field=self.controlnet_vae,
+                device=TorchDevice.choose_torch_device(),
+                dtype=inference_dtype,
+            )
+
         with ExitStack() as exit_stack:
             # Prepare ControlNet extensions.
             # Note: We do this before loading the transformer model to minimize peak memory (see implementation).
@@ -385,21 +400,6 @@ class FluxDenoiseInvocation(BaseInvocation):
                 dtype=inference_dtype,
             )

-            kontext_extension = None
-            if self.kontext_conditioning:
-                if not self.controlnet_vae:
-                    raise ValueError("A VAE (e.g., controlnet_vae) must be provided to use Kontext conditioning.")
-
-                kontext_extension = KontextExtension(
-                    context=context,
-                    kontext_conditioning=self.kontext_conditioning
-                    if isinstance(self.kontext_conditioning, list)
-                    else [self.kontext_conditioning],
-                    vae_field=self.controlnet_vae,
-                    device=TorchDevice.choose_torch_device(),
-                    dtype=inference_dtype,
-                )
-
             # Prepare Kontext conditioning if provided
             img_cond_seq = None
             img_cond_seq_ids = None
@@ -3,7 +3,6 @@ from einops import rearrange
 from PIL import Image

 from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
-from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
 from invokeai.app.invocations.fields import (
     FieldDescriptions,
     Input,
@@ -18,6 +17,7 @@ from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.flux.modules.autoencoder import AutoEncoder
 from invokeai.backend.model_manager.load.load_base import LoadedModel
 from invokeai.backend.util.devices import TorchDevice
+from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_flux


 @invocation(
@@ -39,17 +39,11 @@ class FluxVaeDecodeInvocation(BaseInvocation, WithMetadata, WithBoard):
         input=Input.Connection,
     )

-    def _estimate_working_memory(self, latents: torch.Tensor, vae: AutoEncoder) -> int:
-        """Estimate the working memory required by the invocation in bytes."""
-        out_h = LATENT_SCALE_FACTOR * latents.shape[-2]
-        out_w = LATENT_SCALE_FACTOR * latents.shape[-1]
-        element_size = next(vae.parameters()).element_size()
-        scaling_constant = 2200  # Determined experimentally.
-        working_memory = out_h * out_w * element_size * scaling_constant
-        return int(working_memory)
-
     def _vae_decode(self, vae_info: LoadedModel, latents: torch.Tensor) -> Image.Image:
-        estimated_working_memory = self._estimate_working_memory(latents, vae_info.model)
+        assert isinstance(vae_info.model, AutoEncoder)
+        estimated_working_memory = estimate_vae_working_memory_flux(
+            operation="decode", image_tensor=latents, vae=vae_info.model
+        )
         with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae):
             assert isinstance(vae, AutoEncoder)
             vae_dtype = next(iter(vae.parameters())).dtype
@@ -15,6 +15,7 @@ from invokeai.backend.flux.modules.autoencoder import AutoEncoder
 from invokeai.backend.model_manager import LoadedModel
 from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
 from invokeai.backend.util.devices import TorchDevice
+from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_flux


 @invocation(
@@ -41,8 +42,12 @@ class FluxVaeEncodeInvocation(BaseInvocation):
         # TODO(ryand): Write a util function for generating random tensors that is consistent across devices / dtypes.
         # There's a starting point in get_noise(...), but it needs to be extracted and generalized. This function
         # should be used for VAE encode sampling.
+        assert isinstance(vae_info.model, AutoEncoder)
+        estimated_working_memory = estimate_vae_working_memory_flux(
+            operation="encode", image_tensor=image_tensor, vae=vae_info.model
+        )
         generator = torch.Generator(device=TorchDevice.choose_torch_device()).manual_seed(0)
-        with vae_info as vae:
+        with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae):
             assert isinstance(vae, AutoEncoder)
             vae_dtype = next(iter(vae.parameters())).dtype
             image_tensor = image_tensor.to(device=TorchDevice.choose_torch_device(), dtype=vae_dtype)
@@ -27,6 +27,7 @@ from invokeai.backend.model_manager import LoadedModel
 from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
 from invokeai.backend.stable_diffusion.vae_tiling import patch_vae_tiling_params
 from invokeai.backend.util.devices import TorchDevice
+from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_sd15_sdxl


 @invocation(
@@ -52,11 +53,24 @@ class ImageToLatentsInvocation(BaseInvocation):
     tile_size: int = InputField(default=0, multiple_of=8, description=FieldDescriptions.vae_tile_size)
     fp32: bool = InputField(default=False, description=FieldDescriptions.fp32)

-    @staticmethod
+    @classmethod
     def vae_encode(
-        vae_info: LoadedModel, upcast: bool, tiled: bool, image_tensor: torch.Tensor, tile_size: int = 0
+        cls,
+        vae_info: LoadedModel,
+        upcast: bool,
+        tiled: bool,
+        image_tensor: torch.Tensor,
+        tile_size: int = 0,
     ) -> torch.Tensor:
-        with vae_info as vae:
+        assert isinstance(vae_info.model, (AutoencoderKL, AutoencoderTiny))
+        estimated_working_memory = estimate_vae_working_memory_sd15_sdxl(
+            operation="encode",
+            image_tensor=image_tensor,
+            vae=vae_info.model,
+            tile_size=tile_size if tiled else None,
+            fp32=upcast,
+        )
+        with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae):
             assert isinstance(vae, (AutoencoderKL, AutoencoderTiny))
             orig_dtype = vae.dtype
             if upcast:
@@ -113,6 +127,7 @@ class ImageToLatentsInvocation(BaseInvocation):
         image = context.images.get_pil(self.image.image_name)

         vae_info = context.models.load(self.vae.vae)
+        assert isinstance(vae_info.model, (AutoencoderKL, AutoencoderTiny))

         image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
         if image_tensor.dim() == 3:
@@ -120,7 +135,11 @@ class ImageToLatentsInvocation(BaseInvocation):

         context.util.signal_progress("Running VAE encoder")
         latents = self.vae_encode(
-            vae_info=vae_info, upcast=self.fp32, tiled=self.tiled, image_tensor=image_tensor, tile_size=self.tile_size
+            vae_info=vae_info,
+            upcast=self.fp32,
+            tiled=self.tiled or context.config.get().force_tiled_decode,
+            image_tensor=image_tensor,
+            tile_size=self.tile_size,
         )

         latents = latents.to("cpu")
@@ -27,6 +27,7 @@ from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.stable_diffusion.extensions.seamless import SeamlessExt
 from invokeai.backend.stable_diffusion.vae_tiling import patch_vae_tiling_params
 from invokeai.backend.util.devices import TorchDevice
+from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_sd15_sdxl


 @invocation(
@@ -53,39 +54,6 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
     tile_size: int = InputField(default=0, multiple_of=8, description=FieldDescriptions.vae_tile_size)
     fp32: bool = InputField(default=False, description=FieldDescriptions.fp32)

-    def _estimate_working_memory(
-        self, latents: torch.Tensor, use_tiling: bool, vae: AutoencoderKL | AutoencoderTiny
-    ) -> int:
-        """Estimate the working memory required by the invocation in bytes."""
-        # It was found experimentally that the peak working memory scales linearly with the number of pixels and the
-        # element size (precision). This estimate is accurate for both SD1 and SDXL.
-        element_size = 4 if self.fp32 else 2
-        scaling_constant = 2200  # Determined experimentally.
-
-        if use_tiling:
-            tile_size = self.tile_size
-            if tile_size == 0:
-                tile_size = vae.tile_sample_min_size
-            assert isinstance(tile_size, int)
-            out_h = tile_size
-            out_w = tile_size
-            working_memory = out_h * out_w * element_size * scaling_constant
-
-            # We add 25% to the working memory estimate when tiling is enabled to account for factors like tile overlap
-            # and number of tiles. We could make this more precise in the future, but this should be good enough for
-            # most use cases.
-            working_memory = working_memory * 1.25
-        else:
-            out_h = LATENT_SCALE_FACTOR * latents.shape[-2]
-            out_w = LATENT_SCALE_FACTOR * latents.shape[-1]
-            working_memory = out_h * out_w * element_size * scaling_constant
-
-        if self.fp32:
-            # If we are running in FP32, then we should account for the likely increase in model size (~250MB).
-            working_memory += 250 * 2**20
-
-        return int(working_memory)
-
     @torch.no_grad()
     def invoke(self, context: InvocationContext) -> ImageOutput:
         latents = context.tensors.load(self.latents.latents_name)
@@ -94,8 +62,13 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):

         vae_info = context.models.load(self.vae.vae)
         assert isinstance(vae_info.model, (AutoencoderKL, AutoencoderTiny))

-        estimated_working_memory = self._estimate_working_memory(latents, use_tiling, vae_info.model)
+        estimated_working_memory = estimate_vae_working_memory_sd15_sdxl(
+            operation="decode",
+            image_tensor=latents,
+            vae=vae_info.model,
+            tile_size=self.tile_size if use_tiling else None,
+            fp32=self.fp32,
+        )
         with (
             SeamlessExt.static_patch_model(vae_info.model, self.vae.seamless_axes),
             vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae),
@@ -17,6 +17,7 @@ from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.model_manager.load.load_base import LoadedModel
 from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
 from invokeai.backend.util.devices import TorchDevice
+from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_sd3


 @invocation(
@@ -34,7 +35,11 @@ class SD3ImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):

     @staticmethod
     def vae_encode(vae_info: LoadedModel, image_tensor: torch.Tensor) -> torch.Tensor:
-        with vae_info as vae:
+        assert isinstance(vae_info.model, AutoencoderKL)
+        estimated_working_memory = estimate_vae_working_memory_sd3(
+            operation="encode", image_tensor=image_tensor, vae=vae_info.model
+        )
+        with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae):
             assert isinstance(vae, AutoencoderKL)

             vae.disable_tiling()
@@ -58,6 +63,8 @@ class SD3ImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):
         image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")

         vae_info = context.models.load(self.vae.vae)
+        assert isinstance(vae_info.model, AutoencoderKL)
+
         latents = self.vae_encode(vae_info=vae_info, image_tensor=image_tensor)

         latents = latents.to("cpu")
@@ -6,7 +6,6 @@ from einops import rearrange
 from PIL import Image

 from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
-from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
 from invokeai.app.invocations.fields import (
     FieldDescriptions,
     Input,
@@ -20,6 +19,7 @@ from invokeai.app.invocations.primitives import ImageOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.stable_diffusion.extensions.seamless import SeamlessExt
 from invokeai.backend.util.devices import TorchDevice
+from invokeai.backend.util.vae_working_memory import estimate_vae_working_memory_sd3


 @invocation(
@@ -41,22 +41,15 @@ class SD3LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
         input=Input.Connection,
     )

-    def _estimate_working_memory(self, latents: torch.Tensor, vae: AutoencoderKL) -> int:
-        """Estimate the working memory required by the invocation in bytes."""
-        out_h = LATENT_SCALE_FACTOR * latents.shape[-2]
-        out_w = LATENT_SCALE_FACTOR * latents.shape[-1]
-        element_size = next(vae.parameters()).element_size()
-        scaling_constant = 2200  # Determined experimentally.
-        working_memory = out_h * out_w * element_size * scaling_constant
-        return int(working_memory)
-
     @torch.no_grad()
     def invoke(self, context: InvocationContext) -> ImageOutput:
         latents = context.tensors.load(self.latents.latents_name)

         vae_info = context.models.load(self.vae.vae)
         assert isinstance(vae_info.model, (AutoencoderKL))
-        estimated_working_memory = self._estimate_working_memory(latents, vae_info.model)
+        estimated_working_memory = estimate_vae_working_memory_sd3(
+            operation="decode", image_tensor=latents, vae=vae_info.model
+        )
         with (
             SeamlessExt.static_patch_model(vae_info.model, self.vae.seamless_axes),
             vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae),
@@ -186,8 +186,9 @@ class ModelInstallService(ModelInstallServiceBase):
         info: AnyModelConfig = self._probe(Path(model_path), config)  # type: ignore

         if preferred_name := config.name:
-            # Careful! Don't use pathlib.Path(...).with_suffix - it can will strip everything after the first dot.
-            preferred_name = f"{preferred_name}{model_path.suffix}"
+            if Path(model_path).is_file():
+                # Careful! Don't use pathlib.Path(...).with_suffix - it can will strip everything after the first dot.
+                preferred_name = f"{preferred_name}{model_path.suffix}"

         dest_path = (
             self.app_config.models_path / info.base.value / info.type.value / (preferred_name or model_path.name)
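The `with_suffix` caution in the hunk above is worth a concrete illustration. A minimal standalone sketch (plain `pathlib`, unrelated to InvokeAI's code) of why the code appends the suffix instead:

```python
from pathlib import Path

# A model name where the final dot is part of the name, not an extension separator:
name = Path("sd-v1.5")

# with_suffix() treats everything after the last dot as the suffix and replaces it,
# silently dropping the ".5":
print(name.with_suffix(".safetensors"))  # sd-v1.safetensors

# Appending the suffix, as the diff above does, preserves the full name:
print(Path(f"{name}.safetensors"))       # sd-v1.5.safetensors
```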
@@ -622,16 +623,13 @@ class ModelInstallService(ModelInstallServiceBase):
         if old_path == new_path:
             return old_path

+        if new_path.exists():
+            raise FileExistsError(f"Cannot move {old_path} to {new_path}: destination already exists")
+
         new_path.parent.mkdir(parents=True, exist_ok=True)

-        # if path already exists then we jigger the name to make it unique
-        counter: int = 1
-        while new_path.exists():
-            path = new_path.with_stem(new_path.stem + f"_{counter:02d}")
-            if not path.exists():
-                new_path = path
-            counter += 1
         move(old_path, new_path)

         return new_path

     def _probe(self, model_path: Path, config: Optional[ModelRecordChanges] = None):
@@ -106,8 +106,8 @@ class KontextExtension:

         # Track cumulative dimensions for spatial tiling
         # These track the running extent of the virtual canvas in latent space
-        h = 0  # Running height extent
-        w = 0  # Running width extent
+        canvas_h = 0  # Running canvas height
+        canvas_w = 0  # Running canvas width

         vae_info = self._context.models.load(self._vae_field.vae)
@@ -131,12 +131,20 @@ class KontextExtension:

         # Continue with VAE encoding
         # Don't sample from the distribution for reference images - use the mean (matching ComfyUI)
-        with vae_info as vae:
+        # Estimate working memory for encode operation (50% of decode memory requirements)
+        img_h = image_tensor.shape[-2]
+        img_w = image_tensor.shape[-1]
+        element_size = next(vae_info.model.parameters()).element_size()
+        scaling_constant = 1100  # 50% of decode scaling constant (2200)
+        estimated_working_memory = int(img_h * img_w * element_size * scaling_constant)
+
+        with vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae):
             assert isinstance(vae, AutoEncoder)
             vae_dtype = next(iter(vae.parameters())).dtype
             image_tensor = image_tensor.to(device=TorchDevice.choose_torch_device(), dtype=vae_dtype)
             # Use sample=False to get the distribution mean without noise
             kontext_latents_unpacked = vae.encode(image_tensor, sample=False)
             TorchDevice.empty_cache()

         # Extract tensor dimensions
         batch_size, _, latent_height, latent_width = kontext_latents_unpacked.shape
@@ -154,21 +162,33 @@ class KontextExtension:
             kontext_latents_packed = pack(kontext_latents_unpacked).to(self._device, self._dtype)

             # Determine spatial offsets for this reference image
+            # - Compare the potential new canvas dimensions if we add the image vertically vs horizontally
+            # - Choose the placement that results in a more square-like canvas
             h_offset = 0
             w_offset = 0

             if idx > 0:  # First image starts at (0, 0)
-                # Check which placement would result in better canvas dimensions
-                # If adding to height would make the canvas taller than wide, tile horizontally
-                # Otherwise, tile vertically
-                if latent_height + h > latent_width + w:
+                # Calculate potential canvas dimensions for each tiling option
+                # Option 1: Tile vertically (below existing content)
+                potential_h_vertical = canvas_h + latent_height
+
+                # Option 2: Tile horizontally (to the right of existing content)
+                potential_w_horizontal = canvas_w + latent_width
+
+                # Choose arrangement that minimizes the maximum dimension
+                # This keeps the canvas closer to square, optimizing attention computation
+                if potential_h_vertical > potential_w_horizontal:
                     # Tile horizontally (to the right of existing images)
-                    w_offset = w
+                    w_offset = canvas_w
+                    canvas_w = canvas_w + latent_width
+                    canvas_h = max(canvas_h, latent_height)
                 else:
                     # Tile vertically (below existing images)
-                    h_offset = h
+                    h_offset = canvas_h
+                    canvas_h = canvas_h + latent_height
+                    canvas_w = max(canvas_w, latent_width)
+            else:
+                # First image - just set canvas dimensions
+                canvas_h = latent_height
+                canvas_w = latent_width

             # Generate IDs with both index offset and spatial offsets
             kontext_ids = generate_img_ids_with_offset(
@@ -182,11 +202,6 @@ class KontextExtension:
                 w_offset=w_offset,
             )

-            # Update cumulative dimensions
-            # Track the maximum extent of the virtual canvas after placing this image
-            h = max(h, latent_height + h_offset)
-            w = max(w, latent_width + w_offset)
-
             all_latents.append(kontext_latents_packed)
             all_ids.append(kontext_ids)
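The placement heuristic above can be checked in isolation. A minimal sketch (standalone Python with assumed latent sizes, not the actual extension code) that reproduces the decision rule:

```python
def place_reference_images(sizes: list[tuple[int, int]]) -> list[tuple[int, int]]:
    """Return (h_offset, w_offset) placements for a list of (height, width) latent sizes.

    Mirrors the hunk above: each image is tiled below or to the right of the
    canvas so far, choosing whichever keeps the canvas closer to square.
    """
    canvas_h = canvas_w = 0
    offsets = []
    for idx, (h, w) in enumerate(sizes):
        h_offset = w_offset = 0
        if idx == 0:
            canvas_h, canvas_w = h, w
        elif canvas_h + h > canvas_w + w:
            # Growing downward would be worse than growing rightward: tile horizontally.
            w_offset = canvas_w
            canvas_w += w
            canvas_h = max(canvas_h, h)
        else:
            # Otherwise tile vertically, below the existing content.
            h_offset = canvas_h
            canvas_h += h
            canvas_w = max(canvas_w, w)
        offsets.append((h_offset, w_offset))
    return offsets

# Three equal 64x64 references: the second goes below, the third to the right.
print(place_reference_images([(64, 64), (64, 64), (64, 64)]))  # [(0, 0), (64, 0), (0, 64)]
```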
@@ -18,16 +18,25 @@ def is_state_dict_likely_in_flux_diffusers_format(state_dict: Dict[str, torch.Te
     # First, check that all keys end in "lora_A.weight" or "lora_B.weight" (i.e. are in PEFT format).
     all_keys_in_peft_format = all(k.endswith(("lora_A.weight", "lora_B.weight")) for k in state_dict.keys())

-    # Next, check that this is likely a FLUX model by spot-checking a few keys.
-    expected_keys = [
+    # Check if keys use transformer prefix
+    transformer_prefix_keys = [
         "transformer.single_transformer_blocks.0.attn.to_q.lora_A.weight",
         "transformer.single_transformer_blocks.0.attn.to_q.lora_B.weight",
         "transformer.transformer_blocks.0.attn.add_q_proj.lora_A.weight",
         "transformer.transformer_blocks.0.attn.add_q_proj.lora_B.weight",
     ]
-    all_expected_keys_present = all(k in state_dict for k in expected_keys)
+    transformer_keys_present = all(k in state_dict for k in transformer_prefix_keys)

-    return all_keys_in_peft_format and all_expected_keys_present
+    # Check if keys use base_model.model prefix
+    base_model_prefix_keys = [
+        "base_model.model.single_transformer_blocks.0.attn.to_q.lora_A.weight",
+        "base_model.model.single_transformer_blocks.0.attn.to_q.lora_B.weight",
+        "base_model.model.transformer_blocks.0.attn.add_q_proj.lora_A.weight",
+        "base_model.model.transformer_blocks.0.attn.add_q_proj.lora_B.weight",
+    ]
+    base_model_keys_present = all(k in state_dict for k in base_model_prefix_keys)
+
+    return all_keys_in_peft_format and (transformer_keys_present or base_model_keys_present)


 def lora_model_from_flux_diffusers_state_dict(
@@ -49,8 +58,16 @@ def lora_layers_from_flux_diffusers_grouped_state_dict(
     https://github.com/huggingface/diffusers/blob/55ac421f7bb12fd00ccbef727be4dc2f3f920abb/scripts/convert_flux_to_diffusers.py
     """

-    # Remove the "transformer." prefix from all keys.
-    grouped_state_dict = {k.replace("transformer.", ""): v for k, v in grouped_state_dict.items()}
+    # Determine which prefix is used and remove it from all keys.
+    # Check if any key starts with "base_model.model." prefix
+    has_base_model_prefix = any(k.startswith("base_model.model.") for k in grouped_state_dict.keys())
+
+    if has_base_model_prefix:
+        # Remove the "base_model.model." prefix from all keys.
+        grouped_state_dict = {k.replace("base_model.model.", ""): v for k, v in grouped_state_dict.items()}
+    else:
+        # Remove the "transformer." prefix from all keys.
+        grouped_state_dict = {k.replace("transformer.", ""): v for k, v in grouped_state_dict.items()}

     # Constants for FLUX.1
     num_double_layers = 19
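The two hunks above teach both the detector and the converter about a second key prefix. The string-level behavior is easy to demonstrate on synthetic keys (a sketch; the real helpers operate on grouped tensor dicts):

```python
def strip_lora_prefix(state_dict: dict) -> dict:
    """Normalize FLUX diffusers-format LoRA keys, mirroring the hunk above:
    PEFT exports use a "base_model.model." prefix, diffusers uses "transformer.".
    """
    if any(k.startswith("base_model.model.") for k in state_dict):
        return {k.replace("base_model.model.", ""): v for k, v in state_dict.items()}
    return {k.replace("transformer.", ""): v for k, v in state_dict.items()}

peft_style = {"base_model.model.transformer_blocks.0.attn.to_q.lora_A.weight": 0}
diffusers_style = {"transformer.transformer_blocks.0.attn.to_q.lora_A.weight": 0}
# Both spellings normalize to the same canonical key set:
assert strip_lora_prefix(peft_style) == strip_lora_prefix(diffusers_style)
```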
@@ -20,7 +20,7 @@ def main():
         "/data/invokeai/models/.download_cache/https__huggingface.co_black-forest-labs_flux.1-schnell_resolve_main_flux1-schnell.safetensors/flux1-schnell.safetensors"
     )

-    with log_time("Intialize FLUX transformer on meta device"):
+    with log_time("Initialize FLUX transformer on meta device"):
         # TODO(ryand): Determine if this is a schnell model or a dev model and load the appropriate config.
         p = params["flux-schnell"]

@@ -33,7 +33,7 @@ def main():
     )

     # inference_dtype = torch.bfloat16
-    with log_time("Intialize FLUX transformer on meta device"):
+    with log_time("Initialize FLUX transformer on meta device"):
         # TODO(ryand): Determine if this is a schnell model or a dev model and load the appropriate config.
         p = params["flux-schnell"]

@@ -27,7 +27,7 @@ def main():
     """
     model_path = Path("/data/misc/text_encoder_2")

-    with log_time("Intialize T5 on meta device"):
+    with log_time("Initialize T5 on meta device"):
         model_config = AutoConfig.from_pretrained(model_path)
         with accelerate.init_empty_weights():
             model = AutoModelForTextEncoding.from_config(model_config)
invokeai/backend/util/vae_working_memory.py (new file, 117 lines)

@@ -0,0 +1,117 @@
+from typing import Literal
+
+import torch
+from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL
+from diffusers.models.autoencoders.autoencoder_tiny import AutoencoderTiny
+
+from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
+from invokeai.backend.flux.modules.autoencoder import AutoEncoder
+
+
+def estimate_vae_working_memory_sd15_sdxl(
+    operation: Literal["encode", "decode"],
+    image_tensor: torch.Tensor,
+    vae: AutoencoderKL | AutoencoderTiny,
+    tile_size: int | None,
+    fp32: bool,
+) -> int:
+    """Estimate the working memory required to encode or decode the given tensor."""
+    # It was found experimentally that the peak working memory scales linearly with the number of pixels and the
+    # element size (precision). This estimate is accurate for both SD1 and SDXL.
+    element_size = 4 if fp32 else 2
+
+    # This constant is determined experimentally and takes into consideration both allocated and reserved memory. See #8414
+    # Encoding uses ~45% the working memory as decoding.
+    scaling_constant = 2200 if operation == "decode" else 1100
+
+    latent_scale_factor_for_operation = LATENT_SCALE_FACTOR if operation == "decode" else 1
+
+    if tile_size is not None:
+        if tile_size == 0:
+            tile_size = vae.tile_sample_min_size
+        assert isinstance(tile_size, int)
+        h = tile_size
+        w = tile_size
+        working_memory = h * w * element_size * scaling_constant
+
+        # We add 25% to the working memory estimate when tiling is enabled to account for factors like tile overlap
+        # and number of tiles. We could make this more precise in the future, but this should be good enough for
+        # most use cases.
+        working_memory = working_memory * 1.25
+    else:
+        h = latent_scale_factor_for_operation * image_tensor.shape[-2]
+        w = latent_scale_factor_for_operation * image_tensor.shape[-1]
+        working_memory = h * w * element_size * scaling_constant
+
+    if fp32:
+        # If we are running in FP32, then we should account for the likely increase in model size (~250MB).
+        working_memory += 250 * 2**20
+
+    print(f"estimate_vae_working_memory_sd15_sdxl: {int(working_memory)}")
+
+    return int(working_memory)
+
+
+def estimate_vae_working_memory_cogview4(
+    operation: Literal["encode", "decode"], image_tensor: torch.Tensor, vae: AutoencoderKL
+) -> int:
+    """Estimate the working memory required by the invocation in bytes."""
+    latent_scale_factor_for_operation = LATENT_SCALE_FACTOR if operation == "decode" else 1
+
+    h = latent_scale_factor_for_operation * image_tensor.shape[-2]
+    w = latent_scale_factor_for_operation * image_tensor.shape[-1]
+    element_size = next(vae.parameters()).element_size()
+
+    # This constant is determined experimentally and takes into consideration both allocated and reserved memory. See #8414
+    # Encoding uses ~45% the working memory as decoding.
+    scaling_constant = 2200 if operation == "decode" else 1100
+    working_memory = h * w * element_size * scaling_constant
+
+    print(f"estimate_vae_working_memory_cogview4: {int(working_memory)}")
+
+    return int(working_memory)
+
+
+def estimate_vae_working_memory_flux(
+    operation: Literal["encode", "decode"], image_tensor: torch.Tensor, vae: AutoEncoder
+) -> int:
+    """Estimate the working memory required by the invocation in bytes."""
+    latent_scale_factor_for_operation = LATENT_SCALE_FACTOR if operation == "decode" else 1
+
+    out_h = latent_scale_factor_for_operation * image_tensor.shape[-2]
+    out_w = latent_scale_factor_for_operation * image_tensor.shape[-1]
+    element_size = next(vae.parameters()).element_size()
+
+    # This constant is determined experimentally and takes into consideration both allocated and reserved memory. See #8414
+    # Encoding uses ~45% the working memory as decoding.
+    scaling_constant = 2200 if operation == "decode" else 1100
+
+    working_memory = out_h * out_w * element_size * scaling_constant
+
+    print(f"estimate_vae_working_memory_flux: {int(working_memory)}")
+
+    return int(working_memory)
+
+
+def estimate_vae_working_memory_sd3(
+    operation: Literal["encode", "decode"], image_tensor: torch.Tensor, vae: AutoencoderKL
+) -> int:
+    """Estimate the working memory required by the invocation in bytes."""
+    # Encode operations use approximately 50% of the memory required for decode operations
+    latent_scale_factor_for_operation = LATENT_SCALE_FACTOR if operation == "decode" else 1
+
+    h = latent_scale_factor_for_operation * image_tensor.shape[-2]
+    w = latent_scale_factor_for_operation * image_tensor.shape[-1]
+    element_size = next(vae.parameters()).element_size()
+
+    # This constant is determined experimentally and takes into consideration both allocated and reserved memory. See #8414
+    # Encoding uses ~45% the working memory as decoding.
+    scaling_constant = 2200 if operation == "decode" else 1100
+
+    working_memory = h * w * element_size * scaling_constant
+
+    print(f"estimate_vae_working_memory_sd3: {int(working_memory)}")
+
+    return int(working_memory)
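To make the estimators concrete, here is a worked example using the same formula as the module above (illustrative numbers; it assumes LATENT_SCALE_FACTOR is 8):

```python
# Untiled fp16 decode of a 1024x1024 SD/SDXL image: latents are 128x128.
h = w = 8 * 128                # LATENT_SCALE_FACTOR * latent dims = 1024
element_size = 2               # fp16; fp32 would use 4
scaling_constant = 2200        # decode; encode would use 1100
working_memory = h * w * element_size * scaling_constant
print(working_memory / 2**30)  # ~4.3 GiB reserved as VAE working memory
```

Tiled decodes instead scale with the tile size (plus the 25% overhead), which is why tiling caps memory use regardless of output resolution.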
@@ -38,6 +38,7 @@
         "deletedImagesCannotBeRestored": "Deleted images cannot be restored.",
         "hideBoards": "Hide Boards",
         "loading": "Loading...",
+        "locateInGalery": "Locate in Gallery",
         "menuItemAutoAdd": "Auto-add to this Board",
         "move": "Move",
         "movingImagesToBoard_one": "Moving {{count}} image to board:",
@@ -114,6 +115,9 @@
         "t2iAdapter": "T2I Adapter",
         "positivePrompt": "Positive Prompt",
         "negativePrompt": "Negative Prompt",
+        "removeNegativePrompt": "Remove Negative Prompt",
+        "addNegativePrompt": "Add Negative Prompt",
+        "selectYourModel": "Select Your Model",
         "discordLabel": "Discord",
         "dontAskMeAgain": "Don't ask me again",
         "dontShowMeThese": "Don't show me these",
@@ -767,6 +771,7 @@
         "allPrompts": "All Prompts",
         "cfgScale": "CFG scale",
         "cfgRescaleMultiplier": "$t(parameters.cfgRescaleMultiplier)",
+        "clipSkip": "$t(parameters.clipSkip)",
         "createdBy": "Created By",
         "generationMode": "Generation Mode",
         "guidance": "Guidance",
@@ -869,6 +874,9 @@
         "install": "Install",
         "installAll": "Install All",
         "installRepo": "Install Repo",
+        "installBundle": "Install Bundle",
+        "installBundleMsg1": "Are you sure you want to install the {{bundleName}} bundle?",
+        "installBundleMsg2": "This bundle will install the following {{count}} models:",
         "ipAdapters": "IP Adapters",
         "learnMoreAboutSupportedModels": "Learn more about the models we support",
         "load": "Load",
@@ -1287,6 +1295,7 @@
         "remixImage": "Remix Image",
         "usePrompt": "Use Prompt",
         "useSeed": "Use Seed",
+        "useClipSkip": "Use CLIP Skip",
         "width": "Width",
         "gaussianBlur": "Gaussian Blur",
         "boxBlur": "Box Blur",
@@ -2180,7 +2189,8 @@
         "rgReferenceImagesNotSupported": "regional Reference Images not supported for selected base model",
         "rgAutoNegativeNotSupported": "Auto-Negative not supported for selected base model",
         "rgNoRegion": "no region drawn",
-        "fluxFillIncompatibleWithControlLoRA": "Control LoRA is not compatible with FLUX Fill"
+        "fluxFillIncompatibleWithControlLoRA": "Control LoRA is not compatible with FLUX Fill",
+        "bboxHidden": "Bounding box is hidden (shift+o to toggle)"
     },
     "errors": {
         "unableToFindImage": "Unable to find image",
@@ -2672,8 +2682,8 @@
     "whatsNew": {
         "whatsNewInInvoke": "What's New in Invoke",
         "items": [
-            "Studio state is saved to the server, allowing you to continue your work on any device.",
-            "Support for multiple reference images for FLUX Kontext (local model only)."
+            "Misc QoL: Toggle Bbox visibility, highlight nodes with errors, prevent adding node fields to Builder form multiple times, CLIP Skip metadata recallable",
+            "Reduced VRAM usage for multiple Kontext Ref images and VAE encoding"
         ],
         "readReleaseNotes": "Read Release Notes",
         "watchRecentReleaseVideos": "Watch Recent Release Videos",
@@ -128,7 +128,9 @@
         "search": "Cerca",
         "clear": "Cancella",
         "compactView": "Vista compatta",
-        "fullView": "Vista completa"
+        "fullView": "Vista completa",
+        "removeNegativePrompt": "Rimuovi prompt negativo",
+        "addNegativePrompt": "Aggiungi prompt negativo"
     },
     "gallery": {
         "galleryImageSize": "Dimensione dell'immagine",
@@ -410,6 +412,10 @@
         "cancelSegmentAnything": {
             "title": "Annulla Segment Anything",
             "desc": "Annulla l'operazione Segment Anything corrente."
+        },
+        "fitBboxToLayers": {
+            "title": "Adatta il riquadro di delimitazione ai livelli",
+            "desc": "Regola automaticamente il riquadro di delimitazione della generazione per adattarlo ai livelli visibili"
         }
     },
     "workflows": {
@@ -252,7 +252,10 @@
         "clear": "Dọn Dẹp",
         "compactView": "Chế Độ Xem Gọn",
         "fullView": "Chế Độ Xem Đầy Đủ",
-        "options_withCount_other": "{{count}} thiết lập"
+        "options_withCount_other": "{{count}} thiết lập",
+        "removeNegativePrompt": "Xóa Lệnh Tiêu Cực",
+        "addNegativePrompt": "Thêm Lệnh Tiêu Cực",
+        "selectYourModel": "Chọn Model"
     },
     "prompt": {
         "addPromptTrigger": "Thêm Trigger Cho Lệnh",
@@ -492,6 +495,10 @@
         "cancelSegmentAnything": {
             "title": "Huỷ Segment Anything",
             "desc": "Huỷ hoạt động Segment Anything hiện tại.",
             "key": "esc"
+        },
+        "fitBboxToLayers": {
+            "title": "Xếp Vừa Hộp Giới Hạn Vào Layer",
+            "desc": "Tự động điểu chỉnh hộp giới hạn tạo sinh vừa vặn vào layer nhìn thấy được"
         }
     },
     "workflows": {
@@ -898,7 +905,8 @@
         "recallParameters": "Gợi Nhớ Tham Số",
         "scheduler": "Scheduler",
         "noMetaData": "Không tìm thấy metadata",
-        "imageDimensions": "Kích Thước Ảnh"
+        "imageDimensions": "Kích Thước Ảnh",
+        "clipSkip": "$t(parameters.clipSkip)"
     },
     "accordions": {
         "generation": {
@@ -1707,7 +1715,8 @@
         "upscaling": "Upscale",
         "tileSize": "Kích Thước Khối",
         "disabledNoRasterContent": "Đã Tắt (Không Có Nội Dung Dạng Raster)",
-        "modelDisabledForTrial": "Tạo sinh với {{modelName}} là không thể với tài khoản trial. Vào phần <LinkComponent>thiết lập tài khoản</LinkComponent> để nâng cấp."
+        "modelDisabledForTrial": "Tạo sinh với {{modelName}} là không thể với tài khoản trial. Vào phần <LinkComponent>thiết lập tài khoản</LinkComponent> để nâng cấp.",
+        "useClipSkip": "Dùng CLIP Skip"
     },
     "dynamicPrompts": {
         "seedBehaviour": {
@@ -2198,7 +2207,8 @@
         "rgReferenceImagesNotSupported": "Ảnh Mẫu Khu Vực không được hỗ trợ cho model cơ sở được chọn",
         "rgAutoNegativeNotSupported": "Tự Động Đảo Chiều không được hỗ trợ cho model cơ sở được chọn",
         "rgNoRegion": "không có khu vực được vẽ",
-        "fluxFillIncompatibleWithControlLoRA": "LoRA Điều Khiển Được không tương tích với FLUX Fill"
+        "fluxFillIncompatibleWithControlLoRA": "LoRA Điều Khiển Được không tương tích với FLUX Fill",
+        "bboxHidden": "Hộp giới hạn đang ẩn (shift+o để bật/tắt)"
     },
     "pasteTo": "Dán Vào",
     "pasteToAssets": "Tài Nguyên",
@@ -71,7 +71,7 @@ interface Props extends PropsWithChildren {
    * If provided, overrides in-app navigation to the model manager
    */
   onClickGoToModelManager?: () => void;
-  storagePersistThrottle?: number;
+  storagePersistDebounce?: number;
 }

 const InvokeAIUI = ({
@@ -98,7 +98,7 @@ const InvokeAIUI = ({
   loggingOverrides,
   onClickGoToModelManager,
   whatsNew,
-  storagePersistThrottle = 2000,
+  storagePersistDebounce = 300,
 }: Props) => {
   const [store, setStore] = useState<ReturnType<typeof createStore> | undefined>(undefined);
   const [didRehydrate, setDidRehydrate] = useState(false);
@@ -318,7 +318,7 @@ const InvokeAIUI = ({
     const onRehydrated = () => {
       setDidRehydrate(true);
     };
-    const store = createStore({ persist: true, persistThrottle: storagePersistThrottle, onRehydrated });
+    const store = createStore({ persist: true, persistDebounce: storagePersistDebounce, onRehydrated });
     setStore(store);
     $store.set(store);
     if (import.meta.env.MODE === 'development') {
@@ -333,7 +333,7 @@ const InvokeAIUI = ({
         window.$store = undefined;
       }
     };
-  }, [storagePersistThrottle]);
+  }, [storagePersistDebounce]);

   if (!store || !didRehydrate) {
     return <Loading />;
@@ -184,7 +184,7 @@ const PERSISTED_KEYS = Object.values(SLICE_CONFIGS)
   .filter((sliceConfig) => !!sliceConfig.persistConfig)
   .map((sliceConfig) => sliceConfig.slice.reducerPath);

-export const createStore = (options?: { persist?: boolean; persistThrottle?: number; onRehydrated?: () => void }) => {
+export const createStore = (options?: { persist?: boolean; persistDebounce?: number; onRehydrated?: () => void }) => {
   const store = configureStore({
     reducer: rememberedRootReducer,
     middleware: (getDefaultMiddleware) =>
@@ -204,7 +204,7 @@ export const createStore = (options?: { persist?: boolean; persistThrottle?: num
   if (options?.persist) {
     return enhancers.prepend(
       rememberEnhancer(reduxRememberDriver, PERSISTED_KEYS, {
-        persistThrottle: options?.persistThrottle ?? 2000,
+        persistDebounce: options?.persistDebounce ?? 2000,
         serialize,
         unserialize,
         prefix: '',
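The persistThrottle-to-persistDebounce switch changes when writes happen: a throttle persists at most once per interval while changes keep arriving; a debounce waits for a quiet period and persists once after the last change. A language-agnostic sketch of the debounce side (simplified timers; redux-remember's real implementation differs):

```python
import threading

class Debouncer:
    """Run `fn` once, `wait` seconds after the *last* call.

    A throttle, by contrast, would run at most once per `wait` seconds
    *during* a burst of calls.
    """

    def __init__(self, fn, wait: float):
        self.fn, self.wait = fn, wait
        self._timer = None

    def __call__(self, *args):
        if self._timer is not None:
            self._timer.cancel()  # each new change restarts the quiet period
        self._timer = threading.Timer(self.wait, self.fn, args)
        self._timer.start()

# persist = Debouncer(write_state_to_storage, wait=0.3)
# Ten rapid edits -> a single write, 300 ms after the last edit.
```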
@@ -58,6 +58,7 @@ const zNumericalParameterConfig = z.object({
   fineStep: z.number().default(8),
   coarseStep: z.number().default(64),
 });
+export type NumericalParameterConfig = z.infer<typeof zNumericalParameterConfig>;

 /**
  * Configuration options for the InvokeAI UI.
@@ -0,0 +1,24 @@
+import { Alert, AlertIcon, AlertTitle } from '@invoke-ai/ui-library';
+import { useStore } from '@nanostores/react';
+import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
+import { memo } from 'react';
+import { useTranslation } from 'react-i18next';
+
+export const CanvasAlertsBboxVisibility = memo(() => {
+  const { t } = useTranslation();
+  const canvasManager = useCanvasManager();
+  const isBboxHidden = useStore(canvasManager.tool.tools.bbox.$isBboxHidden);
+
+  if (!isBboxHidden) {
+    return null;
+  }
+
+  return (
+    <Alert status="warning" borderRadius="base" fontSize="sm" shadow="md" w="fit-content">
+      <AlertIcon />
+      <AlertTitle>{t('controlLayers.warnings.bboxHidden')}</AlertTitle>
+    </Alert>
+  );
+});
+
+CanvasAlertsBboxVisibility.displayName = 'CanvasAlertsBboxVisibility';
@@ -15,6 +15,7 @@ import { useCanvasEntityQuickSwitchHotkey } from 'features/controlLayers/hooks/u
 import { useCanvasFilterHotkey } from 'features/controlLayers/hooks/useCanvasFilterHotkey';
 import { useCanvasInvertMaskHotkey } from 'features/controlLayers/hooks/useCanvasInvertMaskHotkey';
 import { useCanvasResetLayerHotkey } from 'features/controlLayers/hooks/useCanvasResetLayerHotkey';
+import { useCanvasToggleBboxHotkey } from 'features/controlLayers/hooks/useCanvasToggleBboxHotkey';
 import { useCanvasToggleNonRasterLayersHotkey } from 'features/controlLayers/hooks/useCanvasToggleNonRasterLayersHotkey';
 import { useCanvasTransformHotkey } from 'features/controlLayers/hooks/useCanvasTransformHotkey';
 import { useCanvasUndoRedoHotkeys } from 'features/controlLayers/hooks/useCanvasUndoRedoHotkeys';
@@ -31,6 +32,7 @@ export const CanvasToolbar = memo(() => {
   useCanvasFilterHotkey();
   useCanvasInvertMaskHotkey();
   useCanvasToggleNonRasterLayersHotkey();
+  useCanvasToggleBboxHotkey();

   return (
     <Flex w="full" gap={2} alignItems="center" px={2}>
@@ -1,7 +1,7 @@
 import { createSelector } from '@reduxjs/toolkit';
 import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
 import type { AppGetState } from 'app/store/store';
-import { useAppDispatch, useAppStore } from 'app/store/storeHooks';
+import { useAppDispatch, useAppSelector, useAppStore } from 'app/store/storeHooks';
 import { deepClone } from 'common/util/deepClone';
 import { getPrefixedId } from 'features/controlLayers/konva/util';
 import {
@@ -16,7 +16,11 @@ import {
   rgRefImageAdded,
 } from 'features/controlLayers/store/canvasSlice';
 import { selectBase, selectMainModelConfig } from 'features/controlLayers/store/paramsSlice';
-import { selectCanvasSlice, selectEntity } from 'features/controlLayers/store/selectors';
+import {
+  selectCanvasSlice,
+  selectEntity,
+  selectSelectedEntityIdentifier,
+} from 'features/controlLayers/store/selectors';
 import type {
   CanvasEntityIdentifier,
   CanvasRegionalGuidanceState,
@@ -136,37 +140,49 @@ export const getDefaultRegionalGuidanceRefImageConfig = (getState: AppGetState):

 export const useAddControlLayer = () => {
   const dispatch = useAppDispatch();
+  const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
+  const selectedControlLayer =
+    selectedEntityIdentifier?.type === 'control_layer' ? selectedEntityIdentifier.id : undefined;
   const func = useCallback(() => {
     const overrides = { controlAdapter: deepClone(initialControlNet) };
-    dispatch(controlLayerAdded({ isSelected: true, overrides }));
-  }, [dispatch]);
+    dispatch(controlLayerAdded({ isSelected: true, overrides, addAfter: selectedControlLayer }));
+  }, [dispatch, selectedControlLayer]);

   return func;
 };

 export const useAddRasterLayer = () => {
   const dispatch = useAppDispatch();
+  const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
+  const selectedRasterLayer =
+    selectedEntityIdentifier?.type === 'raster_layer' ? selectedEntityIdentifier.id : undefined;
   const func = useCallback(() => {
-    dispatch(rasterLayerAdded({ isSelected: true }));
-  }, [dispatch]);
+    dispatch(rasterLayerAdded({ isSelected: true, addAfter: selectedRasterLayer }));
+  }, [dispatch, selectedRasterLayer]);

   return func;
 };

 export const useAddInpaintMask = () => {
   const dispatch = useAppDispatch();
+  const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
+  const selectedInpaintMask =
+    selectedEntityIdentifier?.type === 'inpaint_mask' ? selectedEntityIdentifier.id : undefined;
   const func = useCallback(() => {
-    dispatch(inpaintMaskAdded({ isSelected: true }));
-  }, [dispatch]);
+    dispatch(inpaintMaskAdded({ isSelected: true, addAfter: selectedInpaintMask }));
+  }, [dispatch, selectedInpaintMask]);

   return func;
 };

 export const useAddRegionalGuidance = () => {
   const dispatch = useAppDispatch();
+  const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
+  const selectedRegionalGuidance =
+    selectedEntityIdentifier?.type === 'regional_guidance' ? selectedEntityIdentifier.id : undefined;
   const func = useCallback(() => {
-    dispatch(rgAdded({ isSelected: true }));
-  }, [dispatch]);
+    dispatch(rgAdded({ isSelected: true, addAfter: selectedRegionalGuidance }));
+  }, [dispatch, selectedRegionalGuidance]);

   return func;
 };
@@ -0,0 +1,18 @@
+import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
+import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData';
+import { useCallback } from 'react';
+
+export const useCanvasToggleBboxHotkey = () => {
+  const canvasManager = useCanvasManager();
+
+  const handleToggleBboxVisibility = useCallback(() => {
+    canvasManager.tool.tools.bbox.toggleBboxVisibility();
+  }, [canvasManager]);
+
+  useRegisteredHotkeys({
+    id: 'toggleBbox',
+    category: 'canvas',
+    callback: handleToggleBboxVisibility,
+    dependencies: [handleToggleBboxVisibility],
+  });
+};
@@ -372,6 +372,7 @@ export class CanvasCompositorModule extends CanvasModuleBase {
         position: { x: Math.floor(rect.x), y: Math.floor(rect.y) },
       },
       mergedEntitiesToDelete: deleteMergedEntities ? entityIdentifiers.map(mapId) : [],
+      addAfter: entityIdentifiers.map(mapId).at(-1),
     };

     switch (type) {
@@ -482,13 +482,24 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
// "contain" means that the entity should be scaled to fit within the bbox, but it should not exceed the bbox.
const scale = Math.min(scaleX, scaleY);

// Center the shape within the bounding box
const offsetX = (rect.width - width * scale) / 2;
const offsetY = (rect.height - height * scale) / 2;
// Calculate the scaled dimensions
const scaledWidth = width * scale;
const scaledHeight = height * scale;

// Calculate centered position
const centerX = rect.x + (rect.width - scaledWidth) / 2;
const centerY = rect.y + (rect.height - scaledHeight) / 2;

// Round to grid and clamp to valid bounds
const roundedX = gridSize > 1 ? roundToMultiple(centerX, gridSize) : centerX;
const roundedY = gridSize > 1 ? roundToMultiple(centerY, gridSize) : centerY;

const x = clamp(roundedX, rect.x, rect.x + rect.width - scaledWidth);
const y = clamp(roundedY, rect.y, rect.y + rect.height - scaledHeight);

this.konva.proxyRect.setAttrs({
x: clamp(roundToMultiple(rect.x + offsetX, gridSize), rect.x, rect.x + rect.width),
y: clamp(roundToMultiple(rect.y + offsetY, gridSize), rect.y, rect.y + rect.height),
x,
y,
scaleX: scale,
scaleY: scale,
rotation: 0,
@@ -513,16 +524,32 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
const scaleX = rect.width / width;
const scaleY = rect.height / height;

// "cover" is the same as "contain", but we choose the larger scale to cover the shape
// "cover" means the entity should cover the entire bbox, potentially overflowing
const scale = Math.max(scaleX, scaleY);

// Center the shape within the bounding box
const offsetX = (rect.width - width * scale) / 2;
const offsetY = (rect.height - height * scale) / 2;
// Calculate the scaled dimensions
const scaledWidth = width * scale;
const scaledHeight = height * scale;

// Calculate position - center only if entity exceeds bbox
let x = rect.x;
let y = rect.y;

// If scaled width exceeds bbox width, center horizontally
if (scaledWidth > rect.width) {
const centerX = rect.x + (rect.width - scaledWidth) / 2;
x = gridSize > 1 ? roundToMultiple(centerX, gridSize) : centerX;
}

// If scaled height exceeds bbox height, center vertically
if (scaledHeight > rect.height) {
const centerY = rect.y + (rect.height - scaledHeight) / 2;
y = gridSize > 1 ? roundToMultiple(centerY, gridSize) : centerY;
}

this.konva.proxyRect.setAttrs({
x: roundToMultiple(rect.x + offsetX, gridSize),
y: roundToMultiple(rect.y + offsetY, gridSize),
x,
y,
scaleX: scale,
scaleY: scale,
rotation: 0,

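The two hunks above implement CSS-style "contain" and "cover" fitting. Contain picks the smaller scale factor and clamps the grid-rounded centered position back inside the bbox; cover picks the larger factor and only re-centers an axis that overflows. A self-contained sketch of the contain case, assuming clamp and roundToMultiple behave like the helpers used in the diff:

type Rect = { x: number; y: number; width: number; height: number };
const roundToMultiple = (n: number, multiple: number) => Math.round(n / multiple) * multiple;
const clamp = (n: number, min: number, max: number) => Math.min(Math.max(n, min), max);

const fitContain = (rect: Rect, width: number, height: number, gridSize = 1) => {
  // Smaller scale: the entity fits entirely within the bbox.
  const scale = Math.min(rect.width / width, rect.height / height);
  const scaledWidth = width * scale;
  const scaledHeight = height * scale;
  // Center, snap to the grid, then clamp so grid rounding cannot push the entity outside.
  const centerX = rect.x + (rect.width - scaledWidth) / 2;
  const centerY = rect.y + (rect.height - scaledHeight) / 2;
  const roundedX = gridSize > 1 ? roundToMultiple(centerX, gridSize) : centerX;
  const roundedY = gridSize > 1 ? roundToMultiple(centerY, gridSize) : centerY;
  return {
    x: clamp(roundedX, rect.x, rect.x + rect.width - scaledWidth),
    y: clamp(roundedY, rect.y, rect.y + rect.height - scaledHeight),
    scale,
  };
};
// "cover" differs only in taking Math.max for the scale and skipping the clamp:
// it centers an axis only when the scaled size exceeds the bbox on that axis.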
@@ -66,6 +66,11 @@ export class CanvasBboxToolModule extends CanvasModuleBase {
*/
$aspectRatioBuffer = atom(1);

/**
* Buffer to store the visibility of the bbox.
*/
$isBboxHidden = atom(false);

constructor(parent: CanvasToolModule) {
super();
this.id = getPrefixedId(this.type);
@@ -191,6 +196,9 @@ export class CanvasBboxToolModule extends CanvasModuleBase {

// Update on busy state changes
this.subscriptions.add(this.manager.$isBusy.listen(this.render));

// Listen for stage changes to update the bbox's visibility
this.subscriptions.add(this.$isBboxHidden.listen(this.render));
}

// This is a noop. The cursor is changed when the cursor enters or leaves the bbox.
@@ -206,13 +214,15 @@ export class CanvasBboxToolModule extends CanvasModuleBase {
};

/**
* Renders the bbox. The bbox is only visible when the tool is set to 'bbox'.
* Renders the bbox.
*/
render = () => {
const tool = this.manager.tool.$tool.get();

const { x, y, width, height } = this.manager.stateApi.runSelector(selectBbox).rect;

this.konva.group.visible(!this.$isBboxHidden.get());

// We need to reach up to the preview layer to enable/disable listening so that the bbox can be interacted with.
// If the manager is busy, we disable listening so the bbox cannot be interacted with.
this.konva.group.listening(tool === 'bbox' && !this.manager.$isBusy.get());
@@ -478,4 +488,8 @@ export class CanvasBboxToolModule extends CanvasModuleBase {
this.subscriptions.clear();
this.konva.group.destroy();
};

toggleBboxVisibility = () => {
this.$isBboxHidden.set(!this.$isBboxHidden.get());
};
}

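The visibility feature is built on the same nanostores pattern the module already uses for its other state: an atom holds the flag, render() reads it, a subscription re-renders on change, and the toggle just flips the atom. The pattern in isolation (nanostores' atom/get/set/listen, not the module's actual wiring):

import { atom } from 'nanostores';

const $isBboxHidden = atom(false);
const render = () => {
  console.log('bbox visible:', !$isBboxHidden.get());
};
const unsubscribe = $isBboxHidden.listen(render); // re-render on every change
const toggleBboxVisibility = () => $isBboxHidden.set(!$isBboxHidden.get());

toggleBboxVisibility(); // logs "bbox visible: false"
unsubscribe();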
@@ -111,12 +111,16 @@ const slice = createSlice({
isSelected?: boolean;
isBookmarked?: boolean;
mergedEntitiesToDelete?: string[];
addAfter?: string;
}>
) => {
const { id, overrides, isSelected, isBookmarked, mergedEntitiesToDelete = [] } = action.payload;
const { id, overrides, isSelected, isBookmarked, mergedEntitiesToDelete = [], addAfter } = action.payload;
const entityState = getRasterLayerState(id, overrides);

state.rasterLayers.entities.push(entityState);
const index = addAfter
? state.rasterLayers.entities.findIndex((e) => e.id === addAfter) + 1
: state.rasterLayers.entities.length;
state.rasterLayers.entities.splice(index, 0, entityState);

if (mergedEntitiesToDelete.length > 0) {
state.rasterLayers.entities = state.rasterLayers.entities.filter(
@@ -139,6 +143,7 @@ const slice = createSlice({
isSelected?: boolean;
isBookmarked?: boolean;
mergedEntitiesToDelete?: string[];
addAfter?: string;
}) => ({
payload: { ...payload, id: getPrefixedId('raster_layer') },
}),
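Each of the four entity reducers in this slice applies the same insertion rule: when addAfter is present, splice the new entity in right after the matching id, otherwise append. A generic sketch (note that a stale addAfter id makes findIndex return -1, so the entity is spliced in at index 0):

const insertEntity = <T extends { id: string }>(entities: T[], entity: T, addAfter?: string): void => {
  // findIndex(...) + 1 points just past the anchor; entities.length appends.
  const index = addAfter ? entities.findIndex((e) => e.id === addAfter) + 1 : entities.length;
  entities.splice(index, 0, entity);
};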
@@ -272,13 +277,17 @@ const slice = createSlice({
isSelected?: boolean;
isBookmarked?: boolean;
mergedEntitiesToDelete?: string[];
addAfter?: string;
}>
) => {
const { id, overrides, isSelected, isBookmarked, mergedEntitiesToDelete = [] } = action.payload;
const { id, overrides, isSelected, isBookmarked, mergedEntitiesToDelete = [], addAfter } = action.payload;

const entityState = getControlLayerState(id, overrides);

state.controlLayers.entities.push(entityState);
const index = addAfter
? state.controlLayers.entities.findIndex((e) => e.id === addAfter) + 1
: state.controlLayers.entities.length;
state.controlLayers.entities.splice(index, 0, entityState);

if (mergedEntitiesToDelete.length > 0) {
state.controlLayers.entities = state.controlLayers.entities.filter(
@@ -300,6 +309,7 @@ const slice = createSlice({
isSelected?: boolean;
isBookmarked?: boolean;
mergedEntitiesToDelete?: string[];
addAfter?: string;
}) => ({
payload: { ...payload, id: getPrefixedId('control_layer') },
}),
@@ -570,13 +580,17 @@ const slice = createSlice({
isSelected?: boolean;
isBookmarked?: boolean;
mergedEntitiesToDelete?: string[];
addAfter?: string;
}>
) => {
const { id, overrides, isSelected, isBookmarked, mergedEntitiesToDelete = [] } = action.payload;
const { id, overrides, isSelected, isBookmarked, mergedEntitiesToDelete = [], addAfter } = action.payload;

const entityState = getRegionalGuidanceState(id, overrides);

state.regionalGuidance.entities.push(entityState);
const index = addAfter
? state.regionalGuidance.entities.findIndex((e) => e.id === addAfter) + 1
: state.regionalGuidance.entities.length;
state.regionalGuidance.entities.splice(index, 0, entityState);

if (mergedEntitiesToDelete.length > 0) {
state.regionalGuidance.entities = state.regionalGuidance.entities.filter(
@@ -598,6 +612,7 @@ const slice = createSlice({
isSelected?: boolean;
isBookmarked?: boolean;
mergedEntitiesToDelete?: string[];
addAfter?: string;
}) => ({
payload: { ...payload, id: getPrefixedId('regional_guidance') },
}),
@@ -874,13 +889,17 @@ const slice = createSlice({
isSelected?: boolean;
isBookmarked?: boolean;
mergedEntitiesToDelete?: string[];
addAfter?: string;
}>
) => {
const { id, overrides, isSelected, isBookmarked, mergedEntitiesToDelete = [] } = action.payload;
const { id, overrides, isSelected, isBookmarked, mergedEntitiesToDelete = [], addAfter } = action.payload;

const entityState = getInpaintMaskState(id, overrides);

state.inpaintMasks.entities.push(entityState);
const index = addAfter
? state.inpaintMasks.entities.findIndex((e) => e.id === addAfter) + 1
: state.inpaintMasks.entities.length;
state.inpaintMasks.entities.splice(index, 0, entityState);

if (mergedEntitiesToDelete.length > 0) {
state.inpaintMasks.entities = state.inpaintMasks.entities.filter(
@@ -902,6 +921,7 @@ const slice = createSlice({
isSelected?: boolean;
isBookmarked?: boolean;
mergedEntitiesToDelete?: string[];
addAfter?: string;
}) => ({
payload: { ...payload, id: getPrefixedId('inpaint_mask') },
}),
@@ -1249,25 +1269,33 @@ const slice = createSlice({
newEntity.name = `${newEntity.name} (Copy)`;
}
switch (newEntity.type) {
case 'raster_layer':
case 'raster_layer': {
newEntity.id = getPrefixedId('raster_layer');
state.rasterLayers.entities.push(newEntity);
const newEntityIndex = state.rasterLayers.entities.findIndex((e) => e.id === entityIdentifier.id) + 1;
state.rasterLayers.entities.splice(newEntityIndex, 0, newEntity);
break;
case 'control_layer':
}
case 'control_layer': {
newEntity.id = getPrefixedId('control_layer');
state.controlLayers.entities.push(newEntity);
const newEntityIndex = state.controlLayers.entities.findIndex((e) => e.id === entityIdentifier.id) + 1;
state.controlLayers.entities.splice(newEntityIndex, 0, newEntity);
break;
case 'regional_guidance':
}
case 'regional_guidance': {
newEntity.id = getPrefixedId('regional_guidance');
for (const refImage of newEntity.referenceImages) {
refImage.id = getPrefixedId('regional_guidance_ip_adapter');
}
state.regionalGuidance.entities.push(newEntity);
const newEntityIndex = state.regionalGuidance.entities.findIndex((e) => e.id === entityIdentifier.id) + 1;
state.regionalGuidance.entities.splice(newEntityIndex, 0, newEntity);
break;
case 'inpaint_mask':
}
case 'inpaint_mask': {
newEntity.id = getPrefixedId('inpaint_mask');
state.inpaintMasks.entities.push(newEntity);
const newEntityIndex = state.inpaintMasks.entities.findIndex((e) => e.id === entityIdentifier.id) + 1;
state.inpaintMasks.entities.splice(newEntityIndex, 0, newEntity);
break;
}
}

state.selectedEntityIdentifier = getEntityIdentifier(newEntity);

@@ -107,14 +107,7 @@ const slice = createSlice({
return;
}

// Clamp CLIP skip layer count to the bounds of the new model
if (model.base === 'sdxl') {
// We don't support user-defined CLIP skip for SDXL because it doesn't do anything useful
state.clipSkip = 0;
} else {
const { maxClip } = CLIP_SKIP_MAP[model.base];
state.clipSkip = clamp(state.clipSkip, 0, maxClip);
}
applyClipSkip(state, model, state.clipSkip);
},
vaeSelected: (state, action: PayloadAction<ParameterVAEModel | null>) => {
// null is a valid VAE!
@@ -170,7 +163,7 @@ const slice = createSlice({
state.vaePrecision = action.payload;
},
setClipSkip: (state, action: PayloadAction<number>) => {
state.clipSkip = action.payload;
applyClipSkip(state, state.model, action.payload);
},
shouldUseCpuNoiseChanged: (state, action: PayloadAction<boolean>) => {
state.shouldUseCpuNoise = action.payload;
@@ -181,15 +174,6 @@ const slice = createSlice({
negativePromptChanged: (state, action: PayloadAction<ParameterNegativePrompt>) => {
state.negativePrompt = action.payload;
},
positivePrompt2Changed: (state, action: PayloadAction<string>) => {
state.positivePrompt2 = action.payload;
},
negativePrompt2Changed: (state, action: PayloadAction<string>) => {
state.negativePrompt2 = action.payload;
},
shouldConcatPromptsChanged: (state, action: PayloadAction<boolean>) => {
state.shouldConcatPrompts = action.payload;
},
refinerModelChanged: (state, action: PayloadAction<ParameterSDXLRefinerModel | null>) => {
const result = zParamsState.shape.refinerModel.safeParse(action.payload);
if (!result.success) {
@@ -375,6 +359,33 @@ const slice = createSlice({
},
});

const applyClipSkip = (state: { clipSkip: number }, model: ParameterModel | null, clipSkip: number) => {
if (model === null) {
return;
}

const maxClip = getModelMaxClipSkip(model);

state.clipSkip = clamp(clipSkip, 0, maxClip);
};

const hasModelClipSkip = (model: ParameterModel | null) => {
if (model === null) {
return false;
}

return getModelMaxClipSkip(model) > 0;
};

const getModelMaxClipSkip = (model: ParameterModel) => {
if (model.base === 'sdxl') {
// We don't support user-defined CLIP skip for SDXL because it doesn't do anything useful
return 0;
}

return CLIP_SKIP_MAP[model.base].maxClip;
};

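A worked example of the helpers above, assuming a hypothetical SD1.x entry in CLIP_SKIP_MAP with maxClip of 12 (the real map lives elsewhere in the codebase):

// applyClipSkip(state, sd1Model, 99)  -> state.clipSkip === 12 (clamped to the model's maxClip)
// applyClipSkip(state, sdxlModel, 2)  -> state.clipSkip === 0  (SDXL has no useful CLIP skip)
// applyClipSkip(state, null, 2)       -> state untouched       (no model selected)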
const resetState = (state: ParamsState): ParamsState => {
// When a new session is requested, we need to keep the current model selections, plus dependent state
// like VAE precision. Everything else gets reset to default.
@@ -425,9 +436,6 @@ export const {
shouldUseCpuNoiseChanged,
positivePromptChanged,
negativePromptChanged,
positivePrompt2Changed,
negativePrompt2Changed,
shouldConcatPromptsChanged,
refinerModelChanged,
setRefinerSteps,
setRefinerCFGScale,
@@ -460,8 +468,7 @@ export const paramsSliceConfig: SliceConfig<typeof slice> = {
};

export const selectParamsSlice = (state: RootState) => state.params;
export const createParamsSelector = <T>(selector: Selector<ParamsState, T>) =>
createSelector(selectParamsSlice, selector);
const createParamsSelector = <T>(selector: Selector<ParamsState, T>) => createSelector(selectParamsSlice, selector);

export const selectBase = createParamsSelector((params) => params.model?.base);
export const selectIsSDXL = createParamsSelector((params) => params.model?.base === 'sdxl');
@@ -497,7 +504,8 @@ export const selectCFGScale = createParamsSelector((params) => params.cfgScale);
export const selectGuidance = createParamsSelector((params) => params.guidance);
export const selectSteps = createParamsSelector((params) => params.steps);
export const selectCFGRescaleMultiplier = createParamsSelector((params) => params.cfgRescaleMultiplier);
export const selectCLIPSKip = createParamsSelector((params) => params.clipSkip);
export const selectCLIPSkip = createParamsSelector((params) => params.clipSkip);
export const selectHasModelCLIPSkip = createParamsSelector((params) => hasModelClipSkip(params.model));
export const selectCanvasCoherenceEdgeSize = createParamsSelector((params) => params.canvasCoherenceEdgeSize);
export const selectCanvasCoherenceMinDenoise = createParamsSelector((params) => params.canvasCoherenceMinDenoise);
export const selectCanvasCoherenceMode = createParamsSelector((params) => params.canvasCoherenceMode);
@@ -518,9 +526,6 @@ export const selectModelSupportsNegativePrompt = createSelector(
[selectIsFLUX, selectIsChatGPT4o, selectIsFluxKontext],
(isFLUX, isChatGPT4o, isFluxKontext) => !isFLUX && !isChatGPT4o && !isFluxKontext
);
export const selectPositivePrompt2 = createParamsSelector((params) => params.positivePrompt2);
export const selectNegativePrompt2 = createParamsSelector((params) => params.negativePrompt2);
export const selectShouldConcatPrompts = createParamsSelector((params) => params.shouldConcatPrompts);
export const selectScheduler = createParamsSelector((params) => params.scheduler);
export const selectSeamlessXAxis = createParamsSelector((params) => params.seamlessXAxis);
export const selectSeamlessYAxis = createParamsSelector((params) => params.seamlessYAxis);

@@ -14,9 +14,7 @@ import {
zParameterMaskBlurMethod,
zParameterModel,
zParameterNegativePrompt,
zParameterNegativeStylePromptSDXL,
zParameterPositivePrompt,
zParameterPositiveStylePromptSDXL,
zParameterPrecision,
zParameterScheduler,
zParameterSDXLRefinerModel,
@@ -534,9 +532,6 @@ export const zParamsState = z.object({
shouldUseCpuNoise: z.boolean(),
positivePrompt: zParameterPositivePrompt,
negativePrompt: zParameterNegativePrompt,
positivePrompt2: zParameterPositiveStylePromptSDXL,
negativePrompt2: zParameterNegativeStylePromptSDXL,
shouldConcatPrompts: z.boolean(),
refinerModel: zParameterSDXLRefinerModel.nullable(),
refinerSteps: z.number(),
refinerCFGScale: z.number(),
@@ -584,9 +579,6 @@ export const getInitialParamsState = (): ParamsState => ({
shouldUseCpuNoise: true,
positivePrompt: '',
negativePrompt: null,
positivePrompt2: '',
negativePrompt2: '',
shouldConcatPrompts: true,
refinerModel: null,
refinerSteps: 20,
refinerCFGScale: 7.5,

@@ -7,13 +7,7 @@ import { useGallerySearchTerm } from 'features/gallery/components/ImageGrid/useG
import { selectSelectedBoardId } from 'features/gallery/store/gallerySelectors';
import { galleryViewChanged, selectGallerySlice } from 'features/gallery/store/gallerySlice';
import { useAutoLayoutContext } from 'features/ui/layouts/auto-layout-context';
import {
GALLERY_PANEL_DEFAULT_HEIGHT_PX,
GALLERY_PANEL_ID,
GALLERY_PANEL_MIN_EXPANDED_HEIGHT_PX,
GALLERY_PANEL_MIN_HEIGHT_PX,
} from 'features/ui/layouts/shared';
import { useCollapsibleGridviewPanel } from 'features/ui/layouts/use-collapsible-gridview-panel';
import { useGalleryPanel } from 'features/ui/layouts/use-gallery-panel';
import type { CSSProperties } from 'react';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
@@ -34,16 +28,8 @@ export const GalleryPanel = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const { tab } = useAutoLayoutContext();
const collapsibleApi = useCollapsibleGridviewPanel(
tab,
GALLERY_PANEL_ID,
'vertical',
GALLERY_PANEL_DEFAULT_HEIGHT_PX,
GALLERY_PANEL_MIN_HEIGHT_PX,
GALLERY_PANEL_MIN_EXPANDED_HEIGHT_PX
);
const isCollapsed = useStore(collapsibleApi.$isCollapsed);

const galleryPanel = useGalleryPanel(tab);
const isCollapsed = useStore(galleryPanel.$isCollapsed);
const galleryView = useAppSelector(selectGalleryView);
const initialSearchTerm = useAppSelector(selectSearchTerm);
const searchDisclosure = useDisclosure(!!initialSearchTerm);
@@ -58,11 +44,11 @@ export const GalleryPanel = memo(() => {

const handleClickSearch = useCallback(() => {
onResetSearchTerm();
if (!searchDisclosure.isOpen && collapsibleApi.$isCollapsed.get()) {
collapsibleApi.expand();
if (!searchDisclosure.isOpen && galleryPanel.$isCollapsed.get()) {
galleryPanel.expand();
}
searchDisclosure.toggle();
}, [collapsibleApi, onResetSearchTerm, searchDisclosure]);
}, [galleryPanel, onResetSearchTerm, searchDisclosure]);

const selectedBoardId = useAppSelector(selectSelectedBoardId);
const boardName = useBoardName(selectedBoardId);
@@ -73,7 +59,7 @@ export const GalleryPanel = memo(() => {
<Button
size="sm"
variant="ghost"
onClick={collapsibleApi.toggle}
onClick={galleryPanel.toggle}
leftIcon={isCollapsed ? <PiCaretDownBold /> : <PiCaretUpBold />}
noOfLines={1}
>

@@ -40,7 +40,7 @@ export const GallerySettingsPopover = memo(() => {
<PopoverBody>
<Flex direction="column" gap={2}>
<Text fontWeight="semibold" color="base.300">
Gallery Settings
{t('gallery.gallerySettings')}
</Text>

<Divider />

@@ -0,0 +1,39 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext';
import { boardIdSelected } from 'features/gallery/store/gallerySlice';
import { navigationApi } from 'features/ui/layouts/navigation-api';
import { useGalleryPanel } from 'features/ui/layouts/use-gallery-panel';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import { memo, useCallback, useMemo } from 'react';
import { flushSync } from 'react-dom';
import { useTranslation } from 'react-i18next';
import { PiCrosshairBold } from 'react-icons/pi';

export const ImageMenuItemLocateInGalery = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const imageDTO = useImageDTOContext();
const activeTab = useAppSelector(selectActiveTab);
const galleryPanel = useGalleryPanel(activeTab);

const isGalleryImage = useMemo(() => {
return !imageDTO.is_intermediate;
}, [imageDTO]);

const onClick = useCallback(() => {
navigationApi.expandRightPanel();
galleryPanel.expand();
flushSync(() => {
dispatch(boardIdSelected({ boardId: imageDTO.board_id ?? 'none', selectedImageName: imageDTO.image_name }));
});
}, [dispatch, galleryPanel, imageDTO]);

return (
<MenuItem icon={<PiCrosshairBold />} onClickCapture={onClick} isDisabled={!isGalleryImage}>
{t('boards.locateInGalery')}
</MenuItem>
);
});

ImageMenuItemLocateInGalery.displayName = 'ImageMenuItemLocateInGalery';
@@ -2,6 +2,7 @@ import { Menu, MenuButton, MenuItem, MenuList } from '@invoke-ai/ui-library';
import { SubMenuButtonContent, useSubMenu } from 'common/hooks/useSubMenu';
import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext';
import { useRecallAll } from 'features/gallery/hooks/useRecallAll';
import { useRecallCLIPSkip } from 'features/gallery/hooks/useRecallCLIPSkip';
import { useRecallDimensions } from 'features/gallery/hooks/useRecallDimensions';
import { useRecallPrompts } from 'features/gallery/hooks/useRecallPrompts';
import { useRecallRemix } from 'features/gallery/hooks/useRecallRemix';
@@ -17,7 +18,7 @@ import {
PiRulerBold,
} from 'react-icons/pi';

export const ImageMenuItemMetadataRecallActions = memo(() => {
export const ImageMenuItemMetadataRecallActionsCanvasGenerateTabs = memo(() => {
const { t } = useTranslation();
const subMenu = useSubMenu();

@@ -28,6 +29,7 @@ export const ImageMenuItemMetadataRecallActions = memo(() => {
const recallPrompts = useRecallPrompts(imageDTO);
const recallSeed = useRecallSeed(imageDTO);
const recallDimensions = useRecallDimensions(imageDTO);
const recallCLIPSkip = useRecallCLIPSkip(imageDTO);

return (
<MenuItem {...subMenu.parentMenuItemProps} icon={<PiArrowBendUpLeftBold />}>
@@ -55,10 +57,14 @@ export const ImageMenuItemMetadataRecallActions = memo(() => {
<MenuItem icon={<PiRulerBold />} onClick={recallDimensions.recall} isDisabled={!recallDimensions.isEnabled}>
{t('parameters.useSize')}
</MenuItem>
<MenuItem icon={<PiRulerBold />} onClick={recallCLIPSkip.recall} isDisabled={!recallCLIPSkip.isEnabled}>
{t('parameters.useClipSkip')}
</MenuItem>
</MenuList>
</Menu>
</MenuItem>
);
});

ImageMenuItemMetadataRecallActions.displayName = 'ImageMenuItemMetadataRecallActions';
ImageMenuItemMetadataRecallActionsCanvasGenerateTabs.displayName =
'ImageMenuItemMetadataRecallActionsCanvasGenerateTabs';
@@ -0,0 +1,38 @@
import { Menu, MenuButton, MenuItem, MenuList } from '@invoke-ai/ui-library';
import { SubMenuButtonContent, useSubMenu } from 'common/hooks/useSubMenu';
import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext';
import { useRecallPrompts } from 'features/gallery/hooks/useRecallPrompts';
import { useRecallSeed } from 'features/gallery/hooks/useRecallSeed';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiArrowBendUpLeftBold, PiPlantBold, PiQuotesBold } from 'react-icons/pi';

export const ImageMenuItemMetadataRecallActionsUpscaleTab = memo(() => {
const { t } = useTranslation();
const subMenu = useSubMenu();

const imageDTO = useImageDTOContext();

const recallPrompts = useRecallPrompts(imageDTO);
const recallSeed = useRecallSeed(imageDTO);

return (
<MenuItem {...subMenu.parentMenuItemProps} icon={<PiArrowBendUpLeftBold />}>
<Menu {...subMenu.menuProps}>
<MenuButton {...subMenu.menuButtonProps}>
<SubMenuButtonContent label={t('parameters.recallMetadata')} />
</MenuButton>
<MenuList {...subMenu.menuListProps}>
<MenuItem icon={<PiQuotesBold />} onClick={recallPrompts.recall} isDisabled={!recallPrompts.isEnabled}>
{t('parameters.usePrompt')}
</MenuItem>
<MenuItem icon={<PiPlantBold />} onClick={recallSeed.recall} isDisabled={!recallSeed.isEnabled}>
{t('parameters.useSeed')}
</MenuItem>
</MenuList>
</Menu>
</MenuItem>
);
});

ImageMenuItemMetadataRecallActionsUpscaleTab.displayName = 'ImageMenuItemMetadataRecallActionsUpscaleTab';
@@ -6,7 +6,8 @@ import { ImageMenuItemCopy } from 'features/gallery/components/ImageContextMenu/
import { ImageMenuItemDelete } from 'features/gallery/components/ImageContextMenu/ImageMenuItemDelete';
import { ImageMenuItemDownload } from 'features/gallery/components/ImageContextMenu/ImageMenuItemDownload';
import { ImageMenuItemLoadWorkflow } from 'features/gallery/components/ImageContextMenu/ImageMenuItemLoadWorkflow';
import { ImageMenuItemMetadataRecallActions } from 'features/gallery/components/ImageContextMenu/ImageMenuItemMetadataRecallActions';
import { ImageMenuItemLocateInGalery } from 'features/gallery/components/ImageContextMenu/ImageMenuItemLocateInGalery';
import { ImageMenuItemMetadataRecallActionsCanvasGenerateTabs } from 'features/gallery/components/ImageContextMenu/ImageMenuItemMetadataRecallActionsCanvasGenerateTabs';
import { ImageMenuItemNewCanvasFromImageSubMenu } from 'features/gallery/components/ImageContextMenu/ImageMenuItemNewCanvasFromImageSubMenu';
import { ImageMenuItemNewLayerFromImageSubMenu } from 'features/gallery/components/ImageContextMenu/ImageMenuItemNewLayerFromImageSubMenu';
import { ImageMenuItemOpenInNewTab } from 'features/gallery/components/ImageContextMenu/ImageMenuItemOpenInNewTab';
@@ -21,6 +22,7 @@ import { selectActiveTab } from 'features/ui/store/uiSelectors';
import { memo } from 'react';
import type { ImageDTO } from 'services/api/types';

import { ImageMenuItemMetadataRecallActionsUpscaleTab } from './ImageMenuItemMetadataRecallActionsUpscaleTab';
import { ImageMenuItemUseAsPromptTemplate } from './ImageMenuItemUseAsPromptTemplate';

type SingleSelectionMenuItemsProps = {
@@ -42,7 +44,8 @@ const SingleSelectionMenuItems = ({ imageDTO }: SingleSelectionMenuItemsProps) =
</IconMenuItemGroup>
<MenuDivider />
<ImageMenuItemLoadWorkflow />
{(tab === 'canvas' || tab === 'generate') && <ImageMenuItemMetadataRecallActions />}
{(tab === 'canvas' || tab === 'generate') && <ImageMenuItemMetadataRecallActionsCanvasGenerateTabs />}
{tab === 'upscaling' && <ImageMenuItemMetadataRecallActionsUpscaleTab />}
<MenuDivider />
<ImageMenuItemSendToUpscale />
<ImageMenuItemUseForPromptGeneration />
@@ -53,6 +56,11 @@ const SingleSelectionMenuItems = ({ imageDTO }: SingleSelectionMenuItemsProps) =
<MenuDivider />
<ImageMenuItemChangeBoard />
<ImageMenuItemStarUnstar />
{(tab === 'canvas' || tab === 'generate' || tab === 'workflows' || tab === 'upscaling') &&
!imageDTO.is_intermediate && (
// Only render this button on tabs with a gallery.
<ImageMenuItemLocateInGalery />
)}
</ImageDTOContextProvider>
);
};

@@ -33,8 +33,6 @@ const ImageMetadataActions = (props: Props) => {
<UnrecallableMetadataDatum metadata={metadata} handler={MetadataHandlers.GenerationMode} />
<SingleMetadataDatum metadata={metadata} handler={MetadataHandlers.PositivePrompt} />
<SingleMetadataDatum metadata={metadata} handler={MetadataHandlers.NegativePrompt} />
<SingleMetadataDatum metadata={metadata} handler={MetadataHandlers.PositiveStylePrompt} />
<SingleMetadataDatum metadata={metadata} handler={MetadataHandlers.NegativeStylePrompt} />
<SingleMetadataDatum metadata={metadata} handler={MetadataHandlers.MainModel} />
<SingleMetadataDatum metadata={metadata} handler={MetadataHandlers.VAEModel} />
<SingleMetadataDatum metadata={metadata} handler={MetadataHandlers.Width} />
@@ -42,6 +40,7 @@ const ImageMetadataActions = (props: Props) => {
<SingleMetadataDatum metadata={metadata} handler={MetadataHandlers.Seed} />
<SingleMetadataDatum metadata={metadata} handler={MetadataHandlers.Steps} />
<SingleMetadataDatum metadata={metadata} handler={MetadataHandlers.Scheduler} />
<SingleMetadataDatum metadata={metadata} handler={MetadataHandlers.CLIPSkip} />
<SingleMetadataDatum metadata={metadata} handler={MetadataHandlers.CFGScale} />
<SingleMetadataDatum metadata={metadata} handler={MetadataHandlers.CFGRescaleMultiplier} />
<SingleMetadataDatum metadata={metadata} handler={MetadataHandlers.Guidance} />

@@ -1,5 +1,5 @@
import { Button, Divider, IconButton, Menu, MenuButton, MenuList } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { DeleteImageButton } from 'features/deleteImageModal/components/DeleteImageButton';
import SingleSelectionMenuItems from 'features/gallery/components/ImageContextMenu/SingleSelectionMenuItems';
import { useDeleteImage } from 'features/gallery/hooks/useDeleteImage';
@@ -10,14 +10,19 @@ import { useRecallDimensions } from 'features/gallery/hooks/useRecallDimensions'
import { useRecallPrompts } from 'features/gallery/hooks/useRecallPrompts';
import { useRecallRemix } from 'features/gallery/hooks/useRecallRemix';
import { useRecallSeed } from 'features/gallery/hooks/useRecallSeed';
import { boardIdSelected } from 'features/gallery/store/gallerySlice';
import { PostProcessingPopover } from 'features/parameters/components/PostProcessing/PostProcessingPopover';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { navigationApi } from 'features/ui/layouts/navigation-api';
import { useGalleryPanel } from 'features/ui/layouts/use-gallery-panel';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import { memo } from 'react';
import { memo, useCallback, useMemo } from 'react';
import { flushSync } from 'react-dom';
import { useTranslation } from 'react-i18next';
import {
PiArrowsCounterClockwiseBold,
PiAsteriskBold,
PiCrosshairBold,
PiDotsThreeOutlineFill,
PiFlowArrowBold,
PiPencilBold,
@@ -30,7 +35,25 @@ import type { ImageDTO } from 'services/api/types';
export const CurrentImageButtons = memo(({ imageDTO }: { imageDTO: ImageDTO }) => {
const { t } = useTranslation();
const tab = useAppSelector(selectActiveTab);
const dispatch = useAppDispatch();
const activeTab = useAppSelector(selectActiveTab);
const galleryPanel = useGalleryPanel(activeTab);

const isGalleryImage = useMemo(() => {
return !imageDTO.is_intermediate;
}, [imageDTO]);

const locateInGallery = useCallback(() => {
navigationApi.expandRightPanel();
galleryPanel.expand();
flushSync(() => {
dispatch(boardIdSelected({ boardId: imageDTO.board_id ?? 'none', selectedImageName: imageDTO.image_name }));
});
}, [dispatch, galleryPanel, imageDTO]);

const isCanvasOrGenerateTab = tab === 'canvas' || tab === 'generate';
const isCanvasOrGenerateOrUpscalingTab = tab === 'canvas' || tab === 'generate' || tab === 'upscaling';
const doesTabHaveGallery = tab === 'canvas' || tab === 'generate' || tab === 'workflows' || tab === 'upscaling';

const isUpscalingEnabled = useFeatureStatus('upscaling');

@@ -74,6 +97,17 @@ export const CurrentImageButtons = memo(({ imageDTO }: { imageDTO: ImageDTO }) =

<Divider orientation="vertical" h={8} mx={2} />

{doesTabHaveGallery && isGalleryImage && (
<IconButton
icon={<PiCrosshairBold />}
aria-label={t('boards.locateInGalery')}
tooltip={t('boards.locateInGalery')}
onClick={locateInGallery}
variant="link"
size="sm"
alignSelf="stretch"
/>
)}
<IconButton
icon={<PiFlowArrowBold />}
tooltip={`${t('nodes.loadWorkflow')} (W)`}
@@ -94,7 +128,7 @@ export const CurrentImageButtons = memo(({ imageDTO }: { imageDTO: ImageDTO }) =
onClick={recallRemix.recall}
/>
)}
{isCanvasOrGenerateTab && (
{isCanvasOrGenerateOrUpscalingTab && (
<IconButton
icon={<PiQuotesBold />}
tooltip={`${t('parameters.usePrompt')} (P)`}
@@ -105,7 +139,7 @@ export const CurrentImageButtons = memo(({ imageDTO }: { imageDTO: ImageDTO }) =
onClick={recallPrompts.recall}
/>
)}
{isCanvasOrGenerateTab && (
{isCanvasOrGenerateOrUpscalingTab && (
<IconButton
icon={<PiPlantBold />}
tooltip={`${t('parameters.useSeed')} (S)`}

@@ -9,6 +9,7 @@ import {
selectGalleryImageMinimumWidth,
selectImageToCompare,
selectLastSelectedImage,
selectSelection,
selectSelectionCount,
} from 'features/gallery/store/gallerySelectors';
import { imageToCompareChanged, selectionChanged } from 'features/gallery/store/gallerySlice';
@@ -138,6 +139,7 @@ const scrollIntoView = (
) => {
if (range.endIndex === 0) {
// No range is rendered; no need to scroll to anything.
log.trace('Not scrolling into view: Range endIndex is 0');
return;
}

@@ -145,6 +147,7 @@ const scrollIntoView = (

if (targetIndex === -1) {
// The image isn't in the currently rendered list.
log.trace('Not scrolling into view: targetIndex is -1');
return;
}

@@ -154,12 +157,28 @@ const scrollIntoView = (

if (!targetItem) {
if (targetIndex > range.endIndex) {
log.trace(
{
index: targetIndex,
behavior: 'auto',
align: 'start',
},
'Scrolling into view: not in DOM'
);
virtuosoGridHandle.scrollToIndex({
index: targetIndex,
behavior: 'auto',
align: 'start',
});
} else if (targetIndex < range.startIndex) {
log.trace(
{
index: targetIndex,
behavior: 'auto',
align: 'end',
},
'Scrolling into view: not in DOM'
);
virtuosoGridHandle.scrollToIndex({
index: targetIndex,
behavior: 'auto',
@@ -180,12 +199,28 @@ const scrollIntoView = (
const rootRect = rootEl.getBoundingClientRect();

if (itemRect.top < rootRect.top) {
log.trace(
{
index: targetIndex,
behavior: 'auto',
align: 'start',
},
'Scrolling into view: in overscan'
);
virtuosoGridHandle.scrollToIndex({
index: targetIndex,
behavior: 'auto',
align: 'start',
});
} else if (itemRect.bottom > rootRect.bottom) {
log.trace(
{
index: targetIndex,
behavior: 'auto',
align: 'end',
},
'Scrolling into view: in overscan'
);
virtuosoGridHandle.scrollToIndex({
index: targetIndex,
behavior: 'auto',
@@ -193,6 +228,7 @@ const scrollIntoView = (
});
} else {
// Image is already in view
log.debug('Not scrolling into view: Image is already in view');
}

return;
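The branches above reduce to one decision: scroll when the target is outside the rendered range, or rendered but clipped by the scroll root; otherwise do nothing. A condensed sketch of that decision (the align values mirror the diff; scrollToIndex is virtuoso's API):

type Align = 'start' | 'end';
const getScrollAlign = (
  targetIndex: number,
  range: { startIndex: number; endIndex: number },
  itemRect: DOMRect | null, // null when the item is not rendered at all
  rootRect: DOMRect
): Align | null => {
  if (targetIndex > range.endIndex || (itemRect !== null && itemRect.top < rootRect.top)) {
    return 'start';
  }
  if (targetIndex < range.startIndex || (itemRect !== null && itemRect.bottom > rootRect.bottom)) {
    return 'end';
  }
  return null; // already fully in view
};
// const align = getScrollAlign(...);
// if (align) { virtuosoGridHandle.scrollToIndex({ index: targetIndex, behavior: 'auto', align }); }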
@@ -392,9 +428,10 @@ const useKeepSelectedImageInView = (
rootRef: React.RefObject<HTMLDivElement>,
rangeRef: MutableRefObject<ListRange>
) => {
const targetImageName = useAppSelector(selectLastSelectedImage);
const selection = useAppSelector(selectSelection);

useEffect(() => {
const targetImageName = selection.at(-1);
const virtuosoGridHandle = virtuosoRef.current;
const rootEl = rootRef.current;
const range = rangeRef.current;
@@ -402,8 +439,11 @@ const useKeepSelectedImageInView = (
if (!virtuosoGridHandle || !rootEl || !targetImageName || !imageNames || imageNames.length === 0) {
return;
}
scrollIntoView(targetImageName, imageNames, rootEl, virtuosoGridHandle, range);
}, [targetImageName, imageNames, rangeRef, rootRef, virtuosoRef]);

setTimeout(() => {
scrollIntoView(targetImageName, imageNames, rootEl, virtuosoGridHandle, range);
}, 0);
}, [imageNames, rangeRef, rootRef, virtuosoRef, selection]);
};

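Two things changed in the hook above: the scroll target now comes from the selection array itself, and the scroll is deferred with setTimeout(..., 0) so it runs after the current render pass, once virtuoso's range and DOM have settled enough to measure. The defer pattern in isolation (this sketch adds a cleanup; the diff itself fires once per effect run):

import { useEffect } from 'react';

const useDeferredScroll = (selection: string[], doScroll: (imageName: string) => void) => {
  useEffect(() => {
    const target = selection.at(-1);
    if (!target) {
      return;
    }
    // Defer one macrotask so layout and the virtualizer's range settle first.
    const id = setTimeout(() => doScroll(target), 0);
    return () => clearTimeout(id);
  }, [selection, doScroll]);
};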
/**

@@ -0,0 +1,72 @@
import { useAppSelector, useAppStore } from 'app/store/storeHooks';
import { selectHasModelCLIPSkip } from 'features/controlLayers/store/paramsSlice';
import { MetadataHandlers, MetadataUtils } from 'features/metadata/parsing';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import type { TabName } from 'features/ui/store/uiTypes';
import { useCallback, useEffect, useMemo, useState } from 'react';
import { useDebouncedMetadata } from 'services/api/hooks/useDebouncedMetadata';
import type { ImageDTO } from 'services/api/types';

const ALLOWED_TABS: TabName[] = ['canvas', 'generate', 'upscaling'];

export const useRecallCLIPSkip = (imageDTO: ImageDTO) => {
const store = useAppStore();
const hasModelCLIPSkip = useAppSelector(selectHasModelCLIPSkip);
const tab = useAppSelector(selectActiveTab);
const [hasCLIPSkip, setHasCLIPSkip] = useState(false);

const { metadata, isLoading } = useDebouncedMetadata(imageDTO.image_name);

useEffect(() => {
const parse = async () => {
try {
await MetadataHandlers.CLIPSkip.parse(metadata, store);
setHasCLIPSkip(true);
} catch {
setHasCLIPSkip(false);
}
};

if (!hasModelCLIPSkip) {
setHasCLIPSkip(false);
return;
}

parse();
}, [metadata, store, hasModelCLIPSkip]);

const isEnabled = useMemo(() => {
if (isLoading) {
return false;
}

if (!ALLOWED_TABS.includes(tab)) {
return false;
}

if (!metadata) {
return false;
}

if (!hasCLIPSkip) {
return false;
}

return true;
}, [hasCLIPSkip, isLoading, metadata, tab]);

const recall = useCallback(() => {
if (!metadata) {
return;
}
if (!isEnabled) {
return;
}
MetadataUtils.recallByHandler({ metadata, handler: MetadataHandlers.CLIPSkip, store });
}, [metadata, isEnabled, store]);

return {
recall,
isEnabled,
};
};
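Consumers get a stable { recall, isEnabled } pair; the CLIP skip menu item added earlier in this diff wires it up like so (sketch):

// const recallCLIPSkip = useRecallCLIPSkip(imageDTO);
// <MenuItem onClick={recallCLIPSkip.recall} isDisabled={!recallCLIPSkip.isEnabled}>
//   {t('parameters.useClipSkip')}
// </MenuItem>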
@@ -1,12 +1,15 @@
import { useAppSelector, useAppStore } from 'app/store/storeHooks';
import { MetadataHandlers, MetadataUtils } from 'features/metadata/parsing';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import type { TabName } from 'features/ui/store/uiTypes';
import { useCallback, useEffect, useMemo, useState } from 'react';
import { useDebouncedMetadata } from 'services/api/hooks/useDebouncedMetadata';
import type { ImageDTO } from 'services/api/types';

import { useClearStylePresetWithToast } from './useClearStylePresetWithToast';

const ALLOWED_TABS: TabName[] = ['canvas', 'generate', 'upscaling'];

export const useRecallPrompts = (imageDTO: ImageDTO) => {
const store = useAppStore();
const tab = useAppSelector(selectActiveTab);
@@ -19,12 +22,7 @@ export const useRecallPrompts = (imageDTO: ImageDTO) => {
const parse = async () => {
try {
const result = await MetadataUtils.hasMetadataByHandlers({
handlers: [
MetadataHandlers.PositivePrompt,
MetadataHandlers.NegativePrompt,
MetadataHandlers.PositiveStylePrompt,
MetadataHandlers.NegativeStylePrompt,
],
handlers: [MetadataHandlers.PositivePrompt, MetadataHandlers.NegativePrompt],
metadata,
store,
require: 'some',
@@ -43,7 +41,7 @@ export const useRecallPrompts = (imageDTO: ImageDTO) => {
return false;
}

if (tab !== 'canvas' && tab !== 'generate') {
if (!ALLOWED_TABS.includes(tab)) {
return false;
}

@@ -1,10 +1,13 @@
import { useAppSelector, useAppStore } from 'app/store/storeHooks';
import { MetadataHandlers, MetadataUtils } from 'features/metadata/parsing';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import type { TabName } from 'features/ui/store/uiTypes';
import { useCallback, useEffect, useMemo, useState } from 'react';
import { useDebouncedMetadata } from 'services/api/hooks/useDebouncedMetadata';
import type { ImageDTO } from 'services/api/types';

const ALLOWED_TABS: TabName[] = ['canvas', 'generate', 'upscaling'];

export const useRecallSeed = (imageDTO: ImageDTO) => {
const store = useAppStore();
const tab = useAppSelector(selectActiveTab);
@@ -30,7 +33,7 @@ export const useRecallSeed = (imageDTO: ImageDTO) => {
return false;
}

if (tab !== 'canvas' && tab !== 'generate') {
if (!ALLOWED_TABS.includes(tab)) {
return false;
}

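useRecallCLIPSkip, useRecallPrompts, and useRecallSeed now share the same tab gate, replacing the hand-written tab !== 'canvas' && tab !== 'generate' checks and extending recall to the upscaling tab. The guard in isolation:

const ALLOWED_TABS: TabName[] = ['canvas', 'generate', 'upscaling'];
// ALLOWED_TABS.includes('upscaling') === true
// ALLOWED_TABS.includes('workflows') === false -> the hook reports isEnabled: false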
@@ -1,4 +1,3 @@
import { objectEquals } from '@observ33r/object-equals';
import type { PayloadAction } from '@reduxjs/toolkit';
import { createSlice } from '@reduxjs/toolkit';
import type { RootState } from 'app/store/store';
@@ -43,54 +42,16 @@ const slice = createSlice({
initialState: getInitialState(),
reducers: {
imageSelected: (state, action: PayloadAction<string | null>) => {
// Let's be efficient here and not update the selection unless it has actually changed. This helps to prevent
// unnecessary re-renders of the gallery.

const selectedImageName = action.payload;

// If we got `null`, clear the selection
if (!selectedImageName) {
// But only if we have images selected
if (state.selection.length > 0) {
state.selection = [];
}
return;
}

// If we have multiple images selected, clear the selection and select the new image
if (state.selection.length !== 1) {
state.selection = [];
} else {
state.selection = [selectedImageName];
return;
}

// If the selected image is different from the current selection, clear the selection and select the new image
if (state.selection[0] !== selectedImageName) {
state.selection = [selectedImageName];
return;
}

// Else we have the same image selected, do nothing
},
selectionChanged: (state, action: PayloadAction<string[]>) => {
// Let's be efficient here and not update the selection unless it has actually changed. This helps to prevent
// unnecessary re-renders of the gallery.

// Remove duplicates from the selection
const newSelection = uniq(action.payload);

// If the new selection has a different length, update the selection
if (newSelection.length !== state.selection.length) {
state.selection = newSelection;
return;
}

// If the new selection is different, update the selection
if (!objectEquals(newSelection, state.selection)) {
state.selection = newSelection;
return;
}

// Else we have the same selection, do nothing
state.selection = uniq(action.payload);
},
imageToCompareChanged: (state, action: PayloadAction<string | null>) => {
state.imageToCompare = action.payload;

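selectionChanged now leans entirely on uniq for deduplication, dropping the manual length/equality short-circuits. uniq keeps the first occurrence of each name (assuming the slice's existing uniq utility import, which sits outside this hunk):

// uniq(['a.png', 'b.png', 'a.png']) -> ['a.png', 'b.png']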
@@ -9,14 +9,13 @@ import { bboxHeightChanged, bboxWidthChanged, canvasMetadataRecalled } from 'fea
import { loraAllDeleted, loraRecalled } from 'features/controlLayers/store/lorasSlice';
import {
heightChanged,
negativePrompt2Changed,
negativePromptChanged,
positivePrompt2Changed,
positivePromptChanged,
refinerModelChanged,
selectBase,
setCfgRescaleMultiplier,
setCfgScale,
setClipSkip,
setGuidance,
setImg2imgStrength,
setRefinerCFGScale,
@@ -30,7 +29,6 @@ import {
setSeamlessYAxis,
setSeed,
setSteps,
shouldConcatPromptsChanged,
vaeSelected,
widthChanged,
} from 'features/controlLayers/store/paramsSlice';
@@ -44,12 +42,12 @@ import { modelSelected } from 'features/parameters/store/actions';
import type {
ParameterCFGRescaleMultiplier,
ParameterCFGScale,
ParameterCLIPSkip,
ParameterGuidance,
ParameterHeight,
ParameterModel,
ParameterNegativePrompt,
ParameterPositivePrompt,
ParameterPositiveStylePromptSDXL,
ParameterScheduler,
ParameterSDXLRefinerModel,
ParameterSDXLRefinerNegativeAestheticScore,
@@ -67,12 +65,11 @@ import {
zLoRAWeight,
zParameterCFGRescaleMultiplier,
zParameterCFGScale,
zParameterCLIPSkip,
zParameterGuidance,
zParameterImageDimension,
zParameterNegativePrompt,
zParameterNegativeStylePromptSDXL,
zParameterPositivePrompt,
zParameterPositiveStylePromptSDXL,
zParameterScheduler,
zParameterSDXLRefinerNegativeAestheticScore,
zParameterSDXLRefinerPositiveAestheticScore,
@@ -289,46 +286,6 @@ const NegativePrompt: SingleMetadataHandler<ParameterNegativePrompt> = {
};
//#endregion Negative Prompt

//#region SDXL Positive Style Prompt
const PositiveStylePrompt: SingleMetadataHandler<ParameterPositiveStylePromptSDXL> = {
[SingleMetadataKey]: true,
type: 'PositiveStylePrompt',
parse: (metadata, _store) => {
const raw = getProperty(metadata, 'positive_style_prompt');
const parsed = zParameterPositiveStylePromptSDXL.parse(raw);
return Promise.resolve(parsed);
},
recall: (value, store) => {
store.dispatch(positivePrompt2Changed(value));
},
i18nKey: 'sdxl.posStylePrompt',
LabelComponent: MetadataLabel,
ValueComponent: ({ value }: SingleMetadataValueProps<ParameterPositiveStylePromptSDXL>) => (
<MetadataPrimitiveValue value={value} />
),
};
//#endregion SDXL Positive Style Prompt

//#region SDXL Negative Style Prompt
const NegativeStylePrompt: SingleMetadataHandler<ParameterPositiveStylePromptSDXL> = {
[SingleMetadataKey]: true,
type: 'NegativeStylePrompt',
parse: (metadata, _store) => {
const raw = getProperty(metadata, 'negative_style_prompt');
const parsed = zParameterNegativeStylePromptSDXL.parse(raw);
return Promise.resolve(parsed);
},
recall: (value, store) => {
store.dispatch(negativePrompt2Changed(value));
},
i18nKey: 'sdxl.negStylePrompt',
LabelComponent: MetadataLabel,
ValueComponent: ({ value }: SingleMetadataValueProps<ParameterPositiveStylePromptSDXL>) => (
<MetadataPrimitiveValue value={value} />
),
};
//#endregion SDXL Negative Style Prompt

//#region CFG Scale
const CFGScale: SingleMetadataHandler<ParameterCFGScale> = {
[SingleMetadataKey]: true,
@@ -367,6 +324,24 @@ const CFGRescaleMultiplier: SingleMetadataHandler<ParameterCFGRescaleMultiplier>
};
//#endregion CFG Rescale Multiplier

//#region CLIP Skip
const CLIPSkip: SingleMetadataHandler<ParameterCLIPSkip> = {
[SingleMetadataKey]: true,
type: 'CLIPSkip',
parse: (metadata, _store) => {
const raw = getProperty(metadata, 'clip_skip');
const parsed = zParameterCLIPSkip.parse(raw);
return Promise.resolve(parsed);
},
recall: (value, store) => {
store.dispatch(setClipSkip(value));
},
i18nKey: 'metadata.clipSkip',
LabelComponent: MetadataLabel,
ValueComponent: ({ value }: SingleMetadataValueProps<ParameterCLIPSkip>) => <MetadataPrimitiveValue value={value} />,
};
//#endregion CLIP Skip

//#region Guidance
const Guidance: SingleMetadataHandler<ParameterGuidance> = {
[SingleMetadataKey]: true,
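The CLIPSkip handler follows the file's standard handler contract: parse extracts a raw metadata property and validates it with a zod schema (throwing if missing or invalid), while recall dispatches the matching slice action. Stripped of the branding key and the UI components the real type carries, the contract is roughly:

type SingleMetadataHandlerSketch<T> = {
  parse: (metadata: unknown, store: AppStore) => Promise<T>; // throws if the property is missing/invalid
  recall: (value: T, store: AppStore) => void; // pushes the parsed value into state
};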
@@ -927,10 +902,9 @@ export const MetadataHandlers = {
GenerationMode,
PositivePrompt,
NegativePrompt,
PositiveStylePrompt,
NegativeStylePrompt,
CFGScale,
CFGRescaleMultiplier,
CLIPSkip,
Guidance,
Scheduler,
Width,
@@ -1052,26 +1026,6 @@ const recallByHandlers = async (arg: {
}
}

// We may need to update the prompt concat flag based on the recalled prompts
const positivePrompt = recalled.get(MetadataHandlers.PositivePrompt);
const negativePrompt = recalled.get(MetadataHandlers.NegativePrompt);
const positiveStylePrompt = recalled.get(MetadataHandlers.PositiveStylePrompt);
const negativeStylePrompt = recalled.get(MetadataHandlers.NegativeStylePrompt);

// The values will be undefined if the handler was not recalled
if (
positivePrompt !== undefined ||
negativePrompt !== undefined ||
positiveStylePrompt !== undefined ||
negativeStylePrompt !== undefined
) {
const concat =
(Boolean(positiveStylePrompt) && positiveStylePrompt === positivePrompt) ||
(Boolean(negativeStylePrompt) && negativeStylePrompt === negativePrompt);

store.dispatch(shouldConcatPromptsChanged(concat));
}

if (!silent) {
if (recalled.size > 0) {
toast({
@@ -1094,12 +1048,7 @@ const recallByHandlers = async (arg: {
const recallPrompts = async (metadata: unknown, store: AppStore) => {
const recalled = await recallByHandlers({
metadata,
handlers: [
MetadataHandlers.PositivePrompt,
MetadataHandlers.NegativePrompt,
MetadataHandlers.PositiveStylePrompt,
MetadataHandlers.NegativeStylePrompt,
],
handlers: [MetadataHandlers.PositivePrompt, MetadataHandlers.NegativePrompt],
store,
silent: true,
});

@@ -2,7 +2,7 @@ import { Button, Flex, Grid, Heading, Text } from '@invoke-ai/ui-library';
import ScrollableContent from 'common/components/OverlayScrollbars/ScrollableContent';
import { map } from 'es-toolkit/compat';
import { setInstallModelsTabByName } from 'features/modelManagerV2/store/installModelsStore';
import { StarterBundleButton } from 'features/modelManagerV2/subpanels/AddModelPanel/StarterModels/StarterBundle';
import { StarterBundleButton } from 'features/modelManagerV2/subpanels/AddModelPanel/StarterModels/StarterBundleButton';
import { StarterBundleTooltipContentCompact } from 'features/modelManagerV2/subpanels/AddModelPanel/StarterModels/StarterBundleTooltipContentCompact';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

@@ -1,21 +0,0 @@
import type { ButtonProps } from '@invoke-ai/ui-library';
import { Button } from '@invoke-ai/ui-library';
import { useStarterBundleInstall } from 'features/modelManagerV2/hooks/useStarterBundleInstall';
import { useStarterBundleInstallStatus } from 'features/modelManagerV2/hooks/useStarterBundleInstallStatus';
import { useCallback } from 'react';
import type { S } from 'services/api/types';

export const StarterBundleButton = ({ bundle, ...rest }: { bundle: S['StarterModelBundle'] } & ButtonProps) => {
const { installBundle } = useStarterBundleInstall();
const { install } = useStarterBundleInstallStatus(bundle);

const handleClickBundle = useCallback(() => {
installBundle(bundle);
}, [installBundle, bundle]);

return (
<Button onClick={handleClickBundle} isDisabled={install.length === 0} {...rest}>
{bundle.name}
</Button>
);
};
@@ -0,0 +1,61 @@
import type { ButtonProps } from '@invoke-ai/ui-library';
import {
Button,
ConfirmationAlertDialog,
Flex,
ListItem,
Text,
UnorderedList,
useDisclosure,
} from '@invoke-ai/ui-library';
import { useStarterBundleInstall } from 'features/modelManagerV2/hooks/useStarterBundleInstall';
import { useStarterBundleInstallStatus } from 'features/modelManagerV2/hooks/useStarterBundleInstallStatus';
import { t } from 'i18next';
import type { MouseEvent } from 'react';
import { useCallback } from 'react';
import type { S } from 'services/api/types';

export const StarterBundleButton = ({ bundle, ...rest }: { bundle: S['StarterModelBundle'] } & ButtonProps) => {
const { installBundle } = useStarterBundleInstall();
const { install } = useStarterBundleInstallStatus(bundle);
const { isOpen, onOpen, onClose } = useDisclosure();

const onClickBundle = useCallback(
(e: MouseEvent<HTMLButtonElement>) => {
e.stopPropagation();
onOpen();
},
[onOpen]
);
const handleInstallBundle = useCallback(() => {
installBundle(bundle);
}, [installBundle, bundle]);

return (
<>
<Button onClick={onClickBundle} isDisabled={install.length === 0} {...rest}>
{bundle.name}
</Button>
<ConfirmationAlertDialog
isOpen={isOpen}
onClose={onClose}
title={t('modelManager.installBundle')}
acceptCallback={handleInstallBundle}
acceptButtonText={t('modelManager.install')}
useInert={false}
>
<Flex rowGap={4} flexDirection="column">
<Text fontWeight="bold">{t('modelManager.installBundleMsg1', { bundleName: bundle.name })}</Text>
<Text>{t('modelManager.installBundleMsg2', { count: install.length })}</Text>
<UnorderedList>
{install.map((model, index) => (
<ListItem key={index} wordBreak="break-all">
<Text>{model.config.name}</Text>
</ListItem>
))}
</UnorderedList>
</Flex>
</ConfirmationAlertDialog>
</>
);
};
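Callers are unchanged by the rewrite: the confirmation dialog is self-contained inside the component, so rendering the button is still just, e.g.:

// <StarterBundleButton bundle={bundle} size="sm" variant="outline" />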
@@ -7,7 +7,7 @@ import { useTranslation } from 'react-i18next';
import { PiInfoBold, PiXBold } from 'react-icons/pi';
import type { GetStarterModelsResponse } from 'services/api/endpoints/models';

import { StarterBundleButton } from './StarterBundle';
import { StarterBundleButton } from './StarterBundleButton';
import { StarterBundleTooltipContent } from './StarterBundleTooltipContent';
import { StarterModelsResultItem } from './StarterModelsResultItem';

@@ -1,7 +1,7 @@
import type { SystemStyleObject } from '@invoke-ai/ui-library';
import { Flex } from '@invoke-ai/ui-library';
import InvocationNodeTitle from 'features/nodes/components/flow/nodes/common/InvocationNodeTitle';
import NodeCollapseButton from 'features/nodes/components/flow/nodes/common/NodeCollapseButton';
import NodeTitle from 'features/nodes/components/flow/nodes/common/NodeTitle';
import InvocationNodeClassificationIcon from 'features/nodes/components/flow/nodes/Invocation/InvocationNodeClassificationIcon';
import { useNodeHasErrors } from 'features/nodes/hooks/useNodeIsInvalid';
import { memo } from 'react';
@@ -35,7 +35,7 @@ const InvocationNodeHeader = ({ nodeId, isOpen }: Props) => {
    <Flex sx={sx} data-is-open={isOpen} data-is-invalid={isInvalid}>
      <NodeCollapseButton nodeId={nodeId} isOpen={isOpen} />
      <InvocationNodeClassificationIcon nodeId={nodeId} />
      <NodeTitle nodeId={nodeId} />
      <InvocationNodeTitle nodeId={nodeId} />
      <Flex alignItems="center">
        <InvocationNodeStatusIndicator nodeId={nodeId} />
        <InvocationNodeInfoIcon nodeId={nodeId} />

@@ -11,7 +11,7 @@ type Props = {

export const InputFieldAddToFormRoot = memo(({ nodeId, fieldName }: Props) => {
  const { t } = useTranslation();
  const addToRoot = useAddNodeFieldToRoot(nodeId, fieldName);
  const { isAddedToRoot, addNodeFieldToRoot } = useAddNodeFieldToRoot(nodeId, fieldName);

  return (
    <IconButton
@@ -21,7 +21,8 @@ export const InputFieldAddToFormRoot = memo(({ nodeId, fieldName }: Props) => {
      icon={<PiPlusBold />}
      pointerEvents="auto"
      size="xs"
      onClick={addToRoot}
      onClick={addNodeFieldToRoot}
      isDisabled={isAddedToRoot}
    />
  );
});

@@ -30,12 +30,12 @@ const labelSx: SystemStyleObject = {
  _hover: {
    fontWeight: 'semibold !important',
  },
  '&[data-is-invalid="true"]': {
    color: 'error.300',
  },
  '&[data-is-added-to-form="true"]': {
    color: 'blue.300',
  },
  '&[data-is-invalid="true"]': {
    color: 'error.300',
  },
  '&[data-is-disabled="true"]': {
    opacity: 0.5,
  },
@@ -106,7 +106,7 @@ export const InputFieldTitle = memo((props: Props) => {
      onDoubleClick={onDoubleClick}
    >
      {editable.value}
      {isAddedToForm && <Icon as={PiLinkBold} color="blue.200" ml={1} />}
      {isAddedToForm && <Icon as={PiLinkBold} color={isInvalid ? 'error.300' : 'blue.200'} ml={1} />}
    </Text>
  </Tooltip>
);

@@ -1,22 +1,32 @@
import type { SystemStyleObject } from '@invoke-ai/ui-library';
import { Flex, Input, Text } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { useEditable } from 'common/hooks/useEditable';
import { useBatchGroupColorToken } from 'features/nodes/hooks/useBatchGroupColorToken';
import { useBatchGroupId } from 'features/nodes/hooks/useBatchGroupId';
import { useNodeHasErrors } from 'features/nodes/hooks/useNodeIsInvalid';
import { useNodeTemplateTitleSafe } from 'features/nodes/hooks/useNodeTemplateTitleSafe';
import { useNodeUserTitleSafe } from 'features/nodes/hooks/useNodeUserTitleSafe';
import { nodeLabelChanged } from 'features/nodes/store/nodesSlice';
import { NO_FIT_ON_DOUBLE_CLICK_CLASS } from 'features/nodes/types/constants';
import { NO_DRAG_CLASS, NO_FIT_ON_DOUBLE_CLICK_CLASS } from 'features/nodes/types/constants';
import { memo, useCallback, useMemo, useRef } from 'react';
import { useTranslation } from 'react-i18next';

const labelSx: SystemStyleObject = {
  fontWeight: 'semibold',
  '&[data-is-invalid="true"]': {
    color: 'error.300',
  },
};

type Props = {
  nodeId: string;
  title?: string;
};

const NodeTitle = ({ nodeId, title }: Props) => {
const InvocationNodeTitle = ({ nodeId, title }: Props) => {
  const dispatch = useAppDispatch();
  const isInvalid = useNodeHasErrors();
  const label = useNodeUserTitleSafe();
  const batchGroupId = useBatchGroupId(nodeId);
  const batchGroupColorToken = useBatchGroupColorToken(batchGroupId);
@@ -53,16 +63,18 @@ const NodeTitle = ({ nodeId, title }: Props) => {
      {!editable.isEditing && (
        <Text
          className={NO_FIT_ON_DOUBLE_CLICK_CLASS}
          fontWeight="semibold"
          color={batchGroupColorToken}
          onDoubleClick={editable.startEditing}
          sx={labelSx}
          noOfLines={1}
          color={batchGroupColorToken}
          data-is-invalid={isInvalid}
          onDoubleClick={editable.startEditing}
        >
          {titleWithBatchGroupId}
        </Text>
      )}
      {editable.isEditing && (
        <Input
          className={NO_DRAG_CLASS}
          ref={inputRef}
          {...editable.inputProps}
          variant="outline"
@@ -73,4 +85,4 @@ const NodeTitle = ({ nodeId, title }: Props) => {
  );
};

export default memo(NodeTitle);
export default memo(InvocationNodeTitle);

@@ -5,6 +5,7 @@ import { useInvocationNodeContext } from 'features/nodes/components/flow/nodes/I
import { useIsWorkflowEditorLocked } from 'features/nodes/hooks/useIsWorkflowEditorLocked';
import { useMouseOverFormField, useMouseOverNode } from 'features/nodes/hooks/useMouseOverNode';
import { useNodeExecutionState } from 'features/nodes/hooks/useNodeExecutionState';
import { useNodeHasErrors } from 'features/nodes/hooks/useNodeIsInvalid';
import { useZoomToNode } from 'features/nodes/hooks/useZoomToNode';
import { selectNodeOpacity } from 'features/nodes/store/workflowSettingsSlice';
import { DRAG_HANDLE_CLASSNAME, NO_FIT_ON_DOUBLE_CLICK_CLASS, NODE_WIDTH } from 'features/nodes/types/constants';
@@ -29,6 +30,8 @@ const NodeWrapper = (props: NodeWrapperProps) => {
  const mouseOverFormField = useMouseOverFormField(nodeId);
  const zoomToNode = useZoomToNode(nodeId);
  const isLocked = useIsWorkflowEditorLocked();
  const isInvalid = useNodeHasErrors();
  const hasError = isMissingTemplate || isInvalid;

  const executionState = useNodeExecutionState(nodeId);
  const isInProgress = executionState?.status === zNodeStatus.enum.IN_PROGRESS;
@@ -74,7 +77,7 @@ const NodeWrapper = (props: NodeWrapperProps) => {
      data-is-editor-locked={isLocked}
      data-is-selected={selected}
      data-is-mouse-over-form-field={mouseOverFormField.isMouseOverFormField}
      data-status={isMissingTemplate ? 'error' : needsUpdate ? 'warning' : undefined}
      data-status={hasError ? 'error' : needsUpdate ? 'warning' : undefined}
    >
      <Box sx={shadowsSx} />
      <Box sx={inProgressSx} data-is-in-progress={isInProgress} />

@@ -4,7 +4,7 @@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { useEditable } from 'common/hooks/useEditable';
import { nodeLabelChanged } from 'features/nodes/store/nodesSlice';
import { selectNodes } from 'features/nodes/store/selectors';
import { NO_FIT_ON_DOUBLE_CLICK_CLASS } from 'features/nodes/types/constants';
import { NO_DRAG_CLASS, NO_FIT_ON_DOUBLE_CLICK_CLASS } from 'features/nodes/types/constants';
import { memo, useCallback, useMemo, useRef } from 'react';
import { useTranslation } from 'react-i18next';

@@ -56,6 +56,7 @@ const NonInvocationNodeTitle = ({ nodeId, title }: Props) => {
      )}
      {editable.isEditing && (
        <Input
          className={NO_DRAG_CLASS}
          ref={inputRef}
          {...editable.inputProps}
          variant="outline"

@@ -2,15 +2,20 @@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { useInputFieldInstance } from 'features/nodes/hooks/useInputFieldInstance';
import { useInputFieldTemplateOrThrow } from 'features/nodes/hooks/useInputFieldTemplateOrThrow';
import { formElementAdded } from 'features/nodes/store/nodesSlice';
import { selectFormRootElementId } from 'features/nodes/store/selectors';
import { buildSelectWorkflowFormNodeExists, selectFormRootElementId } from 'features/nodes/store/selectors';
import { buildNodeFieldElement } from 'features/nodes/types/workflow';
import { useCallback } from 'react';
import { useCallback, useMemo } from 'react';

export const useAddNodeFieldToRoot = (nodeId: string, fieldName: string) => {
  const dispatch = useAppDispatch();
  const rootElementId = useAppSelector(selectFormRootElementId);
  const fieldTemplate = useInputFieldTemplateOrThrow(fieldName);
  const field = useInputFieldInstance(fieldName);
  const selectWorkflowFormNodeExists = useMemo(
    () => buildSelectWorkflowFormNodeExists(nodeId, fieldName),
    [nodeId, fieldName]
  );
  const isAddedToRoot = useAppSelector(selectWorkflowFormNodeExists);

  const addNodeFieldToRoot = useCallback(() => {
    const element = buildNodeFieldElement(nodeId, fieldName, fieldTemplate.type);
@@ -23,5 +28,5 @@ export const useAddNodeFieldToRoot = (nodeId: string, fieldName: string) => {
    );
  }, [nodeId, fieldName, fieldTemplate.type, dispatch, rootElementId, field.value]);

  return addNodeFieldToRoot;
  return { isAddedToRoot, addNodeFieldToRoot };
};

@@ -103,3 +103,7 @@ export const selectWorkflowFormNodeFieldFieldIdentifiersDeduped = createSelector
);

export const buildSelectElement = (id: string) => createNodesSelector((workflow) => workflow.form?.elements[id]);
export const buildSelectWorkflowFormNodeExists = (nodeId: string, fieldName: string) =>
  createSelector(selectWorkflowFormNodeFieldFieldIdentifiersDeduped, (identifiers) =>
    identifiers.some((identifier) => identifier.nodeId === nodeId && identifier.fieldName === fieldName)
  );

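The hook and selector pair above follow the standard reselect factory pattern: buildSelectWorkflowFormNodeExists returns a fresh memoized selector per (nodeId, fieldName), so the caller must memoize the built selector or its cache is thrown away on every render. A hypothetical wrapper hook, using only names that appear in the diff (useIsFieldInForm itself is illustrative):

// Sketch only (editor's illustration): memoize the built selector so its
// reselect cache survives re-renders with the same arguments.
import { useMemo } from 'react';
import { useAppSelector } from 'app/store/storeHooks';
import { buildSelectWorkflowFormNodeExists } from 'features/nodes/store/selectors';

const useIsFieldInForm = (nodeId: string, fieldName: string): boolean => {
  const selector = useMemo(() => buildSelectWorkflowFormNodeExists(nodeId, fieldName), [nodeId, fieldName]);
  return useAppSelector(selector);
};
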
@@ -115,7 +115,7 @@ export const buildMultidiffusionUpscaleGraph = async (state: RootState): Promise
      type: 'sdxl_compel_prompt',
      id: getPrefixedId('neg_cond'),
      prompt: prompts.negative,
      style: prompts.negativeStyle,
      style: prompts.negative,
    });
    modelLoader = g.addNode({
      type: 'sdxl_model_loader',
@@ -130,21 +130,14 @@ export const buildMultidiffusionUpscaleGraph = async (state: RootState): Promise
    g.addEdge(modelLoader, 'unet', tiledMultidiffusion, 'unet');

    g.addEdge(positivePrompt, 'value', posCond, 'prompt');
    g.addEdge(positivePrompt, 'value', posCond, 'style');

    addSDXLLoRAs(state, g, tiledMultidiffusion, modelLoader, null, posCond, negCond);

    g.upsertMetadata({
      negative_prompt: prompts.negative,
      negative_style_prompt: prompts.negativeStyle,
    });

    if (prompts.useMainPromptsForStyle) {
      g.addEdge(positivePrompt, 'value', posCond, 'style');
      g.addEdgeToMetadata(positivePrompt, 'value', 'positive_style_prompt');
    } else {
      posCond.style = prompts.positiveStyle;
      g.upsertMetadata({ positive_style_prompt: prompts.positiveStyle });
    }
    g.addEdgeToMetadata(positivePrompt, 'value', 'positive_prompt');
  } else {
    const prompts = selectPresetModifiedPrompts(state);

@@ -179,6 +172,8 @@ export const buildMultidiffusionUpscaleGraph = async (state: RootState): Promise
    g.upsertMetadata({
      negative_prompt: prompts.negative,
    });

    g.addEdgeToMetadata(positivePrompt, 'value', 'positive_prompt');
  }

  const modelConfig = await fetchModelConfigWithTypeGuard(model.key, isNonRefinerMainModelConfig);

@@ -156,17 +156,24 @@ export const buildFLUXGraph = async (arg: GraphBuilderArg): Promise<GraphBuilder
    .filter((entity) => getGlobalReferenceImageWarnings(entity, model).length === 0);

  if (validFLUXKontextConfigs.length > 0) {
    const kontextConcatenator = g.addNode({
      id: getPrefixedId('flux_kontext_image_prep'),
      type: 'flux_kontext_image_prep',
      images: validFLUXKontextConfigs.map(({ config }) => zImageField.parse(config.image)),
    const fluxKontextCollect = g.addNode({
      type: 'collect',
      id: getPrefixedId('flux_kontext_collect'),
    });
    const kontextConditioning = g.addNode({
      type: 'flux_kontext',
      id: getPrefixedId('flux_kontext'),
    });
    g.addEdge(kontextConcatenator, 'image', kontextConditioning, 'image');
    g.addEdge(kontextConditioning, 'kontext_cond', denoise, 'kontext_conditioning');
    for (const { config } of validFLUXKontextConfigs) {
      const kontextImagePrep = g.addNode({
        id: getPrefixedId('flux_kontext_image_prep'),
        type: 'flux_kontext_image_prep',
        images: [zImageField.parse(config.image)],
      });
      const kontextConditioning = g.addNode({
        type: 'flux_kontext',
        id: getPrefixedId('flux_kontext'),
      });
      g.addEdge(kontextImagePrep, 'image', kontextConditioning, 'image');
      g.addEdge(kontextConditioning, 'kontext_cond', fluxKontextCollect, 'item');
    }
    g.addEdge(fluxKontextCollect, 'collection', denoise, 'kontext_conditioning');

    g.upsertMetadata({ ref_images: [validFLUXKontextConfigs] }, 'merge');
  }

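The FLUX Kontext hunk above replaces one image-prep node fed with every reference image by a per-image prep/conditioning pair fanned into a single 'collect' node, whose collection drives denoise.kontext_conditioning. Schematically, the fan-in shape looks like this (a sketch only; the 'producer'/'consumer' node types and `items` are placeholders, not real invocations):

// Sketch only (editor's illustration): the general collect fan-in pattern.
const collect = g.addNode({ type: 'collect', id: getPrefixedId('collect') });
for (const item of items) {
  // one producer node per item...
  const producer = g.addNode({ type: 'producer', id: getPrefixedId('producer'), value: item });
  // ...each feeding the shared collect node
  g.addEdge(producer, 'output', collect, 'item');
}
// the consumer receives all produced items as a single collection
g.addEdge(collect, 'collection', consumer, 'input');
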
@@ -78,7 +78,7 @@ export const buildSDXLGraph = async (arg: GraphBuilderArg): Promise<GraphBuilder
    type: 'sdxl_compel_prompt',
    id: getPrefixedId('neg_cond'),
    prompt: prompts.negative,
    style: prompts.useMainPromptsForStyle ? prompts.negative : prompts.negativeStyle,
    style: prompts.negative,
  });
  const negCondCollect = g.addNode({
    type: 'collect',
@@ -123,6 +123,8 @@ export const buildSDXLGraph = async (arg: GraphBuilderArg): Promise<GraphBuilder
  g.addEdge(modelLoader, 'clip2', negCond, 'clip2');

  g.addEdge(positivePrompt, 'value', posCond, 'prompt');
  g.addEdge(positivePrompt, 'value', posCond, 'style');

  g.addEdge(posCond, 'conditioning', posCondCollect, 'item');
  g.addEdge(posCondCollect, 'collection', denoise, 'positive_conditioning');

@@ -141,20 +143,11 @@ export const buildSDXLGraph = async (arg: GraphBuilderArg): Promise<GraphBuilder
    rand_device: shouldUseCpuNoise ? 'cpu' : 'cuda',
    scheduler,
    negative_prompt: prompts.negative,
    negative_style_prompt: prompts.useMainPromptsForStyle ? prompts.negative : prompts.negativeStyle,
    vae: vae ?? undefined,
  });
  g.addEdgeToMetadata(seed, 'value', 'seed');
  g.addEdgeToMetadata(positivePrompt, 'value', 'positive_prompt');

  if (prompts.useMainPromptsForStyle) {
    g.addEdge(positivePrompt, 'value', posCond, 'style');
    g.addEdgeToMetadata(positivePrompt, 'value', 'positive_style_prompt');
  } else {
    posCond.style = prompts.positiveStyle;
    g.upsertMetadata({ positive_style_prompt: prompts.positiveStyle });
  }

  const seamless = addSeamless(state, g, denoise, modelLoader, vaeLoader);

  addSDXLLoRAs(state, g, denoise, modelLoader, seamless, posCond, negCond);

@@ -85,7 +85,7 @@ export const selectPresetModifiedPrompts = createSelector(
  selectListStylePresetsRequestState,
  (params, stylePresetSlice, listStylePresetsRequestState) => {
    const negativePrompt = params.negativePrompt ?? '';
    const { positivePrompt, positivePrompt2, negativePrompt2, shouldConcatPrompts } = params;
    const { positivePrompt } = params;
    const { activeStylePresetId } = stylePresetSlice;

    if (activeStylePresetId) {
@@ -107,9 +107,6 @@ export const selectPresetModifiedPrompts = createSelector(
      return {
        positive: presetModifiedPositivePrompt,
        negative: presetModifiedNegativePrompt,
        positiveStyle: positivePrompt2,
        negativeStyle: negativePrompt2,
        useMainPromptsForStyle: shouldConcatPrompts,
      };
    }
  }
@@ -117,9 +114,6 @@ export const selectPresetModifiedPrompts = createSelector(
    return {
      positive: positivePrompt,
      negative: negativePrompt,
      positiveStyle: positivePrompt2,
      negativeStyle: negativePrompt2,
      useMainPromptsForStyle: shouldConcatPrompts,
    };
  }
);

@@ -1,14 +1,14 @@
import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { selectCLIPSKip, selectModel, setClipSkip } from 'features/controlLayers/store/paramsSlice';
import { selectCLIPSkip, selectModel, setClipSkip } from 'features/controlLayers/store/paramsSlice';
import { CLIP_SKIP_MAP } from 'features/parameters/types/constants';
import { selectCLIPSkipConfig } from 'features/system/store/configSlice';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';

const ParamClipSkip = () => {
  const clipSkip = useAppSelector(selectCLIPSKip);
  const clipSkip = useAppSelector(selectCLIPSkip);
  const config = useAppSelector(selectCLIPSkipConfig);
  const model = useAppSelector(selectModel);

@@ -2,9 +2,11 @@ import { IconButton, Tooltip } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { negativePromptChanged, selectHasNegativePrompt } from 'features/controlLayers/store/paramsSlice';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiPlusMinusBold } from 'react-icons/pi';

export const NegativePromptToggleButton = memo(() => {
  const { t } = useTranslation();
  const hasNegativePrompt = useAppSelector(selectHasNegativePrompt);

  const dispatch = useAppDispatch();
@@ -18,8 +20,8 @@ export const NegativePromptToggleButton = memo(() => {
  }, [dispatch, hasNegativePrompt]);

  const label = useMemo(
    () => (hasNegativePrompt ? 'Remove Negative Prompt' : 'Add Negative Prompt'),
    [hasNegativePrompt]
    () => (hasNegativePrompt ? t('common.removeNegativePrompt') : t('common.addNegativePrompt')),
    [hasNegativePrompt, t]
  );

  return (

@@ -4,7 +4,6 @@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { usePersistedTextAreaSize } from 'common/hooks/usePersistedTextareaSize';
import {
  positivePromptChanged,
  selectBase,
  selectModelSupportsNegativePrompt,
  selectPositivePrompt,
} from 'features/controlLayers/store/paramsSlice';
@@ -21,7 +20,6 @@ import { PromptExpansionOverlay } from 'features/prompt/PromptExpansion/PromptEx
import { promptExpansionApi } from 'features/prompt/PromptExpansion/state';
import { PromptPopover } from 'features/prompt/PromptPopover';
import { usePrompt } from 'features/prompt/usePrompt';
import { SDXLConcatButton } from 'features/sdxl/components/SDXLPrompts/SDXLConcatButton';
import {
  selectStylePresetActivePresetId,
  selectStylePresetViewMode,
@@ -42,7 +40,6 @@ const persistOptions: Parameters<typeof usePersistedTextAreaSize>[2] = {
export const ParamPositivePrompt = memo(() => {
  const dispatch = useAppDispatch();
  const prompt = useAppSelector(selectPositivePrompt);
  const baseModel = useAppSelector(selectBase);
  const viewMode = useAppSelector(selectStylePresetViewMode);
  const activeStylePresetId = useAppSelector(selectStylePresetActivePresetId);
  const modelSupportsNegativePrompt = useAppSelector(selectModelSupportsNegativePrompt);
@@ -118,7 +115,6 @@ export const ParamPositivePrompt = memo(() => {
        <PromptOverlayButtonWrapper>
          <Flex flexDir="column" gap={2} justifyContent="flex-start" alignItems="center">
            <AddPromptTriggerButton isOpen={isOpen} onOpen={onOpen} />
            {baseModel === 'sdxl' && <SDXLConcatButton />}
            <ShowDynamicPromptsPreviewButton />
            {modelSupportsNegativePrompt && <NegativePromptToggleButton />}
          </Flex>

@@ -1,33 +1,18 @@
import { Flex } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { RefImageList } from 'features/controlLayers/components/RefImage/RefImageList';
import {
  createParamsSelector,
  selectHasNegativePrompt,
  selectModelSupportsNegativePrompt,
} from 'features/controlLayers/store/paramsSlice';
import { selectHasNegativePrompt, selectModelSupportsNegativePrompt } from 'features/controlLayers/store/paramsSlice';
import { ParamNegativePrompt } from 'features/parameters/components/Core/ParamNegativePrompt';
import { ParamPositivePrompt } from 'features/parameters/components/Core/ParamPositivePrompt';
import { ParamSDXLNegativeStylePrompt } from 'features/sdxl/components/SDXLPrompts/ParamSDXLNegativeStylePrompt';
import { ParamSDXLPositiveStylePrompt } from 'features/sdxl/components/SDXLPrompts/ParamSDXLPositiveStylePrompt';
import { memo } from 'react';

const selectWithStylePrompts = createParamsSelector((params) => {
  const isSDXL = params.model?.base === 'sdxl';
  const shouldConcatPrompts = params.shouldConcatPrompts;
  return isSDXL && !shouldConcatPrompts;
});

export const Prompts = memo(() => {
  const withStylePrompts = useAppSelector(selectWithStylePrompts);
  const modelSupportsNegativePrompt = useAppSelector(selectModelSupportsNegativePrompt);
  const hasNegativePrompt = useAppSelector(selectHasNegativePrompt);
  return (
    <Flex flexDir="column" gap={2}>
      <ParamPositivePrompt />
      {withStylePrompts && <ParamSDXLPositiveStylePrompt />}
      {modelSupportsNegativePrompt && hasNegativePrompt && <ParamNegativePrompt />}
      {withStylePrompts && <ParamSDXLNegativeStylePrompt />}
      <RefImageList />
    </Flex>
  );

@@ -1,32 +1,17 @@
import { Flex } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import {
  createParamsSelector,
  selectHasNegativePrompt,
  selectModelSupportsNegativePrompt,
} from 'features/controlLayers/store/paramsSlice';
import { selectHasNegativePrompt, selectModelSupportsNegativePrompt } from 'features/controlLayers/store/paramsSlice';
import { ParamNegativePrompt } from 'features/parameters/components/Core/ParamNegativePrompt';
import { ParamPositivePrompt } from 'features/parameters/components/Core/ParamPositivePrompt';
import { ParamSDXLNegativeStylePrompt } from 'features/sdxl/components/SDXLPrompts/ParamSDXLNegativeStylePrompt';
import { ParamSDXLPositiveStylePrompt } from 'features/sdxl/components/SDXLPrompts/ParamSDXLPositiveStylePrompt';
import { memo } from 'react';

const selectWithStylePrompts = createParamsSelector((params) => {
  const isSDXL = params.model?.base === 'sdxl';
  const shouldConcatPrompts = params.shouldConcatPrompts;
  return isSDXL && !shouldConcatPrompts;
});

export const UpscalePrompts = memo(() => {
  const withStylePrompts = useAppSelector(selectWithStylePrompts);
  const modelSupportsNegativePrompt = useAppSelector(selectModelSupportsNegativePrompt);
  const hasNegativePrompt = useAppSelector(selectHasNegativePrompt);
  return (
    <Flex flexDir="column" gap={2}>
      <ParamPositivePrompt />
      {withStylePrompts && <ParamSDXLPositiveStylePrompt />}
      {modelSupportsNegativePrompt && hasNegativePrompt && <ParamNegativePrompt />}
      {withStylePrompts && <ParamSDXLNegativeStylePrompt />}
    </Flex>
  );
});

@@ -33,16 +33,6 @@ export const [zParameterNegativePrompt, isParameterNegativePrompt] = buildParame
export type ParameterNegativePrompt = z.infer<typeof zParameterNegativePrompt>;
// #endregion

// #region Positive style prompt (SDXL)
export const [zParameterPositiveStylePromptSDXL, isParameterPositiveStylePromptSDXL] = buildParameter(z.string());
export type ParameterPositiveStylePromptSDXL = z.infer<typeof zParameterPositiveStylePromptSDXL>;
// #endregion

// #region Positive style prompt (SDXL)
export const [zParameterNegativeStylePromptSDXL, isParameterNegativeStylePromptSDXL] = buildParameter(z.string());
export type ParameterNegativeStylePromptSDXL = z.infer<typeof zParameterNegativeStylePromptSDXL>;
// #endregion

// #region Steps
export const [zParameterSteps, isParameterSteps] = buildParameter(z.number().int().min(1));
export type ParameterSteps = z.infer<typeof zParameterSteps>;
@@ -203,3 +193,8 @@ export type ParameterCanvasCoherenceMode = z.infer<typeof zParameterCanvasCohere
export const [zLoRAWeight, isParameterLoRAWeight] = buildParameter(z.number());
export type ParameterLoRAWeight = z.infer<typeof zLoRAWeight>;
// #endregion

// #region CLIP skip
export const [zParameterCLIPSkip, isParameterCLIPSkip] = buildParameter(z.number().int().min(0));
export type ParameterCLIPSkip = z.infer<typeof zParameterCLIPSkip>;
// #endregion

@@ -1,65 +0,0 @@
import { Box, Textarea } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { usePersistedTextAreaSize } from 'common/hooks/usePersistedTextareaSize';
import { negativePrompt2Changed, selectNegativePrompt2 } from 'features/controlLayers/store/paramsSlice';
import { PromptLabel } from 'features/parameters/components/Prompts/PromptLabel';
import { PromptOverlayButtonWrapper } from 'features/parameters/components/Prompts/PromptOverlayButtonWrapper';
import { AddPromptTriggerButton } from 'features/prompt/AddPromptTriggerButton';
import { PromptPopover } from 'features/prompt/PromptPopover';
import { usePrompt } from 'features/prompt/usePrompt';
import { memo, useCallback, useRef } from 'react';
import { useTranslation } from 'react-i18next';

const persistOptions: Parameters<typeof usePersistedTextAreaSize>[2] = {
  trackWidth: false,
  trackHeight: true,
};

export const ParamSDXLNegativeStylePrompt = memo(() => {
  const dispatch = useAppDispatch();
  const prompt = useAppSelector(selectNegativePrompt2);
  const textareaRef = useRef<HTMLTextAreaElement>(null);
  usePersistedTextAreaSize('negative_style_prompt', textareaRef, persistOptions);

  const { t } = useTranslation();
  const handleChange = useCallback(
    (v: string) => {
      dispatch(negativePrompt2Changed(v));
    },
    [dispatch]
  );
  const { onChange, isOpen, onClose, onOpen, onSelect, onKeyDown } = usePrompt({
    prompt,
    textareaRef: textareaRef,
    onChange: handleChange,
  });

  return (
    <PromptPopover isOpen={isOpen} onClose={onClose} onSelect={onSelect} width={textareaRef.current?.clientWidth}>
      <Box pos="relative">
        <Textarea
          className="negative-style-prompt-textarea"
          name="prompt"
          ref={textareaRef}
          value={prompt}
          onChange={onChange}
          onKeyDown={onKeyDown}
          fontSize="sm"
          variant="darkFilled"
          minH={24}
          borderTopWidth={24} // This prevents the prompt from being hidden behind the header
          paddingInlineEnd={10}
          paddingInlineStart={3}
          paddingTop={0}
          paddingBottom={3}
        />
        <PromptOverlayButtonWrapper>
          <AddPromptTriggerButton isOpen={isOpen} onOpen={onOpen} />
        </PromptOverlayButtonWrapper>
        <PromptLabel label={t('sdxl.negStylePrompt')} />
      </Box>
    </PromptPopover>
  );
});

ParamSDXLNegativeStylePrompt.displayName = 'ParamSDXLNegativeStylePrompt';

@@ -1,65 +0,0 @@
import { Box, Textarea } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { usePersistedTextAreaSize } from 'common/hooks/usePersistedTextareaSize';
import { positivePrompt2Changed, selectPositivePrompt2 } from 'features/controlLayers/store/paramsSlice';
import { PromptLabel } from 'features/parameters/components/Prompts/PromptLabel';
import { PromptOverlayButtonWrapper } from 'features/parameters/components/Prompts/PromptOverlayButtonWrapper';
import { AddPromptTriggerButton } from 'features/prompt/AddPromptTriggerButton';
import { PromptPopover } from 'features/prompt/PromptPopover';
import { usePrompt } from 'features/prompt/usePrompt';
import { memo, useCallback, useRef } from 'react';
import { useTranslation } from 'react-i18next';

const persistOptions: Parameters<typeof usePersistedTextAreaSize>[2] = {
  trackWidth: false,
  trackHeight: true,
};

export const ParamSDXLPositiveStylePrompt = memo(() => {
  const dispatch = useAppDispatch();
  const prompt = useAppSelector(selectPositivePrompt2);
  const textareaRef = useRef<HTMLTextAreaElement>(null);
  usePersistedTextAreaSize('positive_style_prompt', textareaRef, persistOptions);

  const { t } = useTranslation();
  const handleChange = useCallback(
    (v: string) => {
      dispatch(positivePrompt2Changed(v));
    },
    [dispatch]
  );
  const { onChange, isOpen, onClose, onOpen, onSelect, onKeyDown } = usePrompt({
    prompt,
    textareaRef: textareaRef,
    onChange: handleChange,
  });

  return (
    <PromptPopover isOpen={isOpen} onClose={onClose} onSelect={onSelect} width={textareaRef.current?.clientWidth}>
      <Box pos="relative">
        <Textarea
          className="positive-style-prompt-textarea"
          name="prompt"
          ref={textareaRef}
          value={prompt}
          onChange={onChange}
          onKeyDown={onKeyDown}
          fontSize="sm"
          variant="darkFilled"
          minH={24}
          borderTopWidth={24} // This prevents the prompt from being hidden behind the header
          paddingInlineEnd={10}
          paddingInlineStart={3}
          paddingTop={0}
          paddingBottom={3}
        />
        <PromptOverlayButtonWrapper>
          <AddPromptTriggerButton isOpen={isOpen} onOpen={onOpen} />
        </PromptOverlayButtonWrapper>
        <PromptLabel label={t('sdxl.posStylePrompt')} />
      </Box>
    </PromptPopover>
  );
});

ParamSDXLPositiveStylePrompt.displayName = 'ParamSDXLPositiveStylePrompt';

@@ -1,37 +0,0 @@
import { IconButton, Tooltip } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { selectShouldConcatPrompts, shouldConcatPromptsChanged } from 'features/controlLayers/store/paramsSlice';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiLinkSimpleBold, PiLinkSimpleBreakBold } from 'react-icons/pi';

export const SDXLConcatButton = memo(() => {
  const shouldConcatPrompts = useAppSelector(selectShouldConcatPrompts);

  const dispatch = useAppDispatch();
  const { t } = useTranslation();

  const handleShouldConcatPromptChange = useCallback(() => {
    dispatch(shouldConcatPromptsChanged(!shouldConcatPrompts));
  }, [dispatch, shouldConcatPrompts]);

  const label = useMemo(
    () => (shouldConcatPrompts ? t('sdxl.concatPromptStyle') : t('sdxl.freePromptStyle')),
    [shouldConcatPrompts, t]
  );

  return (
    <Tooltip label={label}>
      <IconButton
        aria-label={label}
        onClick={handleShouldConcatPromptChange}
        icon={shouldConcatPrompts ? <PiLinkSimpleBold size={14} /> : <PiLinkSimpleBreakBold size={14} />}
        variant="promptOverlay"
        fontSize={12}
        px={0.5}
      />
    </Tooltip>
  );
});

SDXLConcatButton.displayName = 'SDXLConcatButton';

@@ -126,6 +126,7 @@ export const useHotkeyData = (): HotkeysData => {
  addHotkey('canvas', 'cancelSegmentAnything', ['esc']);
  addHotkey('canvas', 'toggleNonRasterLayers', ['shift+h']);
  addHotkey('canvas', 'fitBboxToMasks', ['shift+b']);
  addHotkey('canvas', 'toggleBbox', ['shift+o']);

  // Workflows
  addHotkey('workflows', 'addNode', ['shift+a', 'space']);

@@ -3,7 +3,7 @@ import { useAppSelector } from 'app/store/storeHooks';
import { useFocusRegion, useIsRegionFocused } from 'common/hooks/focus';
import type { IDockviewPanelProps, IGridviewPanelProps } from 'dockview';
import { selectSystemShouldEnableHighlightFocusedRegions } from 'features/system/store/systemSlice';
import type { PanelParameters } from 'features/ui/layouts/auto-layout-context';
import type { DockviewPanelParameters, GridviewPanelParameters } from 'features/ui/layouts/auto-layout-context';
import type { PropsWithChildren } from 'react';
import { memo, useRef } from 'react';

@@ -30,8 +30,8 @@ const sx: SystemStyleObject = {
export const AutoLayoutPanelContainer = memo(
  (
    props:
      | PropsWithChildren<IDockviewPanelProps<PanelParameters>>
      | PropsWithChildren<IGridviewPanelProps<PanelParameters>>
      | PropsWithChildren<IDockviewPanelProps<DockviewPanelParameters>>
      | PropsWithChildren<IGridviewPanelProps<GridviewPanelParameters>>
  ) => {
    const ref = useRef<HTMLDivElement>(null);
    const shouldHighlightFocusedRegions = useAppSelector(selectSystemShouldEnableHighlightFocusedRegions);

@@ -1,5 +1,6 @@
import { ContextMenu, Divider, Flex, IconButton, Menu, MenuButton, MenuList } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { CanvasAlertsBboxVisibility } from 'features/controlLayers/components/CanvasAlerts/CanvasAlertsBboxVisibility';
import { CanvasAlertsInvocationProgress } from 'features/controlLayers/components/CanvasAlerts/CanvasAlertsInvocationProgress';
import { CanvasAlertsPreserveMask } from 'features/controlLayers/components/CanvasAlerts/CanvasAlertsPreserveMask';
import { CanvasAlertsSaveAllImagesToGallery } from 'features/controlLayers/components/CanvasAlerts/CanvasAlertsSaveAllImagesToGallery';
@@ -92,6 +93,7 @@ export const CanvasWorkspacePanel = memo(() => {
        <CanvasAlertsSelectedEntityStatus />
        <CanvasAlertsPreserveMask />
        <CanvasAlertsInvocationProgress />
        <CanvasAlertsBboxVisibility />
      </Flex>
      <Flex position="absolute" top={1} insetInlineEnd={1}>
        <Menu>

@@ -3,10 +3,12 @@ import { setFocusedRegion } from 'common/hooks/focus';
import { useCallbackOnDragEnter } from 'common/hooks/useCallbackOnDragEnter';
import type { IDockviewPanelHeaderProps } from 'dockview';
import { memo, useCallback, useRef } from 'react';
import { useTranslation } from 'react-i18next';

import type { PanelParameters } from './auto-layout-context';
import type { DockviewPanelParameters } from './auto-layout-context';

export const DockviewTab = memo((props: IDockviewPanelHeaderProps<PanelParameters>) => {
export const DockviewTab = memo((props: IDockviewPanelHeaderProps<DockviewPanelParameters>) => {
  const { t } = useTranslation();
  const ref = useRef<HTMLDivElement>(null);
  const setActive = useCallback(() => {
    if (!props.api.isActive) {
@@ -23,7 +25,7 @@ export const DockviewTab = memo((props: IDockviewPanelHeaderProps<PanelParameter
  return (
    <Flex ref={ref} alignItems="center" h="full" onPointerDown={onPointerDown}>
      <Text userSelect="none" px={4}>
        {props.api.title ?? props.api.id}
        {t(props.params.i18nKey)}
      </Text>
    </Flex>
  );

@@ -5,11 +5,13 @@ import type { IDockviewPanelHeaderProps } from 'dockview';
import { useCurrentQueueItemDestination } from 'features/queue/hooks/useCurrentQueueItemDestination';
import ProgressBar from 'features/system/components/ProgressBar';
import { memo, useCallback, useRef } from 'react';
import { useTranslation } from 'react-i18next';
import { useIsGenerationInProgress } from 'services/api/endpoints/queue';

import type { PanelParameters } from './auto-layout-context';
import type { DockviewPanelParameters } from './auto-layout-context';

export const DockviewTabCanvasViewer = memo((props: IDockviewPanelHeaderProps<PanelParameters>) => {
export const DockviewTabCanvasViewer = memo((props: IDockviewPanelHeaderProps<DockviewPanelParameters>) => {
  const { t } = useTranslation();
  const isGenerationInProgress = useIsGenerationInProgress();
  const currentQueueItemDestination = useCurrentQueueItemDestination();

@@ -29,7 +31,7 @@ export const DockviewTabCanvasViewer = memo((props: IDockviewPanelHeaderProps<Pa
  return (
    <Flex ref={ref} position="relative" alignItems="center" h="full" onPointerDown={onPointerDown}>
      <Text userSelect="none" px={4}>
        {props.api.title ?? props.api.id}
        {t(props.params.i18nKey)}
      </Text>
      {currentQueueItemDestination === 'canvas' && isGenerationInProgress && (
        <ProgressBar position="absolute" bottom={0} left={0} right={0} h={1} borderRadius="none" />

@@ -7,11 +7,13 @@ import { selectCanvasSessionId } from 'features/controlLayers/store/canvasStagin
import { useCurrentQueueItemDestination } from 'features/queue/hooks/useCurrentQueueItemDestination';
import ProgressBar from 'features/system/components/ProgressBar';
import { memo, useCallback, useRef } from 'react';
import { useTranslation } from 'react-i18next';
import { useIsGenerationInProgress } from 'services/api/endpoints/queue';

import type { PanelParameters } from './auto-layout-context';
import type { DockviewPanelParameters } from './auto-layout-context';

export const DockviewTabCanvasWorkspace = memo((props: IDockviewPanelHeaderProps<PanelParameters>) => {
export const DockviewTabCanvasWorkspace = memo((props: IDockviewPanelHeaderProps<DockviewPanelParameters>) => {
  const { t } = useTranslation();
  const isGenerationInProgress = useIsGenerationInProgress();
  const canvasSessionId = useAppSelector(selectCanvasSessionId);
  const currentQueueItemDestination = useCurrentQueueItemDestination();
@@ -32,7 +34,7 @@ export const DockviewTabCanvasWorkspace = memo((props: IDockviewPanelHeaderProps
  return (
    <Flex ref={ref} position="relative" alignItems="center" h="full" onPointerDown={onPointerDown}>
      <Text userSelect="none" px={4}>
        {props.api.title ?? props.api.id}
        {t(props.params.i18nKey)}
      </Text>
      {currentQueueItemDestination === canvasSessionId && isGenerationInProgress && (
        <ProgressBar position="absolute" bottom={0} left={0} right={0} h={1} borderRadius="none" />

@@ -6,6 +6,7 @@ import type { IDockviewPanelHeaderProps } from 'dockview';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import type { TabName } from 'features/ui/store/uiTypes';
import { memo, useCallback, useRef } from 'react';
import { useTranslation } from 'react-i18next';
import type { IconType } from 'react-icons';
import {
  PiBoundingBoxBold,
@@ -16,6 +17,8 @@ import {
  PiTextAaBold,
} from 'react-icons/pi';

import type { DockviewPanelParameters } from './auto-layout-context';

const TAB_ICONS: Record<TabName, IconType> = {
  generate: PiTextAaBold,
  canvas: PiBoundingBoxBold,
@@ -25,7 +28,8 @@ const TAB_ICONS: Record<TabName, IconType> = {
  queue: PiQueueBold,
};

export const DockviewTabLaunchpad = memo((props: IDockviewPanelHeaderProps) => {
export const DockviewTabLaunchpad = memo((props: IDockviewPanelHeaderProps<DockviewPanelParameters>) => {
  const { t } = useTranslation();
  const ref = useRef<HTMLDivElement>(null);
  const activeTab = useAppSelector(selectActiveTab);

@@ -44,7 +48,7 @@ export const DockviewTabLaunchpad = memo((props: IDockviewPanelHeaderProps) => {
  return (
    <Flex ref={ref} alignItems="center" h="full" px={4} gap={3} onPointerDown={onPointerDown}>
      <Icon as={TAB_ICONS[activeTab]} color="invokeYellow.300" boxSize={5} />
      <Text userSelect="none">{props.api.title ?? props.api.id}</Text>
      <Text userSelect="none">{t(props.params.i18nKey)}</Text>
    </Flex>
  );
});

@@ -4,11 +4,13 @@ import { useCallbackOnDragEnter } from 'common/hooks/useCallbackOnDragEnter';
import type { IDockviewPanelHeaderProps } from 'dockview';
import ProgressBar from 'features/system/components/ProgressBar';
import { memo, useCallback, useRef } from 'react';
import { useTranslation } from 'react-i18next';
import { useIsGenerationInProgress } from 'services/api/endpoints/queue';

import type { PanelParameters } from './auto-layout-context';
import type { DockviewPanelParameters } from './auto-layout-context';

export const DockviewTabProgress = memo((props: IDockviewPanelHeaderProps<PanelParameters>) => {
export const DockviewTabProgress = memo((props: IDockviewPanelHeaderProps<DockviewPanelParameters>) => {
  const { t } = useTranslation();
  const isGenerationInProgress = useIsGenerationInProgress();

  const ref = useRef<HTMLDivElement>(null);
@@ -27,7 +29,7 @@ export const DockviewTabProgress = memo((props: IDockviewPanelHeaderProps<PanelP
  return (
    <Flex ref={ref} position="relative" alignItems="center" h="full" onPointerDown={onPointerDown}>
      <Text userSelect="none" px={4}>
        {props.api.title ?? props.api.id}
        {t(props.params.i18nKey)}
      </Text>
      {isGenerationInProgress && (
        <ProgressBar position="absolute" bottom={0} left={0} right={0} h={1} borderRadius="none" />

@@ -4,12 +4,14 @@ import { InformationalPopover } from 'common/components/InformationalPopover/Inf
import { ModelPicker } from 'features/parameters/components/ModelPicker';
import { modelSelected } from 'features/parameters/store/actions';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { MdMoneyOff } from 'react-icons/md';
import { useMainModels } from 'services/api/hooks/modelsByType';
import { useSelectedModelConfig } from 'services/api/hooks/useSelectedModelConfig';
import { type AnyModelConfig, isCheckpointMainModelConfig } from 'services/api/types';

export const InitialStateMainModelPicker = memo(() => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const [modelConfigs] = useMainModels();
  const selectedModelConfig = useSelectedModelConfig();
@@ -31,7 +33,7 @@ export const InitialStateMainModelPicker = memo(() => {
  return (
    <FormControl orientation="vertical" alignItems="unset">
      <FormLabel display="flex" fontSize="md" gap={2}>
        Select your Model{' '}
        {t('common.selectYourModel')}{' '}
        {isFluxDevSelected && (
          <InformationalPopover feature="fluxDevLicense" hideDisable={true}>
            <Flex justifyContent="flex-start">

@@ -27,15 +27,30 @@ export const useAutoLayoutContext = () => {
  return value;
};

export type PanelParameters = {
export type DockviewPanelParameters = {
  tab: TabName;
  focusRegion: FocusRegionName;
  i18nKey: string;
};

export type GridviewPanelParameters = {
  tab: TabName;
  focusRegion: FocusRegionName;
};

export type AutoLayoutGridviewComponents = Record<string, FunctionComponent<IGridviewPanelProps<PanelParameters>>>;
export type AutoLayoutDockviewComponents = Record<string, FunctionComponent<IDockviewPanelProps<PanelParameters>>>;
export type RootLayoutGridviewComponents = Record<string, FunctionComponent<IGridviewPanelProps<PanelParameters>>>;
type PanelProps = IDockviewPanelProps<PanelParameters> | IGridviewPanelProps<PanelParameters>;
export type AutoLayoutGridviewComponents = Record<
  string,
  FunctionComponent<IGridviewPanelProps<GridviewPanelParameters>>
>;
export type AutoLayoutDockviewComponents = Record<
  string,
  FunctionComponent<IDockviewPanelProps<DockviewPanelParameters>>
>;
export type RootLayoutGridviewComponents = Record<
  string,
  FunctionComponent<IGridviewPanelProps<GridviewPanelParameters>>
>;
type PanelProps = IDockviewPanelProps<DockviewPanelParameters> | IGridviewPanelProps<GridviewPanelParameters>;

export const withPanelContainer = (Component: FunctionComponent) =>
  /* eslint-disable-next-line react/display-name */

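The type split above is what lets Dockview tab headers resolve their labels at render time: only Dockview panels carry an i18nKey, which the tab components translate with t(...) instead of showing the static api.title fixed at panel creation, so labels update when the app language changes. A minimal sketch of a conforming tab component (the PlainTab name is illustrative, not part of the diff):

// Sketch only (editor's illustration): a Dockview tab header that re-renders
// its label via i18n on language change.
import type { IDockviewPanelHeaderProps } from 'dockview';
import { useTranslation } from 'react-i18next';

import type { DockviewPanelParameters } from './auto-layout-context';

const PlainTab = (props: IDockviewPanelHeaderProps<DockviewPanelParameters>) => {
  const { t } = useTranslation();
  return <span>{t(props.params.i18nKey)}</span>;
};
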
@@ -9,7 +9,8 @@ import { FloatingRightPanelButtons } from 'features/ui/components/FloatingRightP
import type {
  AutoLayoutDockviewComponents,
  AutoLayoutGridviewComponents,
  PanelParameters,
  DockviewPanelParameters,
  GridviewPanelParameters,
  RootLayoutGridviewComponents,
} from 'features/ui/layouts/auto-layout-context';
import { AutoLayoutProvider, useAutoLayoutContext, withPanelContainer } from 'features/ui/layouts/auto-layout-context';
@@ -63,7 +64,7 @@ const mainPanelComponents: AutoLayoutDockviewComponents = {

const initializeCenterPanelLayout = (tab: TabName, api: DockviewApi) => {
  navigationApi.registerContainer(tab, 'main', api, () => {
    const launchpad = api.addPanel<PanelParameters>({
    const launchpad = api.addPanel<DockviewPanelParameters>({
      id: LAUNCHPAD_PANEL_ID,
      component: LAUNCHPAD_PANEL_ID,
      title: t('ui.panels.launchpad'),
@@ -71,10 +72,11 @@ const initializeCenterPanelLayout = (tab: TabName, api: DockviewApi) => {
      params: {
        tab,
        focusRegion: 'launchpad',
        i18nKey: 'ui.panels.launchpad',
      },
    });

    api.addPanel<PanelParameters>({
    api.addPanel<DockviewPanelParameters>({
      id: WORKSPACE_PANEL_ID,
      component: WORKSPACE_PANEL_ID,
      title: t('ui.panels.canvas'),
@@ -82,6 +84,7 @@ const initializeCenterPanelLayout = (tab: TabName, api: DockviewApi) => {
      params: {
        tab,
        focusRegion: 'canvas',
        i18nKey: 'ui.panels.canvas',
      },
      position: {
        direction: 'within',
@@ -89,7 +92,7 @@ const initializeCenterPanelLayout = (tab: TabName, api: DockviewApi) => {
      },
    });

    api.addPanel<PanelParameters>({
    api.addPanel<DockviewPanelParameters>({
      id: VIEWER_PANEL_ID,
      component: VIEWER_PANEL_ID,
      title: t('ui.panels.imageViewer'),
@@ -97,6 +100,7 @@ const initializeCenterPanelLayout = (tab: TabName, api: DockviewApi) => {
      params: {
        tab,
        focusRegion: 'viewer',
        i18nKey: 'ui.panels.imageViewer',
      },
      position: {
        direction: 'within',
@@ -145,7 +149,7 @@ const rightPanelComponents: AutoLayoutGridviewComponents = {

const initializeRightPanelLayout = (tab: TabName, api: GridviewApi) => {
  navigationApi.registerContainer(tab, 'right', api, () => {
    const gallery = api.addPanel<PanelParameters>({
    const gallery = api.addPanel<GridviewPanelParameters>({
      id: GALLERY_PANEL_ID,
      component: GALLERY_PANEL_ID,
      minimumWidth: RIGHT_PANEL_MIN_SIZE_PX,
@@ -156,7 +160,7 @@ const initializeRightPanelLayout = (tab: TabName, api: GridviewApi) => {
      },
    });

    const boards = api.addPanel<PanelParameters>({
    const boards = api.addPanel<GridviewPanelParameters>({
      id: BOARDS_PANEL_ID,
      component: BOARDS_PANEL_ID,
      minimumHeight: BOARD_PANEL_MIN_HEIGHT_PX,
@@ -170,7 +174,7 @@ const initializeRightPanelLayout = (tab: TabName, api: GridviewApi) => {
      },
    });

    api.addPanel<PanelParameters>({
    api.addPanel<GridviewPanelParameters>({
      id: LAYERS_PANEL_ID,
      component: LAYERS_PANEL_ID,
      minimumHeight: LAYERS_PANEL_MIN_HEIGHT_PX,
@@ -215,7 +219,7 @@ const leftPanelComponents: AutoLayoutGridviewComponents = {

const initializeLeftPanelLayout = (tab: TabName, api: GridviewApi) => {
  navigationApi.registerContainer(tab, 'left', api, () => {
    api.addPanel<PanelParameters>({
    api.addPanel<GridviewPanelParameters>({
      id: SETTINGS_PANEL_ID,
      component: SETTINGS_PANEL_ID,
      params: {

@@ -8,7 +8,8 @@ import { FloatingRightPanelButtons } from 'features/ui/components/FloatingRightP
import type {
  AutoLayoutDockviewComponents,
  AutoLayoutGridviewComponents,
  PanelParameters,
  DockviewPanelParameters,
  GridviewPanelParameters,
  RootLayoutGridviewComponents,
} from 'features/ui/layouts/auto-layout-context';
import { AutoLayoutProvider, useAutoLayoutContext, withPanelContainer } from 'features/ui/layouts/auto-layout-context';
@@ -57,7 +58,7 @@ const mainPanelComponents: AutoLayoutDockviewComponents = {

const initializeMainPanelLayout = (tab: TabName, api: DockviewApi) => {
  navigationApi.registerContainer(tab, 'main', api, () => {
    const launchpad = api.addPanel<PanelParameters>({
    const launchpad = api.addPanel<DockviewPanelParameters>({
      id: LAUNCHPAD_PANEL_ID,
      component: LAUNCHPAD_PANEL_ID,
      title: t('ui.panels.launchpad'),
@@ -65,10 +66,11 @@ const initializeMainPanelLayout = (tab: TabName, api: DockviewApi) => {
      params: {
        tab,
        focusRegion: 'launchpad',
        i18nKey: 'ui.panels.launchpad',
      },
    });

    api.addPanel<PanelParameters>({
    api.addPanel<DockviewPanelParameters>({
      id: VIEWER_PANEL_ID,
      component: VIEWER_PANEL_ID,
      title: t('ui.panels.imageViewer'),
@@ -76,6 +78,7 @@ const initializeMainPanelLayout = (tab: TabName, api: DockviewApi) => {
      params: {
        tab,
        focusRegion: 'viewer',
        i18nKey: 'ui.panels.imageViewer',
      },
      position: {
        direction: 'within',
@@ -123,7 +126,7 @@ const rightPanelComponents: AutoLayoutGridviewComponents = {

const initializeRightPanelLayout = (tab: TabName, api: GridviewApi) => {
  navigationApi.registerContainer(tab, 'right', api, () => {
    const gallery = api.addPanel<PanelParameters>({
    const gallery = api.addPanel<GridviewPanelParameters>({
      id: GALLERY_PANEL_ID,
      component: GALLERY_PANEL_ID,
      minimumWidth: RIGHT_PANEL_MIN_SIZE_PX,
@@ -134,7 +137,7 @@ const initializeRightPanelLayout = (tab: TabName, api: GridviewApi) => {
      },
    });

    const boards = api.addPanel<PanelParameters>({
    const boards = api.addPanel<GridviewPanelParameters>({
      id: BOARDS_PANEL_ID,
      component: BOARDS_PANEL_ID,
      minimumHeight: BOARD_PANEL_MIN_HEIGHT_PX,
@@ -179,7 +182,7 @@ const leftPanelComponents: AutoLayoutGridviewComponents = {

const initializeLeftPanelLayout = (tab: TabName, api: GridviewApi) => {
  navigationApi.registerContainer(tab, 'left', api, () => {
    api.addPanel<PanelParameters>({
    api.addPanel<GridviewPanelParameters>({
      id: SETTINGS_PANEL_ID,
      component: SETTINGS_PANEL_ID,
      params: {
@@ -218,13 +221,13 @@ const rootPanelComponents: RootLayoutGridviewComponents = {

const initializeRootPanelLayout = (tab: TabName, api: GridviewApi) => {
  navigationApi.registerContainer(tab, 'root', api, () => {
    const main = api.addPanel<PanelParameters>({
    const main = api.addPanel<GridviewPanelParameters>({
      id: MAIN_PANEL_ID,
      component: MAIN_PANEL_ID,
      priority: LayoutPriority.High,
    });

    const left = api.addPanel<PanelParameters>({
    const left = api.addPanel<GridviewPanelParameters>({
      id: LEFT_PANEL_ID,
      component: LEFT_PANEL_ID,
      minimumWidth: LEFT_PANEL_MIN_SIZE_PX,
@@ -234,7 +237,7 @@ const initializeRootPanelLayout = (tab: TabName, api: GridviewApi) => {
      },
    });

    const right = api.addPanel<PanelParameters>({
    const right = api.addPanel<GridviewPanelParameters>({
      id: RIGHT_PANEL_ID,
      component: RIGHT_PANEL_ID,
      minimumWidth: RIGHT_PANEL_MIN_SIZE_PX,

@@ -448,6 +448,35 @@ export class NavigationApi {
    return this.panels.get(key);
  };

  /**
   * Expand the left panel in the currently active tab.
   *
   * This method will not wait for the panel to be registered.
   *
   * @returns True if the panel was expanded, false if it was not found or an error occurred
   */
  expandLeftPanel = (): boolean => {
    const activeTab = this._app?.activeTab.get() ?? null;
    if (!activeTab) {
      log.warn('No active tab found to expand left panel');
      return false;
    }
    const leftPanel = this.getPanel(activeTab, LEFT_PANEL_ID);
    if (!leftPanel) {
      log.warn(`Left panel not found in active tab "${activeTab}"`);
      return false;
    }

    if (!(leftPanel instanceof GridviewPanel)) {
      log.error(`Left panels must be instances of GridviewPanel`);
      return false;
    }

    this._expandPanel(leftPanel, LEFT_PANEL_MIN_SIZE_PX);

    return true;
  };

  /**
   * Toggle the left panel in the currently active tab.
   *
@@ -481,6 +510,35 @@ export class NavigationApi {
    return true;
  };

  /**
   * Expand the right panel in the currently active tab.
   *
   * This method will not wait for the panel to be registered.
   *
   * @returns True if the panel was expanded, false if it was not found or an error occurred
   */
  expandRightPanel = (): boolean => {
    const activeTab = this._app?.activeTab.get() ?? null;
    if (!activeTab) {
      log.warn('No active tab found to expand right panel');
      return false;
    }
    const rightPanel = this.getPanel(activeTab, RIGHT_PANEL_ID);
    if (!rightPanel) {
      log.warn(`Right panel not found in active tab "${activeTab}"`);
      return false;
    }

    if (!(rightPanel instanceof GridviewPanel)) {
      log.error(`Right panels must be instances of GridviewPanel`);
      return false;
    }

    this._expandPanel(rightPanel, RIGHT_PANEL_MIN_SIZE_PX);

    return true;
  };

  /**
   * Toggle the right panel in the currently active tab.
   *

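Both expand methods above are synchronous and defensive: they return false rather than throwing when there is no active tab, the panel is not registered yet, or the panel is not a GridviewPanel. A hypothetical call site (the surrounding logger is an assumption):

// Sketch only (editor's illustration): force a collapsed side panel open
// before focusing content inside it.
const opened = navigationApi.expandLeftPanel();
if (!opened) {
  // false means: no active tab, panel not registered, or not a GridviewPanel
  console.warn('left panel could not be expanded');
}
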
@@ -8,7 +8,8 @@ import { FloatingRightPanelButtons } from 'features/ui/components/FloatingRightP
import type {
AutoLayoutDockviewComponents,
AutoLayoutGridviewComponents,
PanelParameters,
DockviewPanelParameters,
GridviewPanelParameters,
RootLayoutGridviewComponents,
} from 'features/ui/layouts/auto-layout-context';
import { AutoLayoutProvider, useAutoLayoutContext, withPanelContainer } from 'features/ui/layouts/auto-layout-context';
@@ -57,7 +58,7 @@ const mainPanelComponents: AutoLayoutDockviewComponents = {

const initializeMainPanelLayout = (tab: TabName, api: DockviewApi) => {
navigationApi.registerContainer(tab, 'main', api, () => {
const launchpad = api.addPanel<PanelParameters>({
const launchpad = api.addPanel<DockviewPanelParameters>({
id: LAUNCHPAD_PANEL_ID,
component: LAUNCHPAD_PANEL_ID,
title: t('ui.panels.launchpad'),
@@ -65,10 +66,11 @@ const initializeMainPanelLayout = (tab: TabName, api: DockviewApi) => {
params: {
tab,
focusRegion: 'launchpad',
i18nKey: 'ui.panels.launchpad',
},
});

api.addPanel<PanelParameters>({
api.addPanel<DockviewPanelParameters>({
id: VIEWER_PANEL_ID,
component: VIEWER_PANEL_ID,
title: t('ui.panels.imageViewer'),
@@ -76,6 +78,7 @@ const initializeMainPanelLayout = (tab: TabName, api: DockviewApi) => {
params: {
tab,
focusRegion: 'viewer',
i18nKey: 'ui.panels.imageViewer',
},
position: {
direction: 'within',
@@ -121,7 +124,7 @@ const rightPanelComponents: AutoLayoutGridviewComponents = {

const initializeRightPanelLayout = (tab: TabName, api: GridviewApi) => {
navigationApi.registerContainer(tab, 'right', api, () => {
const gallery = api.addPanel<PanelParameters>({
const gallery = api.addPanel<GridviewPanelParameters>({
id: GALLERY_PANEL_ID,
component: GALLERY_PANEL_ID,
minimumWidth: RIGHT_PANEL_MIN_SIZE_PX,
@@ -132,7 +135,7 @@ const initializeRightPanelLayout = (tab: TabName, api: GridviewApi) => {
},
});

const boards = api.addPanel<PanelParameters>({
const boards = api.addPanel<GridviewPanelParameters>({
id: BOARDS_PANEL_ID,
component: BOARDS_PANEL_ID,
minimumHeight: BOARD_PANEL_MIN_HEIGHT_PX,
@@ -177,7 +180,7 @@ const leftPanelComponents: AutoLayoutGridviewComponents = {

const initializeLeftPanelLayout = (tab: TabName, api: GridviewApi) => {
navigationApi.registerContainer(tab, 'left', api, () => {
api.addPanel<PanelParameters>({
api.addPanel<GridviewPanelParameters>({
id: SETTINGS_PANEL_ID,
component: SETTINGS_PANEL_ID,
params: {

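The recurring edit in this file swaps the old catch-all `PanelParameters` for `DockviewPanelParameters` (main-area tabs) and `GridviewPanelParameters` (left/right grid panels). The real definitions live in `features/ui/layouts/auto-layout-context` and are not shown in this diff; a rough sketch of the split as it can be inferred from the call sites, where only dockview panels receive an `i18nKey` for their tab titles:

// Inferred sketch only - not the actual definitions in auto-layout-context.
type BasePanelParameters = {
  tab: TabName;
  focusRegion: string;
};

// Dockview panels render titled tabs, so they carry a translation key.
type DockviewPanelParameters = BasePanelParameters & { i18nKey: string };

// Gridview panels are bare containers; no extra fields appear at these call sites.
type GridviewPanelParameters = BasePanelParameters;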
@@ -28,11 +28,15 @@ export const useCollapsibleGridviewPanel = (
const lastExpandedSizeRef = useRef<number>(0);
const collapse = useCallback(() => {
const panel = navigationApi.getPanel(tab, panelId);

if (!panel || !(panel instanceof GridviewPanel)) {
return;
}

const isCollapsed = getIsCollapsed(panel, orientation, collapsedSize);
if (isCollapsed) {
return;
}

lastExpandedSizeRef.current = orientation === 'vertical' ? panel.height : panel.width;

if (orientation === 'vertical') {
@@ -48,6 +52,11 @@ export const useCollapsibleGridviewPanel = (
return;
}

const isCollapsed = getIsCollapsed(panel, orientation, collapsedSize);
if (!isCollapsed) {
return;
}

let newSize = lastExpandedSizeRef.current || defaultSize;
if (minExpandedSize && newSize < minExpandedSize) {
newSize = minExpandedSize;
@@ -58,7 +67,7 @@ export const useCollapsibleGridviewPanel = (
} else {
panel.api.setSize({ width: newSize });
}
}, [defaultSize, minExpandedSize, orientation, panelId, tab]);
}, [defaultSize, minExpandedSize, orientation, collapsedSize, panelId, tab]);

const toggle = useCallback(() => {
const panel = navigationApi.getPanel(tab, panelId);
@@ -66,6 +75,7 @@ export const useCollapsibleGridviewPanel = (
return;
}
const isCollapsed = getIsCollapsed(panel, orientation, collapsedSize);

if (isCollapsed) {
expand();
} else {

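The guards added to `collapse` and `expand` make both operations idempotent: collapsing an already-collapsed panel no longer overwrites `lastExpandedSizeRef` with the collapsed size, which is exactly the value `expand` later restores. The restore rule itself is small enough to state standalone (a sketch with illustrative names, not code from the hook):

// Sketch of the size-restore rule used by expand().
const resolveExpandedSize = (
  lastExpandedSize: number, // 0 until the panel has been collapsed at least once
  defaultSize: number,
  minExpandedSize?: number
): number => {
  // Fall back to the default when no real size has been recorded yet.
  let size = lastExpandedSize || defaultSize;
  // Never restore to something smaller than the configured minimum.
  if (minExpandedSize !== undefined && size < minExpandedSize) {
    size = minExpandedSize;
  }
  return size;
};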
@@ -0,0 +1,20 @@
import type { TabName } from 'features/ui/store/uiTypes';

import {
GALLERY_PANEL_DEFAULT_HEIGHT_PX,
GALLERY_PANEL_ID,
GALLERY_PANEL_MIN_EXPANDED_HEIGHT_PX,
GALLERY_PANEL_MIN_HEIGHT_PX,
} from './shared';
import { useCollapsibleGridviewPanel } from './use-collapsible-gridview-panel';

export const useGalleryPanel = (tab: TabName) => {
return useCollapsibleGridviewPanel(
tab,
GALLERY_PANEL_ID,
'vertical',
GALLERY_PANEL_DEFAULT_HEIGHT_PX,
GALLERY_PANEL_MIN_HEIGHT_PX,
GALLERY_PANEL_MIN_EXPANDED_HEIGHT_PX
);
};
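This new hook is a thin specialization of `useCollapsibleGridviewPanel`: it pins the panel id, orientation, and the gallery's size constants so call sites only pass the tab. Assuming the generic hook returns the `collapse`/`expand`/`toggle` callbacks it defines (the return shape is not shown in this diff), a consumer would look roughly like this, with illustrative component and element names:

// Assumed usage inside a React component.
const GalleryHeaderButton = ({ tab }: { tab: TabName }) => {
  const galleryPanel = useGalleryPanel(tab);
  // Wire the toggle to a header button; sizes/orientation are already baked in.
  return <button onClick={galleryPanel.toggle}>Toggle gallery</button>;
};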
@@ -10,7 +10,8 @@ import { FloatingRightPanelButtons } from 'features/ui/components/FloatingRightP
import type {
AutoLayoutDockviewComponents,
AutoLayoutGridviewComponents,
PanelParameters,
DockviewPanelParameters,
GridviewPanelParameters,
RootLayoutGridviewComponents,
} from 'features/ui/layouts/auto-layout-context';
import { AutoLayoutProvider, useAutoLayoutContext, withPanelContainer } from 'features/ui/layouts/auto-layout-context';
@@ -60,7 +61,7 @@ const mainPanelComponents: AutoLayoutDockviewComponents = {

const initializeMainPanelLayout = (tab: TabName, api: DockviewApi) => {
navigationApi.registerContainer(tab, 'main', api, () => {
const launchpad = api.addPanel<PanelParameters>({
const launchpad = api.addPanel<DockviewPanelParameters>({
id: LAUNCHPAD_PANEL_ID,
component: LAUNCHPAD_PANEL_ID,
title: t('ui.panels.launchpad'),
@@ -68,10 +69,11 @@ const initializeMainPanelLayout = (tab: TabName, api: DockviewApi) => {
params: {
tab,
focusRegion: 'launchpad',
i18nKey: 'ui.panels.launchpad',
},
});

api.addPanel<PanelParameters>({
api.addPanel<DockviewPanelParameters>({
id: WORKSPACE_PANEL_ID,
component: WORKSPACE_PANEL_ID,
title: t('ui.panels.workflowEditor'),
@@ -79,6 +81,7 @@ const initializeMainPanelLayout = (tab: TabName, api: DockviewApi) => {
params: {
tab,
focusRegion: 'workflows',
i18nKey: 'ui.panels.workflowEditor',
},
position: {
direction: 'within',
@@ -86,7 +89,7 @@ const initializeMainPanelLayout = (tab: TabName, api: DockviewApi) => {
},
});

api.addPanel<PanelParameters>({
api.addPanel<DockviewPanelParameters>({
id: VIEWER_PANEL_ID,
component: VIEWER_PANEL_ID,
title: t('ui.panels.imageViewer'),
@@ -94,6 +97,7 @@ const initializeMainPanelLayout = (tab: TabName, api: DockviewApi) => {
params: {
tab,
focusRegion: 'viewer',
i18nKey: 'ui.panels.imageViewer',
},
position: {
direction: 'within',
@@ -141,7 +145,7 @@ const rightPanelComponents: AutoLayoutGridviewComponents = {

const initializeRightPanelLayout = (tab: TabName, api: GridviewApi) => {
navigationApi.registerContainer(tab, 'right', api, () => {
const gallery = api.addPanel<PanelParameters>({
const gallery = api.addPanel<GridviewPanelParameters>({
id: GALLERY_PANEL_ID,
component: GALLERY_PANEL_ID,
minimumWidth: RIGHT_PANEL_MIN_SIZE_PX,
@@ -152,7 +156,7 @@ const initializeRightPanelLayout = (tab: TabName, api: GridviewApi) => {
},
});

const boards = api.addPanel<PanelParameters>({
const boards = api.addPanel<GridviewPanelParameters>({
id: BOARDS_PANEL_ID,
component: BOARDS_PANEL_ID,
minimumHeight: BOARD_PANEL_MIN_HEIGHT_PX,
@@ -197,7 +201,7 @@ const leftPanelComponents: AutoLayoutGridviewComponents = {

const initializeLeftPanelLayout = (tab: TabName, api: GridviewApi) => {
navigationApi.registerContainer(tab, 'left', api, () => {
api.addPanel<PanelParameters>({
api.addPanel<GridviewPanelParameters>({
id: SETTINGS_PANEL_ID,
component: SETTINGS_PANEL_ID,
params: {

@@ -105,6 +105,10 @@ export const uiSliceConfig: SliceConfig<typeof slice> = {
state.activeTab = 'canvas';
state._version = 3;
}
if (state._version === 3) {
state.panels = {};
state._version = 4;
}
return zUIState.parse(state);
},
persistDenylist: ['shouldShowImageDetails'],

@@ -13,7 +13,7 @@ const zSerializable = z.any().refine(isPlainObject);
export type Serializable = z.infer<typeof zSerializable>;

export const zUIState = z.object({
_version: z.literal(3),
_version: z.literal(4),
activeTab: zTabName,
shouldShowImageDetails: z.boolean(),
shouldShowProgressInViewer: z.boolean(),
@@ -26,7 +26,7 @@ export const zUIState = z.object({
});
export type UIState = z.infer<typeof zUIState>;
export const getInitialUIState = (): UIState => ({
_version: 3 as const,
_version: 4 as const,
activeTab: 'generate' as const,
shouldShowImageDetails: false,
shouldShowProgressInViewer: true,

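Taken together, the two preceding hunks show the versioned-persistence pattern this slice uses: the zod schema pins `_version` to the current literal, and the migration chain upgrades older persisted states one version at a time (here, 3 to 4 resets persisted panel layouts because their format changed). A condensed sketch of the pattern, with the unrelated fields omitted for brevity:

// Condensed sketch of the migrate-then-validate pattern used above.
import { z } from 'zod';

const zState = z.object({
  _version: z.literal(4),
  panels: z.record(z.any()),
});

const migrate = (state: any): z.infer<typeof zState> => {
  // Each branch upgrades exactly one version, so very old states chain forward.
  if (state._version === 3) {
    state.panels = {}; // panel layout format changed; drop stale layouts
    state._version = 4;
  }
  // Parsing last guarantees the returned value matches the current schema.
  return zState.parse(state);
};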
@@ -31,7 +31,7 @@ import {
export { default as InvokeAIUI } from './app/components/InvokeAIUI';
export type { StudioInitAction } from './app/hooks/useStudioInitAction';
export type { LoggingOverrides } from './app/logging/logger';
export type { PartialAppConfig } from './app/types/invokeai';
export type { NumericalParameterConfig, PartialAppConfig } from './app/types/invokeai';
export { default as Loading } from './common/components/Loading/Loading';
export { default as HotkeysModal } from './features/system/components/HotkeysModal/HotkeysModal';
export { default as InvokeAiLogoComponent } from './features/system/components/InvokeAILogoComponent';

@@ -5,6 +5,7 @@ import {
selectAutoSwitch,
selectGalleryView,
selectGetImageNamesQueryArgs,
selectListBoardsQueryArgs,
selectSelectedBoardId,
} from 'features/gallery/store/gallerySelectors';
import { boardIdSelected, galleryViewChanged, imageSelected } from 'features/gallery/store/gallerySlice';
@@ -76,6 +77,14 @@ export const buildOnInvocationComplete = (
}
dispatch(boardsApi.util.upsertQueryEntries(entries));

dispatch(
boardsApi.util.updateQueryData('listAllBoards', selectListBoardsQueryArgs(getState()), (draft) => {
for (const board of draft) {
board.image_count = board.image_count + (boardTotalAdditions[board.board_id] ?? 0);
}
})
);

/**
* Optimistic update and cache invalidation for image names queries that match this image's board and categories.
* - Optimistic update for the cache that does not have a search term (we cannot derive the correct insertion

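The `updateQueryData` call above keeps board counts fresh without a refetch: when a generation finishes, the cached `listAllBoards` response is patched in place with the per-board image additions tallied earlier in the handler (`boardTotalAdditions`, built outside this hunk). The same RTK Query technique in isolation; `AppDispatch` and `ListBoardsArgs` are assumed types, while the endpoint name and the `image_count`/`board_id` fields mirror the diff:

// Standalone sketch of the optimistic count patch.
const patchBoardCounts = (
  dispatch: AppDispatch,            // assumption: the store's dispatch type
  queryArgs: ListBoardsArgs,        // assumption: the args type for listAllBoards
  additions: Record<string, number> // board_id -> number of images just added
) => {
  dispatch(
    boardsApi.util.updateQueryData('listAllBoards', queryArgs, (draft) => {
      // `draft` is an Immer draft of the cached response; in-place mutation is safe.
      for (const board of draft) {
        board.image_count += additions[board.board_id] ?? 0;
      }
    })
  );
};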
@@ -1 +1 @@
__version__ = "6.3.0rc2"
__version__ = "6.4.0"

@@ -46,7 +46,7 @@ def fetch_commits_between_tags(
commit_info: list[CommitInfo] = []
headers = {"Authorization": f"token {token}"} if token else None

# Get the total number of pages w/ an intial request - a bit hacky but it works...
# Get the total number of pages w/ an initial request - a bit hacky but it works...
response = requests.get(
f"https://api.github.com/repos/{org_name}/{repo_name}/compare/{from_ref}...{to_ref}?page=1&per_page=100",
headers=headers,

@@ -1,3 +1,5 @@
import os

import pytest
import torch

@@ -20,11 +22,17 @@ class DummyModule(torch.nn.Module):
return x


is_github_ci = os.getenv("GITHUB_ACTIONS") == "true"

parameterize_mps_and_cuda = pytest.mark.parametrize(
("device"),
[
pytest.param(
"mps", marks=pytest.mark.skipif(not torch.backends.mps.is_available(), reason="MPS is not available.")
"mps",
marks=pytest.mark.skipif(
is_github_ci or not torch.backends.mps.is_available(),
reason="MPS is very flaky in CI" if is_github_ci else "MPS is not available.",
),
),
pytest.param("cuda", marks=pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA is not available.")),
],

@@ -0,0 +1,766 @@
# A sample state dict in the Diffusers FLUX LoRA format with base_model.model prefix.
# These keys are based on the LoRA model in peft_adapter_model.safetensors
state_dict_keys = {
"base_model.model.proj_out.lora_A.weight": [4, 3072],
"base_model.model.proj_out.lora_B.weight": [64, 4],
"base_model.model.single_transformer_blocks.0.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.0.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.0.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.0.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.0.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.0.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.0.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.0.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.0.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.0.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.1.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.1.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.1.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.1.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.1.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.1.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.1.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.1.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.1.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.1.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.10.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.10.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.10.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.10.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.10.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.10.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.10.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.10.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.10.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.10.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.11.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.11.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.11.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.11.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.11.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.11.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.11.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.11.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.11.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.11.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.12.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.12.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.12.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.12.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.12.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.12.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.12.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.12.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.12.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.12.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.13.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.13.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.13.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.13.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.13.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.13.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.13.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.13.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.13.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.13.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.14.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.14.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.14.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.14.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.14.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.14.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.14.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.14.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.14.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.14.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.15.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.15.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.15.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.15.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.15.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.15.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.15.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.15.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.15.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.15.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.16.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.16.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.16.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.16.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.16.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.16.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.16.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.16.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.16.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.16.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.17.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.17.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.17.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.17.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.17.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.17.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.17.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.17.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.17.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.17.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.18.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.18.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.18.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.18.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.18.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.18.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.18.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.18.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.18.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.18.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.19.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.19.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.19.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.19.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.19.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.19.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.19.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.19.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.19.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.19.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.2.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.2.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.2.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.2.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.2.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.2.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.2.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.2.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.2.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.2.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.20.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.20.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.20.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.20.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.20.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.20.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.20.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.20.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.20.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.20.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.21.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.21.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.21.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.21.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.21.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.21.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.21.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.21.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.21.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.21.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.22.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.22.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.22.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.22.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.22.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.22.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.22.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.22.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.22.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.22.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.23.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.23.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.23.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.23.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.23.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.23.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.23.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.23.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.23.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.23.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.24.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.24.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.24.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.24.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.24.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.24.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.24.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.24.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.24.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.24.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.25.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.25.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.25.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.25.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.25.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.25.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.25.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.25.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.25.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.25.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.26.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.26.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.26.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.26.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.26.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.26.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.26.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.26.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.26.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.26.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.27.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.27.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.27.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.27.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.27.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.27.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.27.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.27.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.27.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.27.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.28.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.28.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.28.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.28.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.28.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.28.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.28.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.28.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.28.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.28.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.29.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.29.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.29.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.29.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.29.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.29.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.29.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.29.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.29.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.29.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.3.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.3.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.3.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.3.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.3.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.3.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.3.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.3.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.3.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.3.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.30.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.30.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.30.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.30.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.30.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.30.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.30.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.30.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.30.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.30.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.31.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.31.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.31.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.31.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.31.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.31.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.31.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.31.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.31.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.31.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.32.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.32.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.32.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.32.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.32.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.32.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.32.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.32.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.32.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.32.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.33.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.33.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.33.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.33.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.33.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.33.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.33.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.33.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.33.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.33.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.34.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.34.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.34.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.34.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.34.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.34.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.34.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.34.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.34.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.34.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.35.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.35.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.35.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.35.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.35.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.35.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.35.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.35.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.35.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.35.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.36.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.36.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.36.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.36.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.36.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.36.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.36.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.36.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.36.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.36.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.37.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.37.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.37.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.37.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.37.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.37.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.37.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.37.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.37.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.37.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.4.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.4.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.4.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.4.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.4.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.4.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.4.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.4.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.4.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.4.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.5.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.5.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.5.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.5.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.5.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.5.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.5.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.5.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.5.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.5.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.6.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.6.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.6.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.6.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.6.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.6.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.6.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.6.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.6.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.6.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.7.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.7.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.7.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.7.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.7.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.7.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.7.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.7.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.7.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.7.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.8.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.8.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.8.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.8.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.8.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.8.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.8.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.8.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.8.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.8.proj_out.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.9.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.9.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.9.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.9.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.9.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.9.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.single_transformer_blocks.9.proj_mlp.lora_A.weight": [4, 3072],
"base_model.model.single_transformer_blocks.9.proj_mlp.lora_B.weight": [12288, 4],
"base_model.model.single_transformer_blocks.9.proj_out.lora_A.weight": [4, 15360],
"base_model.model.single_transformer_blocks.9.proj_out.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.0.attn.add_k_proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.0.attn.add_k_proj.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.0.attn.add_q_proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.0.attn.add_q_proj.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.0.attn.add_v_proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.0.attn.add_v_proj.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.0.attn.to_add_out.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.0.attn.to_add_out.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.0.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.0.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.0.attn.to_out.0.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.0.attn.to_out.0.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.0.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.0.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.0.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.0.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.0.ff.net.0.proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.0.ff.net.0.proj.lora_B.weight": [12288, 4],
"base_model.model.transformer_blocks.0.ff_context.net.0.proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.0.ff_context.net.0.proj.lora_B.weight": [12288, 4],
"base_model.model.transformer_blocks.1.attn.add_k_proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.1.attn.add_k_proj.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.1.attn.add_q_proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.1.attn.add_q_proj.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.1.attn.add_v_proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.1.attn.add_v_proj.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.1.attn.to_add_out.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.1.attn.to_add_out.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.1.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.1.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.1.attn.to_out.0.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.1.attn.to_out.0.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.1.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.1.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.1.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.1.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.1.ff.net.0.proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.1.ff.net.0.proj.lora_B.weight": [12288, 4],
"base_model.model.transformer_blocks.1.ff_context.net.0.proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.1.ff_context.net.0.proj.lora_B.weight": [12288, 4],
"base_model.model.transformer_blocks.10.attn.add_k_proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.10.attn.add_k_proj.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.10.attn.add_q_proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.10.attn.add_q_proj.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.10.attn.add_v_proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.10.attn.add_v_proj.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.10.attn.to_add_out.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.10.attn.to_add_out.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.10.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.10.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.10.attn.to_out.0.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.10.attn.to_out.0.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.10.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.10.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.10.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.10.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.10.ff.net.0.proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.10.ff.net.0.proj.lora_B.weight": [12288, 4],
"base_model.model.transformer_blocks.10.ff_context.net.0.proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.10.ff_context.net.0.proj.lora_B.weight": [12288, 4],
"base_model.model.transformer_blocks.11.attn.add_k_proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.11.attn.add_k_proj.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.11.attn.add_q_proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.11.attn.add_q_proj.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.11.attn.add_v_proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.11.attn.add_v_proj.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.11.attn.to_add_out.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.11.attn.to_add_out.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.11.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.11.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.11.attn.to_out.0.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.11.attn.to_out.0.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.11.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.11.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.11.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.11.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.11.ff.net.0.proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.11.ff.net.0.proj.lora_B.weight": [12288, 4],
"base_model.model.transformer_blocks.11.ff_context.net.0.proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.11.ff_context.net.0.proj.lora_B.weight": [12288, 4],
"base_model.model.transformer_blocks.12.attn.add_k_proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.12.attn.add_k_proj.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.12.attn.add_q_proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.12.attn.add_q_proj.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.12.attn.add_v_proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.12.attn.add_v_proj.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.12.attn.to_add_out.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.12.attn.to_add_out.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.12.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.12.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.12.attn.to_out.0.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.12.attn.to_out.0.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.12.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.12.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.12.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.12.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.12.ff.net.0.proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.12.ff.net.0.proj.lora_B.weight": [12288, 4],
"base_model.model.transformer_blocks.12.ff_context.net.0.proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.12.ff_context.net.0.proj.lora_B.weight": [12288, 4],
"base_model.model.transformer_blocks.13.attn.add_k_proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.13.attn.add_k_proj.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.13.attn.add_q_proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.13.attn.add_q_proj.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.13.attn.add_v_proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.13.attn.add_v_proj.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.13.attn.to_add_out.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.13.attn.to_add_out.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.13.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.13.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.13.attn.to_out.0.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.13.attn.to_out.0.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.13.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.13.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.13.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.13.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.13.ff.net.0.proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.13.ff.net.0.proj.lora_B.weight": [12288, 4],
"base_model.model.transformer_blocks.13.ff_context.net.0.proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.13.ff_context.net.0.proj.lora_B.weight": [12288, 4],
"base_model.model.transformer_blocks.14.attn.add_k_proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.14.attn.add_k_proj.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.14.attn.add_q_proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.14.attn.add_q_proj.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.14.attn.add_v_proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.14.attn.add_v_proj.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.14.attn.to_add_out.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.14.attn.to_add_out.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.14.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.14.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.14.attn.to_out.0.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.14.attn.to_out.0.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.14.attn.to_q.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.14.attn.to_q.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.14.attn.to_v.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.14.attn.to_v.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.14.ff.net.0.proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.14.ff.net.0.proj.lora_B.weight": [12288, 4],
"base_model.model.transformer_blocks.14.ff_context.net.0.proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.14.ff_context.net.0.proj.lora_B.weight": [12288, 4],
"base_model.model.transformer_blocks.15.attn.add_k_proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.15.attn.add_k_proj.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.15.attn.add_q_proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.15.attn.add_q_proj.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.15.attn.add_v_proj.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.15.attn.add_v_proj.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.15.attn.to_add_out.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.15.attn.to_add_out.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.15.attn.to_k.lora_A.weight": [4, 3072],
"base_model.model.transformer_blocks.15.attn.to_k.lora_B.weight": [3072, 4],
"base_model.model.transformer_blocks.15.attn.to_out.0.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.15.attn.to_out.0.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.15.attn.to_q.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.15.attn.to_q.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.15.attn.to_v.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.15.attn.to_v.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.15.ff.net.0.proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.15.ff.net.0.proj.lora_B.weight": [12288, 4],
|
||||
"base_model.model.transformer_blocks.15.ff_context.net.0.proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.15.ff_context.net.0.proj.lora_B.weight": [12288, 4],
|
||||
"base_model.model.transformer_blocks.16.attn.add_k_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.16.attn.add_k_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.16.attn.add_q_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.16.attn.add_q_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.16.attn.add_v_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.16.attn.add_v_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.16.attn.to_add_out.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.16.attn.to_add_out.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.16.attn.to_k.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.16.attn.to_k.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.16.attn.to_out.0.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.16.attn.to_out.0.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.16.attn.to_q.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.16.attn.to_q.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.16.attn.to_v.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.16.attn.to_v.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.16.ff.net.0.proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.16.ff.net.0.proj.lora_B.weight": [12288, 4],
|
||||
"base_model.model.transformer_blocks.16.ff_context.net.0.proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.16.ff_context.net.0.proj.lora_B.weight": [12288, 4],
|
||||
"base_model.model.transformer_blocks.17.attn.add_k_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.17.attn.add_k_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.17.attn.add_q_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.17.attn.add_q_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.17.attn.add_v_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.17.attn.add_v_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.17.attn.to_add_out.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.17.attn.to_add_out.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.17.attn.to_k.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.17.attn.to_k.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.17.attn.to_out.0.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.17.attn.to_out.0.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.17.attn.to_q.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.17.attn.to_q.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.17.attn.to_v.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.17.attn.to_v.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.17.ff.net.0.proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.17.ff.net.0.proj.lora_B.weight": [12288, 4],
|
||||
"base_model.model.transformer_blocks.17.ff_context.net.0.proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.17.ff_context.net.0.proj.lora_B.weight": [12288, 4],
|
||||
"base_model.model.transformer_blocks.18.attn.add_k_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.18.attn.add_k_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.18.attn.add_q_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.18.attn.add_q_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.18.attn.add_v_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.18.attn.add_v_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.18.attn.to_add_out.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.18.attn.to_add_out.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.18.attn.to_k.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.18.attn.to_k.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.18.attn.to_out.0.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.18.attn.to_out.0.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.18.attn.to_q.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.18.attn.to_q.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.18.attn.to_v.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.18.attn.to_v.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.18.ff.net.0.proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.18.ff.net.0.proj.lora_B.weight": [12288, 4],
|
||||
"base_model.model.transformer_blocks.18.ff_context.net.0.proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.18.ff_context.net.0.proj.lora_B.weight": [12288, 4],
|
||||
"base_model.model.transformer_blocks.2.attn.add_k_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.2.attn.add_k_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.2.attn.add_q_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.2.attn.add_q_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.2.attn.add_v_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.2.attn.add_v_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.2.attn.to_add_out.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.2.attn.to_add_out.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.2.attn.to_k.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.2.attn.to_k.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.2.attn.to_out.0.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.2.attn.to_out.0.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.2.attn.to_q.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.2.attn.to_q.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.2.attn.to_v.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.2.attn.to_v.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.2.ff.net.0.proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.2.ff.net.0.proj.lora_B.weight": [12288, 4],
|
||||
"base_model.model.transformer_blocks.2.ff_context.net.0.proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.2.ff_context.net.0.proj.lora_B.weight": [12288, 4],
|
||||
"base_model.model.transformer_blocks.3.attn.add_k_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.3.attn.add_k_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.3.attn.add_q_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.3.attn.add_q_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.3.attn.add_v_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.3.attn.add_v_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.3.attn.to_add_out.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.3.attn.to_add_out.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.3.attn.to_k.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.3.attn.to_k.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.3.attn.to_out.0.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.3.attn.to_out.0.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.3.attn.to_q.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.3.attn.to_q.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.3.attn.to_v.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.3.attn.to_v.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.3.ff.net.0.proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.3.ff.net.0.proj.lora_B.weight": [12288, 4],
|
||||
"base_model.model.transformer_blocks.3.ff_context.net.0.proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.3.ff_context.net.0.proj.lora_B.weight": [12288, 4],
|
||||
"base_model.model.transformer_blocks.4.attn.add_k_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.4.attn.add_k_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.4.attn.add_q_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.4.attn.add_q_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.4.attn.add_v_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.4.attn.add_v_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.4.attn.to_add_out.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.4.attn.to_add_out.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.4.attn.to_k.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.4.attn.to_k.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.4.attn.to_out.0.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.4.attn.to_out.0.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.4.attn.to_q.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.4.attn.to_q.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.4.attn.to_v.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.4.attn.to_v.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.4.ff.net.0.proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.4.ff.net.0.proj.lora_B.weight": [12288, 4],
|
||||
"base_model.model.transformer_blocks.4.ff_context.net.0.proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.4.ff_context.net.0.proj.lora_B.weight": [12288, 4],
|
||||
"base_model.model.transformer_blocks.5.attn.add_k_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.5.attn.add_k_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.5.attn.add_q_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.5.attn.add_q_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.5.attn.add_v_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.5.attn.add_v_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.5.attn.to_add_out.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.5.attn.to_add_out.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.5.attn.to_k.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.5.attn.to_k.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.5.attn.to_out.0.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.5.attn.to_out.0.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.5.attn.to_q.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.5.attn.to_q.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.5.attn.to_v.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.5.attn.to_v.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.5.ff.net.0.proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.5.ff.net.0.proj.lora_B.weight": [12288, 4],
|
||||
"base_model.model.transformer_blocks.5.ff_context.net.0.proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.5.ff_context.net.0.proj.lora_B.weight": [12288, 4],
|
||||
"base_model.model.transformer_blocks.6.attn.add_k_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.6.attn.add_k_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.6.attn.add_q_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.6.attn.add_q_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.6.attn.add_v_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.6.attn.add_v_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.6.attn.to_add_out.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.6.attn.to_add_out.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.6.attn.to_k.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.6.attn.to_k.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.6.attn.to_out.0.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.6.attn.to_out.0.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.6.attn.to_q.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.6.attn.to_q.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.6.attn.to_v.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.6.attn.to_v.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.6.ff.net.0.proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.6.ff.net.0.proj.lora_B.weight": [12288, 4],
|
||||
"base_model.model.transformer_blocks.6.ff_context.net.0.proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.6.ff_context.net.0.proj.lora_B.weight": [12288, 4],
|
||||
"base_model.model.transformer_blocks.7.attn.add_k_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.7.attn.add_k_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.7.attn.add_q_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.7.attn.add_q_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.7.attn.add_v_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.7.attn.add_v_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.7.attn.to_add_out.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.7.attn.to_add_out.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.7.attn.to_k.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.7.attn.to_k.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.7.attn.to_out.0.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.7.attn.to_out.0.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.7.attn.to_q.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.7.attn.to_q.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.7.attn.to_v.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.7.attn.to_v.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.7.ff.net.0.proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.7.ff.net.0.proj.lora_B.weight": [12288, 4],
|
||||
"base_model.model.transformer_blocks.7.ff_context.net.0.proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.7.ff_context.net.0.proj.lora_B.weight": [12288, 4],
|
||||
"base_model.model.transformer_blocks.8.attn.add_k_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.8.attn.add_k_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.8.attn.add_q_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.8.attn.add_q_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.8.attn.add_v_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.8.attn.add_v_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.8.attn.to_add_out.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.8.attn.to_add_out.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.8.attn.to_k.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.8.attn.to_k.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.8.attn.to_out.0.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.8.attn.to_out.0.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.8.attn.to_q.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.8.attn.to_q.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.8.attn.to_v.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.8.attn.to_v.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.8.ff.net.0.proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.8.ff.net.0.proj.lora_B.weight": [12288, 4],
|
||||
"base_model.model.transformer_blocks.8.ff_context.net.0.proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.8.ff_context.net.0.proj.lora_B.weight": [12288, 4],
|
||||
"base_model.model.transformer_blocks.9.attn.add_k_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.9.attn.add_k_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.9.attn.add_q_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.9.attn.add_q_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.9.attn.add_v_proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.9.attn.add_v_proj.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.9.attn.to_add_out.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.9.attn.to_add_out.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.9.attn.to_k.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.9.attn.to_k.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.9.attn.to_out.0.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.9.attn.to_out.0.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.9.attn.to_q.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.9.attn.to_q.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.9.attn.to_v.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.9.attn.to_v.lora_B.weight": [3072, 4],
|
||||
"base_model.model.transformer_blocks.9.ff.net.0.proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.9.ff.net.0.proj.lora_B.weight": [12288, 4],
|
||||
"base_model.model.transformer_blocks.9.ff_context.net.0.proj.lora_A.weight": [4, 3072],
|
||||
"base_model.model.transformer_blocks.9.ff_context.net.0.proj.lora_B.weight": [12288, 4],
|
||||
}
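
Every entry above follows the standard PEFT low-rank pairing: for a base linear layer with weight shape [out_features, in_features], `lora_A.weight` has shape [rank, in_features] and `lora_B.weight` has shape [out_features, rank]. Here the rank is 4 and the transformer hidden size is 3072; only the `ff` / `ff_context` inner projections differ, expanding to 12288 (4 x 3072), which is why their `lora_B` shapes are [12288, 4]. The sketch below is a minimal, hypothetical consistency check over a shape map like the one above (`check_lora_pairs` and `shapes` are illustrative names, not part of this diff):

# Minimal sketch (assumed helper, not part of this diff): verify the
# lora_A / lora_B shape pairing in a key -> [dim0, dim1] mapping like the
# object above, e.g. loaded with json.load().
def check_lora_pairs(shapes: dict[str, list[int]]) -> None:
    for a_key, a_shape in shapes.items():
        if not a_key.endswith(".lora_A.weight"):
            continue
        b_shape = shapes[a_key.replace(".lora_A.", ".lora_B.")]
        rank, in_features = a_shape        # lora_A: [rank, in_features]
        out_features, b_rank = b_shape     # lora_B: [out_features, rank]
        assert rank == b_rank == 4, f"rank mismatch for {a_key}"
        # The effective update delta_W = lora_B @ lora_A has shape
        # [out_features, in_features]: [3072, 3072] for the attention
        # projections and [12288, 3072] for ff / ff_context above.
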
Some files were not shown because too many files have changed in this diff.