Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-21 23:07:54 -05:00)

Compare commits: ryan/icl ... ryan/flux- (88 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 8d04ec3f95 |  |
|  | 4581a37a48 |  |
|  | 54b7f9a063 |  |
|  | 7d488a5352 |  |
|  | 4d7667f63d |  |
|  | 08704ee8ec |  |
|  | 5910892c33 |  |
|  | 46a09d9e90 |  |
|  | df0c7d73f3 |  |
|  | 3905c97e32 |  |
|  | 0be796a808 |  |
|  | 7dd33b0f39 |  |
|  | 484aaf1595 |  |
|  | c276b60af9 |  |
|  | 5d8dd6e26e |  |
|  | 5bca68d873 |  |
|  | 64364e7911 |  |
|  | 6565cea039 |  |
|  | 3ebd8d6c07 |  |
|  | e970185161 |  |
|  | fa5653cdf7 |  |
|  | 9a7b000995 |  |
|  | 3a27242838 |  |
|  | 8cfb032051 |  |
|  | 06a9d4e2b2 |  |
|  | ed46acee79 |  |
|  | b54463d294 |  |
|  | faee79dc95 |  |
|  | 965cd76e33 |  |
|  | e5e8cbf34c |  |
|  | 3412a52594 |  |
|  | e01f66b026 |  |
|  | 53abdde242 |  |
|  | 94c088300f |  |
|  | 3741a6f5e0 |  |
|  | 059336258f |  |
|  | 2c23b8414c |  |
|  | 271cc52c80 |  |
|  | 20356c0746 |  |
|  | e44458609f |  |
|  | 69d86a7696 |  |
|  | 56db1a9292 |  |
|  | cf50e5eeee |  |
|  | c9c07968d2 |  |
|  | 97d0757176 |  |
|  | 0f51b677a9 |  |
|  | 56ca94c3a9 |  |
|  | 28d169f859 |  |
|  | 92f71d99ee |  |
|  | 0764c02b1d |  |
|  | 081c7569fe |  |
|  | 20f6532ee8 |  |
|  | b9e8910478 |  |
|  | ded8391e3c |  |
|  | e9dd2c396a |  |
|  | 0d86de0cb5 |  |
|  | bad1149504 |  |
|  | fda7aaa7ca |  |
|  | 85c616fa34 |  |
|  | 549f4e9794 |  |
|  | ef8ededd2f |  |
|  | 1948ffe106 |  |
|  | c70f4404c4 |  |
|  | b157ae928c |  |
|  | 7a0871992d |  |
|  | b38e2e14f4 |  |
|  | 7c0e70ec84 |  |
|  | a89ae9d2bf |  |
|  | ad1fcb3f07 |  |
|  | 87d74b910b |  |
|  | 7ad1c297a4 |  |
|  | fbc629faa6 |  |
|  | 7baa6b3c09 |  |
|  | 53d482bade |  |
|  | 5aca04b51b |  |
|  | ea8787c8ff |  |
|  | cead2c4445 |  |
|  | f76ac1808c |  |
|  | f01210861b |  |
|  | f757f23ef0 |  |
|  | 872a6ef209 |  |
|  | 4267e5ffc4 |  |
|  | a69c5ff9ef |  |
|  | 3ebd8d7d1b |  |
|  | 1fd80d54a4 |  |
|  | 991f63e455 |  |
|  | 6a1efd3527 |  |
|  | 0eadc0dd9e |  |
SECURITY.md (new file, 14 lines)
@@ -0,0 +1,14 @@
# Security Policy

## Supported Versions

Only the latest version of Invoke will receive security updates.
We do not currently maintain multiple versions of the application with updates.

## Reporting a Vulnerability

To report a vulnerability, contact the Invoke team directly at security@invoke.ai

At this time, we do not maintain a formal bug bounty program.

You can also share identified security issues with our team on huntr.com
@@ -38,7 +38,7 @@ This project is a combined effort of dedicated people from across the world. [C
 
 ## Code of Conduct
 
-The InvokeAI community is a welcoming place, and we want your help in maintaining that. Please review our [Code of Conduct](https://github.com/invoke-ai/InvokeAI/blob/main/CODE_OF_CONDUCT.md) to learn more - it's essential to maintaining a respectful and inclusive environment.
+The InvokeAI community is a welcoming place, and we want your help in maintaining that. Please review our [Code of Conduct](https://github.com/invoke-ai/InvokeAI/blob/main/docs/CODE_OF_CONDUCT.md) to learn more - it's essential to maintaining a respectful and inclusive environment.
 
 By making a contribution to this project, you certify that:
 
@@ -1,98 +1,120 @@
-from typing import Any, Union
+from typing import Optional, Union
 
 import numpy as np
-import numpy.typing as npt
 import torch
+import torchvision.transforms as T
+from PIL import Image
+from torchvision.transforms.functional import resize as tv_resize
 
 from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
-from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, LatentsField
+from invokeai.app.invocations.fields import FieldDescriptions, ImageField, Input, InputField, LatentsField
 from invokeai.app.invocations.primitives import LatentsOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
+from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
 from invokeai.backend.util.devices import TorchDevice
 
 
+def slerp(
+    t: Union[float, np.ndarray],
+    v0: Union[torch.Tensor, np.ndarray],
+    v1: Union[torch.Tensor, np.ndarray],
+    device: torch.device,
+    DOT_THRESHOLD: float = 0.9995,
+):
+    """
+    Spherical linear interpolation
+    Args:
+        t (float/np.ndarray): Float value between 0.0 and 1.0
+        v0 (np.ndarray): Starting vector
+        v1 (np.ndarray): Final vector
+        DOT_THRESHOLD (float): Threshold for considering the two vectors as
+            colineal. Not recommended to alter this.
+    Returns:
+        v2 (np.ndarray): Interpolation vector between v0 and v1
+    """
+    inputs_are_torch = False
+    if not isinstance(v0, np.ndarray):
+        inputs_are_torch = True
+        v0 = v0.detach().cpu().numpy()
+    if not isinstance(v1, np.ndarray):
+        inputs_are_torch = True
+        v1 = v1.detach().cpu().numpy()
+
+    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
+    if np.abs(dot) > DOT_THRESHOLD:
+        v2 = (1 - t) * v0 + t * v1
+    else:
+        theta_0 = np.arccos(dot)
+        sin_theta_0 = np.sin(theta_0)
+        theta_t = theta_0 * t
+        sin_theta_t = np.sin(theta_t)
+        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
+        s1 = sin_theta_t / sin_theta_0
+        v2 = s0 * v0 + s1 * v1
+
+    if inputs_are_torch:
+        v2 = torch.from_numpy(v2).to(device)
+
+    return v2
+
+
 @invocation(
     "lblend",
     title="Blend Latents",
-    tags=["latents", "blend"],
+    tags=["latents", "blend", "mask"],
     category="latents",
-    version="1.0.3",
+    version="1.1.0",
 )
 class BlendLatentsInvocation(BaseInvocation):
-    """Blend two latents using a given alpha. Latents must have same size."""
+    """Blend two latents using a given alpha. If a mask is provided, the second latents will be masked before blending.
+    Latents must have same size. Masking functionality added by @dwringer."""
 
-    latents_a: LatentsField = InputField(
-        description=FieldDescriptions.latents,
-        input=Input.Connection,
-    )
-    latents_b: LatentsField = InputField(
-        description=FieldDescriptions.latents,
-        input=Input.Connection,
-    )
-    alpha: float = InputField(default=0.5, description=FieldDescriptions.blend_alpha)
+    latents_a: LatentsField = InputField(description=FieldDescriptions.latents, input=Input.Connection)
+    latents_b: LatentsField = InputField(description=FieldDescriptions.latents, input=Input.Connection)
+    mask: Optional[ImageField] = InputField(default=None, description="Mask for blending in latents B")
+    alpha: float = InputField(ge=0, default=0.5, description=FieldDescriptions.blend_alpha)
+
+    def prep_mask_tensor(self, mask_image: Image.Image) -> torch.Tensor:
+        if mask_image.mode != "L":
+            mask_image = mask_image.convert("L")
+        mask_tensor = image_resized_to_grid_as_tensor(mask_image, normalize=False)
+        if mask_tensor.dim() == 3:
+            mask_tensor = mask_tensor.unsqueeze(0)
+        return mask_tensor
+
+    def replace_tensor_from_masked_tensor(
+        self, tensor: torch.Tensor, other_tensor: torch.Tensor, mask_tensor: torch.Tensor
+    ):
+        output = tensor.clone()
+        mask_tensor = mask_tensor.expand(output.shape)
+        if output.dtype != torch.float16:
+            output = torch.add(output, mask_tensor * torch.sub(other_tensor, tensor))
+        else:
+            output = torch.add(output, mask_tensor.half() * torch.sub(other_tensor, tensor))
+        return output
 
     def invoke(self, context: InvocationContext) -> LatentsOutput:
         latents_a = context.tensors.load(self.latents_a.latents_name)
         latents_b = context.tensors.load(self.latents_b.latents_name)
+        if self.mask is None:
+            mask_tensor = torch.zeros(latents_a.shape[-2:])
+        else:
+            mask_tensor = self.prep_mask_tensor(context.images.get_pil(self.mask.image_name))
+        mask_tensor = tv_resize(mask_tensor, latents_a.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False)
+
+        latents_b = self.replace_tensor_from_masked_tensor(latents_b, latents_a, mask_tensor)
 
         if latents_a.shape != latents_b.shape:
-            raise Exception("Latents to blend must be the same size.")
+            raise ValueError("Latents to blend must be the same size.")
 
         device = TorchDevice.choose_torch_device()
 
-        def slerp(
-            t: Union[float, npt.NDArray[Any]],  # FIXME: maybe use np.float32 here?
-            v0: Union[torch.Tensor, npt.NDArray[Any]],
-            v1: Union[torch.Tensor, npt.NDArray[Any]],
-            DOT_THRESHOLD: float = 0.9995,
-        ) -> Union[torch.Tensor, npt.NDArray[Any]]:
-            """
-            Spherical linear interpolation
-            Args:
-                t (float/np.ndarray): Float value between 0.0 and 1.0
-                v0 (np.ndarray): Starting vector
-                v1 (np.ndarray): Final vector
-                DOT_THRESHOLD (float): Threshold for considering the two vectors as
-                    colineal. Not recommended to alter this.
-            Returns:
-                v2 (np.ndarray): Interpolation vector between v0 and v1
-            """
-            inputs_are_torch = False
-            if not isinstance(v0, np.ndarray):
-                inputs_are_torch = True
-                v0 = v0.detach().cpu().numpy()
-            if not isinstance(v1, np.ndarray):
-                inputs_are_torch = True
-                v1 = v1.detach().cpu().numpy()
-
-            dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
-            if np.abs(dot) > DOT_THRESHOLD:
-                v2 = (1 - t) * v0 + t * v1
-            else:
-                theta_0 = np.arccos(dot)
-                sin_theta_0 = np.sin(theta_0)
-                theta_t = theta_0 * t
-                sin_theta_t = np.sin(theta_t)
-                s0 = np.sin(theta_0 - theta_t) / sin_theta_0
-                s1 = sin_theta_t / sin_theta_0
-                v2 = s0 * v0 + s1 * v1
-
-            if inputs_are_torch:
-                v2_torch: torch.Tensor = torch.from_numpy(v2).to(device)
-                return v2_torch
-            else:
-                assert isinstance(v2, np.ndarray)
-                return v2
-
         # blend
-        bl = slerp(self.alpha, latents_a, latents_b)
-        assert isinstance(bl, torch.Tensor)
-        blended_latents: torch.Tensor = bl  # for type checking convenience
+        blended_latents = slerp(self.alpha, latents_a, latents_b, device)
 
         # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
         blended_latents = blended_latents.to("cpu")
 
-        TorchDevice.empty_cache()
+        torch.cuda.empty_cache()
 
         name = context.tensors.save(tensor=blended_latents)
-        return LatentsOutput.build(latents_name=name, latents=blended_latents, seed=self.latents_a.seed)
+        return LatentsOutput.build(latents_name=name, latents=blended_latents)
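
For orientation (not part of the diff): the module-level slerp above accepts either numpy arrays or torch tensors, converts internally to numpy, and returns a tensor on the requested device. A minimal usage sketch, assuming slerp is imported from the invocation module shown above; the latent shapes are illustrative only:

import torch

# Hypothetical latents of identical shape; real latents come from context.tensors.load().
latents_a = torch.randn(1, 4, 64, 64)
latents_b = torch.randn(1, 4, 64, 64)

# alpha=0.0 yields latents_a, alpha=1.0 yields latents_b; intermediate values
# interpolate along the arc between the two (flattened) vectors.
blended = slerp(0.5, latents_a, latents_b, device=torch.device("cpu"))
assert blended.shape == latents_a.shape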
invokeai/app/invocations/composition-nodes.py (new file, 1563 lines)
File diff suppressed because it is too large.
@@ -250,6 +250,11 @@ class FluxConditioningField(BaseModel):
     """A conditioning tensor primitive value"""
 
     conditioning_name: str = Field(description="The name of conditioning tensor")
+    mask: Optional[TensorField] = Field(
+        default=None,
+        description="The mask associated with this conditioning tensor. Excluded regions should be set to False, "
+        "included regions should be set to True.",
+    )
 
 
 class SD3ConditioningField(BaseModel):
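
The mask convention introduced here (False = excluded, True = included) can be illustrated with a toy tensor; the shape is illustrative only, and in practice the mask is saved through the tensors service and referenced by a TensorField:

import torch

# A region covering the left half of a 64x64 area: True where the
# conditioning applies, False elsewhere.
mask = torch.zeros(64, 64, dtype=torch.bool)
mask[:, :32] = True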
@@ -30,6 +30,7 @@ from invokeai.backend.flux.controlnet.xlabs_controlnet_flux import XLabsControlN
 from invokeai.backend.flux.denoise import denoise
 from invokeai.backend.flux.extensions.inpaint_extension import InpaintExtension
 from invokeai.backend.flux.extensions.instantx_controlnet_extension import InstantXControlNetExtension
+from invokeai.backend.flux.extensions.regional_prompting_extension import RegionalPromptingExtension
 from invokeai.backend.flux.extensions.xlabs_controlnet_extension import XLabsControlNetExtension
 from invokeai.backend.flux.extensions.xlabs_ip_adapter_extension import XLabsIPAdapterExtension
 from invokeai.backend.flux.ip_adapter.xlabs_ip_adapter_flux import XlabsIpAdapterFlux

@@ -42,6 +43,7 @@ from invokeai.backend.flux.sampling_utils import (
     pack,
     unpack,
 )
+from invokeai.backend.flux.text_conditioning import FluxTextConditioning
 from invokeai.backend.lora.conversions.flux_lora_constants import FLUX_LORA_TRANSFORMER_PREFIX
 from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
 from invokeai.backend.lora.lora_patcher import LoRAPatcher

@@ -87,10 +89,10 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
         input=Input.Connection,
         title="Transformer",
     )
-    positive_text_conditioning: FluxConditioningField = InputField(
+    positive_text_conditioning: FluxConditioningField | list[FluxConditioningField] = InputField(
         description=FieldDescriptions.positive_cond, input=Input.Connection
     )
-    negative_text_conditioning: FluxConditioningField | None = InputField(
+    negative_text_conditioning: FluxConditioningField | list[FluxConditioningField] | None = InputField(
         default=None,
         description="Negative conditioning tensor. Can be None if cfg_scale is 1.0.",
         input=Input.Connection,
@@ -139,36 +141,12 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
         name = context.tensors.save(tensor=latents)
         return LatentsOutput.build(latents_name=name, latents=latents, seed=None)
 
-    def _load_text_conditioning(
-        self, context: InvocationContext, conditioning_name: str, dtype: torch.dtype
-    ) -> Tuple[torch.Tensor, torch.Tensor]:
-        # Load the conditioning data.
-        cond_data = context.conditioning.load(conditioning_name)
-        assert len(cond_data.conditionings) == 1
-        flux_conditioning = cond_data.conditionings[0]
-        assert isinstance(flux_conditioning, FLUXConditioningInfo)
-        flux_conditioning = flux_conditioning.to(dtype=dtype)
-        t5_embeddings = flux_conditioning.t5_embeds
-        clip_embeddings = flux_conditioning.clip_embeds
-        return t5_embeddings, clip_embeddings
-
     def _run_diffusion(
         self,
         context: InvocationContext,
     ):
         inference_dtype = torch.bfloat16
 
-        # Load the conditioning data.
-        pos_t5_embeddings, pos_clip_embeddings = self._load_text_conditioning(
-            context, self.positive_text_conditioning.conditioning_name, inference_dtype
-        )
-        neg_t5_embeddings: torch.Tensor | None = None
-        neg_clip_embeddings: torch.Tensor | None = None
-        if self.negative_text_conditioning is not None:
-            neg_t5_embeddings, neg_clip_embeddings = self._load_text_conditioning(
-                context, self.negative_text_conditioning.conditioning_name, inference_dtype
-            )
-
         # Load the input latents, if provided.
         init_latents = context.tensors.load(self.latents.latents_name) if self.latents else None
         if init_latents is not None:
@@ -183,15 +161,45 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
             dtype=inference_dtype,
             seed=self.seed,
         )
+        b, _c, latent_h, latent_w = noise.shape
+        packed_h = latent_h // 2
+        packed_w = latent_w // 2
+
+        # Load the conditioning data.
+        pos_text_conditionings = self._load_text_conditioning(
+            context=context,
+            cond_field=self.positive_text_conditioning,
+            packed_height=packed_h,
+            packed_width=packed_w,
+            dtype=inference_dtype,
+            device=TorchDevice.choose_torch_device(),
+        )
+        neg_text_conditionings: list[FluxTextConditioning] | None = None
+        if self.negative_text_conditioning is not None:
+            neg_text_conditionings = self._load_text_conditioning(
+                context=context,
+                cond_field=self.negative_text_conditioning,
+                packed_height=packed_h,
+                packed_width=packed_w,
+                dtype=inference_dtype,
+                device=TorchDevice.choose_torch_device(),
+            )
+        pos_regional_prompting_extension = RegionalPromptingExtension.from_text_conditioning(
+            pos_text_conditionings, img_seq_len=packed_h * packed_w
+        )
+        neg_regional_prompting_extension = (
+            RegionalPromptingExtension.from_text_conditioning(neg_text_conditionings, img_seq_len=packed_h * packed_w)
+            if neg_text_conditionings
+            else None
+        )
 
         transformer_info = context.models.load(self.transformer.transformer)
         is_schnell = "schnell" in transformer_info.config.config_path
 
         # Calculate the timestep schedule.
-        image_seq_len = noise.shape[-1] * noise.shape[-2] // 4
         timesteps = get_schedule(
             num_steps=self.num_steps,
-            image_seq_len=image_seq_len,
+            image_seq_len=packed_h * packed_w,
             shift=not is_schnell,
         )
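
For concreteness, the packed-dimension arithmetic introduced above, run on illustrative numbers (FLUX's VAE downscales by 8x in each dimension, and pack() folds 2x2 latent patches into single tokens):

# For a 1024x1024 image:
latent_h, latent_w = 128, 128                       # 1024 / 8
packed_h, packed_w = latent_h // 2, latent_w // 2   # 64, 64
img_seq_len = packed_h * packed_w                   # 4096 tokens
assert img_seq_len == (latent_h * latent_w) // 4    # matches the removed formula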
@@ -228,28 +236,17 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
 
         inpaint_mask = self._prep_inpaint_mask(context, x)
 
-        b, _c, latent_h, latent_w = x.shape
         img_ids = generate_img_ids(h=latent_h, w=latent_w, batch_size=b, device=x.device, dtype=x.dtype)
 
-        pos_bs, pos_t5_seq_len, _ = pos_t5_embeddings.shape
-        pos_txt_ids = torch.zeros(
-            pos_bs, pos_t5_seq_len, 3, dtype=inference_dtype, device=TorchDevice.choose_torch_device()
-        )
-        neg_txt_ids: torch.Tensor | None = None
-        if neg_t5_embeddings is not None:
-            neg_bs, neg_t5_seq_len, _ = neg_t5_embeddings.shape
-            neg_txt_ids = torch.zeros(
-                neg_bs, neg_t5_seq_len, 3, dtype=inference_dtype, device=TorchDevice.choose_torch_device()
-            )
-
         # Pack all latent tensors.
         init_latents = pack(init_latents) if init_latents is not None else None
         inpaint_mask = pack(inpaint_mask) if inpaint_mask is not None else None
         noise = pack(noise)
         x = pack(x)
 
-        # Now that we have 'packed' the latent tensors, verify that we calculated the image_seq_len correctly.
-        assert image_seq_len == x.shape[1]
+        # Now that we have 'packed' the latent tensors, verify that we calculated the image_seq_len, packed_h, and
+        # packed_w correctly.
+        assert packed_h * packed_w == x.shape[1]
 
         # Prepare inpaint extension.
         inpaint_extension: InpaintExtension | None = None
@@ -338,12 +335,8 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
                 model=transformer,
                 img=x,
                 img_ids=img_ids,
-                txt=pos_t5_embeddings,
-                txt_ids=pos_txt_ids,
-                vec=pos_clip_embeddings,
-                neg_txt=neg_t5_embeddings,
-                neg_txt_ids=neg_txt_ids,
-                neg_vec=neg_clip_embeddings,
+                pos_regional_prompting_extension=pos_regional_prompting_extension,
+                neg_regional_prompting_extension=neg_regional_prompting_extension,
                 timesteps=timesteps,
                 step_callback=self._build_step_callback(context),
                 guidance=self.guidance,
@@ -357,6 +350,43 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
         x = unpack(x.float(), self.height, self.width)
         return x
 
+    def _load_text_conditioning(
+        self,
+        context: InvocationContext,
+        cond_field: FluxConditioningField | list[FluxConditioningField],
+        packed_height: int,
+        packed_width: int,
+        dtype: torch.dtype,
+        device: torch.device,
+    ) -> list[FluxTextConditioning]:
+        """Load text conditioning data from a FluxConditioningField or a list of FluxConditioningFields."""
+        # Normalize to a list of FluxConditioningFields.
+        cond_list = [cond_field] if isinstance(cond_field, FluxConditioningField) else cond_field
+
+        text_conditionings: list[FluxTextConditioning] = []
+        for cond_field in cond_list:
+            # Load the text embeddings.
+            cond_data = context.conditioning.load(cond_field.conditioning_name)
+            assert len(cond_data.conditionings) == 1
+            flux_conditioning = cond_data.conditionings[0]
+            assert isinstance(flux_conditioning, FLUXConditioningInfo)
+            flux_conditioning = flux_conditioning.to(dtype=dtype, device=device)
+            t5_embeddings = flux_conditioning.t5_embeds
+            clip_embeddings = flux_conditioning.clip_embeds
+
+            # Load the mask, if provided.
+            mask: Optional[torch.Tensor] = None
+            if cond_field.mask is not None:
+                mask = context.tensors.load(cond_field.mask.tensor_name)
+                mask = mask.to(device=device)
+            mask = RegionalPromptingExtension.preprocess_regional_prompt_mask(
+                mask, packed_height, packed_width, dtype, device
+            )
+
+            text_conditionings.append(FluxTextConditioning(t5_embeddings, clip_embeddings, mask))
+
+        return text_conditionings
+
     @classmethod
     def prep_cfg_scale(
         cls, cfg_scale: float | list[float], timesteps: list[float], cfg_scale_start_step: int, cfg_scale_end_step: int
@@ -1,11 +1,18 @@
 from contextlib import ExitStack
-from typing import Iterator, Literal, Tuple
+from typing import Iterator, Literal, Optional, Tuple
 
 import torch
 from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5Tokenizer
 
 from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
-from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField
+from invokeai.app.invocations.fields import (
+    FieldDescriptions,
+    FluxConditioningField,
+    Input,
+    InputField,
+    TensorField,
+    UIComponent,
+)
 from invokeai.app.invocations.model import CLIPField, T5EncoderField
 from invokeai.app.invocations.primitives import FluxConditioningOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
@@ -22,7 +29,7 @@ from invokeai.backend.stable_diffusion.diffusion.conditioning_data import Condit
     title="FLUX Text Encoding",
     tags=["prompt", "conditioning", "flux"],
     category="conditioning",
-    version="1.1.0",
+    version="1.2.0",
     classification=Classification.Prototype,
 )
 class FluxTextEncoderInvocation(BaseInvocation):
@@ -41,7 +48,15 @@ class FluxTextEncoderInvocation(BaseInvocation):
     t5_max_seq_len: Literal[256, 512] = InputField(
         description="Max sequence length for the T5 encoder. Expected to be 256 for FLUX schnell models and 512 for FLUX dev models."
     )
-    prompt: str = InputField(description="Text prompt to encode.")
+    use_short_t5_seq_len: bool = InputField(
+        description="Use a shorter sequence length for the T5 encoder if a short prompt is used. This can improve "
+        + "performance and reduce peak memory, but may result in slightly different image outputs.",
+        default=True,
+    )
+    prompt: str = InputField(description="Text prompt to encode.", ui_component=UIComponent.Textarea)
+    mask: Optional[TensorField] = InputField(
+        default=None, description="A mask defining the region that this conditioning prompt applies to."
+    )
 
     @torch.no_grad()
     def invoke(self, context: InvocationContext) -> FluxConditioningOutput:
@@ -54,7 +69,9 @@ class FluxTextEncoderInvocation(BaseInvocation):
         )
 
         conditioning_name = context.conditioning.save(conditioning_data)
-        return FluxConditioningOutput.build(conditioning_name)
+        return FluxConditioningOutput(
+            conditioning=FluxConditioningField(conditioning_name=conditioning_name, mask=self.mask)
+        )
 
     def _t5_encode(self, context: InvocationContext) -> torch.Tensor:
         t5_tokenizer_info = context.models.load(self.t5_encoder.tokenizer)
@@ -62,6 +79,12 @@ class FluxTextEncoderInvocation(BaseInvocation):
 
         prompt = [self.prompt]
 
+        valid_seq_lens = [self.t5_max_seq_len]
+        if self.use_short_t5_seq_len:
+            # We allow a minimum sequence length of 128. Going too short results in more significant image changes.
+            valid_seq_lens = list(range(128, self.t5_max_seq_len, 128))
+            valid_seq_lens.append(self.t5_max_seq_len)
+
         with (
             t5_text_encoder_info as t5_text_encoder,
             t5_tokenizer_info as t5_tokenizer,
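
On concrete numbers, the valid_seq_lens computation above for a dev model (t5_max_seq_len == 512) with use_short_t5_seq_len enabled:

t5_max_seq_len = 512
valid_seq_lens = list(range(128, t5_max_seq_len, 128))  # [128, 256, 384]
valid_seq_lens.append(t5_max_seq_len)                   # [128, 256, 384, 512]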
@@ -69,10 +92,10 @@ class FluxTextEncoderInvocation(BaseInvocation):
             assert isinstance(t5_text_encoder, T5EncoderModel)
             assert isinstance(t5_tokenizer, T5Tokenizer)
 
-            t5_encoder = HFEncoder(t5_text_encoder, t5_tokenizer, False, self.t5_max_seq_len)
+            t5_encoder = HFEncoder(t5_text_encoder, t5_tokenizer, False)
 
             context.util.signal_progress("Running T5 encoder")
-            prompt_embeds = t5_encoder(prompt)
+            prompt_embeds = t5_encoder(prompt, valid_seq_lens)
 
         assert isinstance(prompt_embeds, torch.Tensor)
         return prompt_embeds
@@ -110,10 +133,10 @@ class FluxTextEncoderInvocation(BaseInvocation):
             # There are currently no supported CLIP quantized models. Add support here if needed.
             raise ValueError(f"Unsupported model format: {clip_text_encoder_config.format}")
 
-        clip_encoder = HFEncoder(clip_text_encoder, clip_tokenizer, True, 77)
+        clip_encoder = HFEncoder(clip_text_encoder, clip_tokenizer, True)
 
         context.util.signal_progress("Running CLIP encoder")
-        pooled_prompt_embeds = clip_encoder(prompt)
+        pooled_prompt_embeds = clip_encoder(prompt, [77])
 
         assert isinstance(pooled_prompt_embeds, torch.Tensor)
         return pooled_prompt_embeds
invokeai/app/invocations/image_panels.py (new file, 59 lines)
@@ -0,0 +1,59 @@
from pydantic import ValidationInfo, field_validator

from invokeai.app.invocations.baseinvocation import (
    BaseInvocation,
    BaseInvocationOutput,
    Classification,
    invocation,
    invocation_output,
)
from invokeai.app.invocations.fields import InputField, OutputField
from invokeai.app.services.shared.invocation_context import InvocationContext


@invocation_output("image_panel_coordinate_output")
class ImagePanelCoordinateOutput(BaseInvocationOutput):
    x_left: int = OutputField(description="The left x-coordinate of the panel.")
    y_top: int = OutputField(description="The top y-coordinate of the panel.")
    width: int = OutputField(description="The width of the panel.")
    height: int = OutputField(description="The height of the panel.")


@invocation(
    "image_panel_layout",
    title="Image Panel Layout",
    tags=["image", "panel", "layout"],
    category="image",
    version="1.0.0",
    classification=Classification.Prototype,
)
class ImagePanelLayoutInvocation(BaseInvocation):
    """Get the coordinates of a single panel in a grid. (If the full image shape cannot be divided evenly into panels,
    then the grid may not cover the entire image.)
    """

    width: int = InputField(description="The width of the entire grid.")
    height: int = InputField(description="The height of the entire grid.")
    num_cols: int = InputField(ge=1, default=1, description="The number of columns in the grid.")
    num_rows: int = InputField(ge=1, default=1, description="The number of rows in the grid.")
    panel_col_idx: int = InputField(ge=0, default=0, description="The column index of the panel to be processed.")
    panel_row_idx: int = InputField(ge=0, default=0, description="The row index of the panel to be processed.")

    @field_validator("panel_col_idx")
    def validate_panel_col_idx(cls, v: int, info: ValidationInfo) -> int:
        if v < 0 or v >= info.data["num_cols"]:
            raise ValueError(f"panel_col_idx must be between 0 and {info.data['num_cols'] - 1}")
        return v

    @field_validator("panel_row_idx")
    def validate_panel_row_idx(cls, v: int, info: ValidationInfo) -> int:
        if v < 0 or v >= info.data["num_rows"]:
            raise ValueError(f"panel_row_idx must be between 0 and {info.data['num_rows'] - 1}")
        return v

    def invoke(self, context: InvocationContext) -> ImagePanelCoordinateOutput:
        x_left = self.panel_col_idx * (self.width // self.num_cols)
        y_top = self.panel_row_idx * (self.height // self.num_rows)
        width = self.width // self.num_cols
        height = self.height // self.num_rows
        return ImagePanelCoordinateOutput(x_left=x_left, y_top=y_top, width=width, height=height)
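
A worked example of the panel arithmetic above (integer division, so any remainder is left uncovered at the right and bottom edges, as the docstring notes):

# A 1024x1024 grid split into 2 columns x 2 rows; panel at column 1, row 0:
width, height, num_cols, num_rows = 1024, 1024, 2, 2
panel_col_idx, panel_row_idx = 1, 0

x_left = panel_col_idx * (width // num_cols)    # 512
y_top = panel_row_idx * (height // num_rows)    # 0
panel_width = width // num_cols                 # 512
panel_height = height // num_rows               # 512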
@@ -86,7 +86,7 @@ class ModelLoadService(ModelLoadServiceBase):
 
         def torch_load_file(checkpoint: Path) -> AnyModel:
             scan_result = scan_file_path(checkpoint)
-            if scan_result.infected_files != 0:
+            if scan_result.infected_files != 0 or scan_result.scan_err:
                 raise Exception("The model at {checkpoint} is potentially infected by malware. Aborting load.")
             result = torch_load(checkpoint, map_location="cpu")
             return result
@@ -1,9 +1,10 @@
 import einops
 import torch
 
+from invokeai.backend.flux.extensions.regional_prompting_extension import RegionalPromptingExtension
 from invokeai.backend.flux.extensions.xlabs_ip_adapter_extension import XLabsIPAdapterExtension
 from invokeai.backend.flux.math import attention
-from invokeai.backend.flux.modules.layers import DoubleStreamBlock
+from invokeai.backend.flux.modules.layers import DoubleStreamBlock, SingleStreamBlock
 
 
 class CustomDoubleStreamBlockProcessor:
@@ -13,7 +14,12 @@ class CustomDoubleStreamBlockProcessor:
 
     @staticmethod
     def _double_stream_block_forward(
-        block: DoubleStreamBlock, img: torch.Tensor, txt: torch.Tensor, vec: torch.Tensor, pe: torch.Tensor
+        block: DoubleStreamBlock,
+        img: torch.Tensor,
+        txt: torch.Tensor,
+        vec: torch.Tensor,
+        pe: torch.Tensor,
+        attn_mask: torch.Tensor | None = None,
     ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
         """This function is a direct copy of DoubleStreamBlock.forward(), but it returns some of the intermediate
         values.
@@ -40,7 +46,7 @@ class CustomDoubleStreamBlockProcessor:
         k = torch.cat((txt_k, img_k), dim=2)
         v = torch.cat((txt_v, img_v), dim=2)
 
-        attn = attention(q, k, v, pe=pe)
+        attn = attention(q, k, v, pe=pe, attn_mask=attn_mask)
         txt_attn, img_attn = attn[:, : txt.shape[1]], attn[:, txt.shape[1] :]
 
         # calculate the img bloks
@@ -63,11 +69,15 @@ class CustomDoubleStreamBlockProcessor:
         vec: torch.Tensor,
         pe: torch.Tensor,
         ip_adapter_extensions: list[XLabsIPAdapterExtension],
+        regional_prompting_extension: RegionalPromptingExtension,
     ) -> tuple[torch.Tensor, torch.Tensor]:
         """A custom implementation of DoubleStreamBlock.forward() with additional features:
         - IP-Adapter support
         """
-        img, txt, img_q = CustomDoubleStreamBlockProcessor._double_stream_block_forward(block, img, txt, vec, pe)
+        attn_mask = regional_prompting_extension.get_double_stream_attn_mask(block_index)
+        img, txt, img_q = CustomDoubleStreamBlockProcessor._double_stream_block_forward(
+            block, img, txt, vec, pe, attn_mask=attn_mask
+        )
 
         # Apply IP-Adapter conditioning.
         for ip_adapter_extension in ip_adapter_extensions:
@@ -81,3 +91,48 @@ class CustomDoubleStreamBlockProcessor:
             )
 
         return img, txt
+
+
+class CustomSingleStreamBlockProcessor:
+    """A class containing a custom implementation of SingleStreamBlock.forward() with additional features (masking,
+    etc.)
+    """
+
+    @staticmethod
+    def _single_stream_block_forward(
+        block: SingleStreamBlock,
+        x: torch.Tensor,
+        vec: torch.Tensor,
+        pe: torch.Tensor,
+        attn_mask: torch.Tensor | None = None,
+    ) -> torch.Tensor:
+        """This function is a direct copy of SingleStreamBlock.forward()."""
+        mod, _ = block.modulation(vec)
+        x_mod = (1 + mod.scale) * block.pre_norm(x) + mod.shift
+        qkv, mlp = torch.split(block.linear1(x_mod), [3 * block.hidden_size, block.mlp_hidden_dim], dim=-1)
+
+        q, k, v = einops.rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=block.num_heads)
+        q, k = block.norm(q, k, v)
+
+        # compute attention
+        attn = attention(q, k, v, pe=pe, attn_mask=attn_mask)
+        # compute activation in mlp stream, cat again and run second linear layer
+        output = block.linear2(torch.cat((attn, block.mlp_act(mlp)), 2))
+        return x + mod.gate * output
+
+    @staticmethod
+    def custom_single_block_forward(
+        timestep_index: int,
+        total_num_timesteps: int,
+        block_index: int,
+        block: SingleStreamBlock,
+        img: torch.Tensor,
+        vec: torch.Tensor,
+        pe: torch.Tensor,
+        regional_prompting_extension: RegionalPromptingExtension,
+    ) -> torch.Tensor:
+        """A custom implementation of SingleStreamBlock.forward() with additional features:
+        - Masking
+        """
+        attn_mask = regional_prompting_extension.get_single_stream_attn_mask(block_index)
+        return CustomSingleStreamBlockProcessor._single_stream_block_forward(block, img, vec, pe, attn_mask=attn_mask)
@@ -7,6 +7,7 @@ from tqdm import tqdm
 from invokeai.backend.flux.controlnet.controlnet_flux_output import ControlNetFluxOutput, sum_controlnet_flux_outputs
 from invokeai.backend.flux.extensions.inpaint_extension import InpaintExtension
 from invokeai.backend.flux.extensions.instantx_controlnet_extension import InstantXControlNetExtension
+from invokeai.backend.flux.extensions.regional_prompting_extension import RegionalPromptingExtension
 from invokeai.backend.flux.extensions.xlabs_controlnet_extension import XLabsControlNetExtension
 from invokeai.backend.flux.extensions.xlabs_ip_adapter_extension import XLabsIPAdapterExtension
 from invokeai.backend.flux.model import Flux
@@ -18,14 +19,8 @@ def denoise(
     # model input
     img: torch.Tensor,
     img_ids: torch.Tensor,
-    # positive text conditioning
-    txt: torch.Tensor,
-    txt_ids: torch.Tensor,
-    vec: torch.Tensor,
-    # negative text conditioning
-    neg_txt: torch.Tensor | None,
-    neg_txt_ids: torch.Tensor | None,
-    neg_vec: torch.Tensor | None,
+    pos_regional_prompting_extension: RegionalPromptingExtension,
+    neg_regional_prompting_extension: RegionalPromptingExtension | None,
     # sampling parameters
     timesteps: list[float],
     step_callback: Callable[[PipelineIntermediateState], None],
@@ -61,9 +56,9 @@ def denoise(
             total_num_timesteps=total_steps,
             img=img,
             img_ids=img_ids,
-            txt=txt,
-            txt_ids=txt_ids,
-            y=vec,
+            txt=pos_regional_prompting_extension.regional_text_conditioning.t5_embeddings,
+            txt_ids=pos_regional_prompting_extension.regional_text_conditioning.t5_txt_ids,
+            y=pos_regional_prompting_extension.regional_text_conditioning.clip_embeddings,
             timesteps=t_vec,
             guidance=guidance_vec,
         )
@@ -78,9 +73,9 @@ def denoise(
         pred = model(
             img=img,
             img_ids=img_ids,
-            txt=txt,
-            txt_ids=txt_ids,
-            y=vec,
+            txt=pos_regional_prompting_extension.regional_text_conditioning.t5_embeddings,
+            txt_ids=pos_regional_prompting_extension.regional_text_conditioning.t5_txt_ids,
+            y=pos_regional_prompting_extension.regional_text_conditioning.clip_embeddings,
             timesteps=t_vec,
             guidance=guidance_vec,
             timestep_index=step_index,
@@ -88,6 +83,7 @@ def denoise(
             controlnet_double_block_residuals=merged_controlnet_residuals.double_block_residuals,
             controlnet_single_block_residuals=merged_controlnet_residuals.single_block_residuals,
             ip_adapter_extensions=pos_ip_adapter_extensions,
+            regional_prompting_extension=pos_regional_prompting_extension,
         )
 
         step_cfg_scale = cfg_scale[step_index]
@@ -97,15 +93,15 @@ def denoise(
             # TODO(ryand): Add option to run positive and negative predictions in a single batch for better performance
             # on systems with sufficient VRAM.
 
-            if neg_txt is None or neg_txt_ids is None or neg_vec is None:
+            if neg_regional_prompting_extension is None:
                 raise ValueError("Negative text conditioning is required when cfg_scale is not 1.0.")
 
             neg_pred = model(
                 img=img,
                 img_ids=img_ids,
-                txt=neg_txt,
-                txt_ids=neg_txt_ids,
-                y=neg_vec,
+                txt=neg_regional_prompting_extension.regional_text_conditioning.t5_embeddings,
+                txt_ids=neg_regional_prompting_extension.regional_text_conditioning.t5_txt_ids,
+                y=neg_regional_prompting_extension.regional_text_conditioning.clip_embeddings,
                 timesteps=t_vec,
                 guidance=guidance_vec,
                 timestep_index=step_index,
@@ -113,6 +109,7 @@ def denoise(
                 controlnet_double_block_residuals=None,
                 controlnet_single_block_residuals=None,
                 ip_adapter_extensions=neg_ip_adapter_extensions,
+                regional_prompting_extension=neg_regional_prompting_extension,
             )
             pred = neg_pred + step_cfg_scale * (pred - neg_pred)
 
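
The classifier-free guidance combination in that last context line is the standard linear extrapolation between the negative and positive predictions. A self-contained sketch with toy tensors (shapes illustrative only):

import torch

pred = torch.randn(1, 4096, 64)      # stand-in for the positive prediction
neg_pred = torch.randn(1, 4096, 64)  # stand-in for the negative prediction
step_cfg_scale = 3.5                 # illustrative guidance strength

# A scale of 1.0 reduces to pred; larger values push the result away from neg_pred.
guided = neg_pred + step_cfg_scale * (pred - neg_pred)
assert guided.shape == pred.shape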
invokeai/backend/flux/extensions/regional_prompting_extension.py (new file, 276 lines)
@@ -0,0 +1,276 @@
from typing import Optional

import torch
import torchvision

from invokeai.backend.flux.text_conditioning import FluxRegionalTextConditioning, FluxTextConditioning
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import Range
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.mask import to_standard_float_mask


class RegionalPromptingExtension:
    """A class for managing regional prompting with FLUX.

    This implementation is inspired by https://arxiv.org/pdf/2411.02395 (though there are significant differences).
    """

    def __init__(
        self,
        regional_text_conditioning: FluxRegionalTextConditioning,
        restricted_attn_mask: torch.Tensor | None = None,
    ):
        self.regional_text_conditioning = regional_text_conditioning
        self.restricted_attn_mask = restricted_attn_mask

    def get_double_stream_attn_mask(self, block_index: int) -> torch.Tensor | None:
        order = [self.restricted_attn_mask, None]
        return order[block_index % len(order)]

    def get_single_stream_attn_mask(self, block_index: int) -> torch.Tensor | None:
        order = [self.restricted_attn_mask, None]
        return order[block_index % len(order)]

    @classmethod
    def from_text_conditioning(cls, text_conditioning: list[FluxTextConditioning], img_seq_len: int):
        """Create a RegionalPromptingExtension from a list of text conditionings.

        Args:
            text_conditioning (list[FluxTextConditioning]): The text conditionings to use for regional prompting.
            img_seq_len (int): The image sequence length (i.e. packed_height * packed_width).
        """
        regional_text_conditioning = cls._concat_regional_text_conditioning(text_conditioning)
        attn_mask_with_restricted_img_self_attn = cls._prepare_restricted_attn_mask(
            regional_text_conditioning, img_seq_len
        )
        return cls(
            regional_text_conditioning=regional_text_conditioning,
            restricted_attn_mask=attn_mask_with_restricted_img_self_attn,
        )

    # Keeping _prepare_unrestricted_attn_mask for reference as an alternative masking strategy:
    #
    # @classmethod
    # def _prepare_unrestricted_attn_mask(
    #     cls,
    #     regional_text_conditioning: FluxRegionalTextConditioning,
    #     img_seq_len: int,
    # ) -> torch.Tensor:
    #     """Prepare an 'unrestricted' attention mask. In this context, 'unrestricted' means that:
    #     - img self-attention is not masked.
    #     - img regions attend to both txt within their own region and to global prompts.
    #     """
    #     device = TorchDevice.choose_torch_device()
    #
    #     # Infer txt_seq_len from the t5_embeddings tensor.
    #     txt_seq_len = regional_text_conditioning.t5_embeddings.shape[1]
    #
    #     # In the attention blocks, the txt seq and img seq are concatenated and then attention is applied.
    #     # Concatenation happens in the following order: [txt_seq, img_seq].
    #     # There are 4 portions of the attention mask to consider as we prepare it:
    #     # 1. txt attends to itself
    #     # 2. txt attends to corresponding regional img
    #     # 3. regional img attends to corresponding txt
    #     # 4. regional img attends to itself
    #
    #     # Initialize empty attention mask.
    #     regional_attention_mask = torch.zeros(
    #         (txt_seq_len + img_seq_len, txt_seq_len + img_seq_len), device=device, dtype=torch.float16
    #     )
    #
    #     for image_mask, t5_embedding_range in zip(
    #         regional_text_conditioning.image_masks, regional_text_conditioning.t5_embedding_ranges, strict=True
    #     ):
    #         # 1. txt attends to itself
    #         regional_attention_mask[
    #             t5_embedding_range.start : t5_embedding_range.end, t5_embedding_range.start : t5_embedding_range.end
    #         ] = 1.0
    #
    #         # 2. txt attends to corresponding regional img
    #         # Note that we reshape to (1, img_seq_len) to ensure broadcasting works as desired.
    #         fill_value = image_mask.view(1, img_seq_len) if image_mask is not None else 1.0
    #         regional_attention_mask[t5_embedding_range.start : t5_embedding_range.end, txt_seq_len:] = fill_value
    #
    #         # 3. regional img attends to corresponding txt
    #         # Note that we reshape to (img_seq_len, 1) to ensure broadcasting works as desired.
    #         fill_value = image_mask.view(img_seq_len, 1) if image_mask is not None else 1.0
    #         regional_attention_mask[txt_seq_len:, t5_embedding_range.start : t5_embedding_range.end] = fill_value
    #
    #         # 4. regional img attends to itself
    #         # Allow unrestricted img self attention.
    #         regional_attention_mask[txt_seq_len:, txt_seq_len:] = 1.0
    #
    #     # Convert attention mask to boolean.
    #     regional_attention_mask = regional_attention_mask > 0.5
    #
    #     return regional_attention_mask

    @classmethod
    def _prepare_restricted_attn_mask(
        cls,
        regional_text_conditioning: FluxRegionalTextConditioning,
        img_seq_len: int,
    ) -> torch.Tensor | None:
        """Prepare a 'restricted' attention mask. In this context, 'restricted' means that:
        - img self-attention is only allowed within regions.
        - img regions only attend to txt within their own region, not to global prompts.
        """
        # Identify background region. I.e. the region that is not covered by any region masks.
        background_region_mask: None | torch.Tensor = None
        for image_mask in regional_text_conditioning.image_masks:
            if image_mask is not None:
                if background_region_mask is None:
                    background_region_mask = torch.ones_like(image_mask)
                background_region_mask *= 1 - image_mask

        if background_region_mask is None:
            # There are no region masks, short-circuit and return None.
            # TODO(ryand): We could restrict txt-txt attention across multiple global prompts, but this is a rare
            # use case and would make the logic here significantly more complicated.
            return None

        device = TorchDevice.choose_torch_device()

        # Infer txt_seq_len from the t5_embeddings tensor.
        txt_seq_len = regional_text_conditioning.t5_embeddings.shape[1]

        # In the attention blocks, the txt seq and img seq are concatenated and then attention is applied.
        # Concatenation happens in the following order: [txt_seq, img_seq].
        # There are 4 portions of the attention mask to consider as we prepare it:
        # 1. txt attends to itself
        # 2. txt attends to corresponding regional img
        # 3. regional img attends to corresponding txt
        # 4. regional img attends to itself

        # Initialize empty attention mask.
        regional_attention_mask = torch.zeros(
            (txt_seq_len + img_seq_len, txt_seq_len + img_seq_len), device=device, dtype=torch.float16
        )

        for image_mask, t5_embedding_range in zip(
            regional_text_conditioning.image_masks, regional_text_conditioning.t5_embedding_ranges, strict=True
        ):
            # 1. txt attends to itself
            regional_attention_mask[
                t5_embedding_range.start : t5_embedding_range.end, t5_embedding_range.start : t5_embedding_range.end
            ] = 1.0

            if image_mask is not None:
                # 2. txt attends to corresponding regional img
                # Note that we reshape to (1, img_seq_len) to ensure broadcasting works as desired.
                regional_attention_mask[t5_embedding_range.start : t5_embedding_range.end, txt_seq_len:] = (
                    image_mask.view(1, img_seq_len)
                )

                # 3. regional img attends to corresponding txt
                # Note that we reshape to (img_seq_len, 1) to ensure broadcasting works as desired.
                regional_attention_mask[txt_seq_len:, t5_embedding_range.start : t5_embedding_range.end] = (
                    image_mask.view(img_seq_len, 1)
                )

                # 4. regional img attends to itself
                image_mask = image_mask.view(img_seq_len, 1)
                regional_attention_mask[txt_seq_len:, txt_seq_len:] += image_mask @ image_mask.T
            else:
                # We don't allow attention between non-background image regions and global prompts. This helps to
                # ensure that regions focus on their local prompts. We do, however, allow attention between background
                # regions and global prompts. If we didn't do this, then the background regions would not attend to
                # any txt embeddings, which we found experimentally to cause artifacts.

                # 2. global txt attends to background region
                # Note that we reshape to (1, img_seq_len) to ensure broadcasting works as desired.
                regional_attention_mask[t5_embedding_range.start : t5_embedding_range.end, txt_seq_len:] = (
                    background_region_mask.view(1, img_seq_len)
                )

                # 3. background region attends to global txt
                # Note that we reshape to (img_seq_len, 1) to ensure broadcasting works as desired.
                regional_attention_mask[txt_seq_len:, t5_embedding_range.start : t5_embedding_range.end] = (
                    background_region_mask.view(img_seq_len, 1)
                )

                # Allow background regions to attend to themselves.
                regional_attention_mask[txt_seq_len:, txt_seq_len:] += background_region_mask.view(img_seq_len, 1)
                regional_attention_mask[txt_seq_len:, txt_seq_len:] += background_region_mask.view(1, img_seq_len)

        # Convert attention mask to boolean.
        regional_attention_mask = regional_attention_mask > 0.5

        return regional_attention_mask

    @classmethod
    def _concat_regional_text_conditioning(
        cls,
        text_conditionings: list[FluxTextConditioning],
    ) -> FluxRegionalTextConditioning:
        """Concatenate regional text conditioning data into a single conditioning tensor (with associated masks)."""
        concat_t5_embeddings: list[torch.Tensor] = []
        concat_t5_embedding_ranges: list[Range] = []
        image_masks: list[torch.Tensor | None] = []

        # Choose global CLIP embedding.
        # Use the first global prompt's CLIP embedding as the global CLIP embedding. If there is no global prompt, use
        # the first prompt's CLIP embedding.
        global_clip_embedding: torch.Tensor = text_conditionings[0].clip_embeddings
        for text_conditioning in text_conditionings:
            if text_conditioning.mask is None:
                global_clip_embedding = text_conditioning.clip_embeddings
                break

        cur_t5_embedding_len = 0
        for text_conditioning in text_conditionings:
            concat_t5_embeddings.append(text_conditioning.t5_embeddings)

            concat_t5_embedding_ranges.append(
                Range(start=cur_t5_embedding_len, end=cur_t5_embedding_len + text_conditioning.t5_embeddings.shape[1])
            )

            image_masks.append(text_conditioning.mask)

            cur_t5_embedding_len += text_conditioning.t5_embeddings.shape[1]

        t5_embeddings = torch.cat(concat_t5_embeddings, dim=1)

        # Initialize the txt_ids tensor.
        pos_bs, pos_t5_seq_len, _ = t5_embeddings.shape
        t5_txt_ids = torch.zeros(
            pos_bs, pos_t5_seq_len, 3, dtype=t5_embeddings.dtype, device=TorchDevice.choose_torch_device()
        )

        return FluxRegionalTextConditioning(
            t5_embeddings=t5_embeddings,
            clip_embeddings=global_clip_embedding,
            t5_txt_ids=t5_txt_ids,
            image_masks=image_masks,
            t5_embedding_ranges=concat_t5_embedding_ranges,
        )

    @staticmethod
    def preprocess_regional_prompt_mask(
        mask: Optional[torch.Tensor], packed_height: int, packed_width: int, dtype: torch.dtype, device: torch.device
    ) -> torch.Tensor:
        """Preprocess a regional prompt mask to match the target height and width.
        If mask is None, returns a mask of all ones with the target height and width.
        If mask is not None, resizes the mask to the target height and width using 'nearest' interpolation.

        packed_height and packed_width are the target height and width of the mask in the 'packed' latent space.

        Returns:
            torch.Tensor: The processed mask. shape: (1, 1, packed_height * packed_width).
        """

        if mask is None:
            return torch.ones((1, 1, packed_height * packed_width), dtype=dtype, device=device)

        mask = to_standard_float_mask(mask, out_dtype=dtype)

        tf = torchvision.transforms.Resize(
            (packed_height, packed_width), interpolation=torchvision.transforms.InterpolationMode.NEAREST
        )

        # Add a batch dimension to the mask, because torchvision expects shape (batch, channels, h, w).
        mask = mask.unsqueeze(0)  # Shape: (1, h, w) -> (1, 1, h, w)
        resized_mask = tf(mask)

        # Flatten the height and width dimensions into a single image_seq_len dimension.
        return resized_mask.flatten(start_dim=2)
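
To see what _prepare_restricted_attn_mask builds, here is the same four-part construction hand-worked on a toy problem (one regional prompt with two txt tokens, an image of four tokens of which the first two belong to the region); this is an illustrative sketch, not code from the diff:

import torch

txt_seq_len, img_seq_len = 2, 4
region = torch.tensor([1.0, 1.0, 0.0, 0.0])  # img tokens 0 and 1 are in the region

m = torch.zeros(txt_seq_len + img_seq_len, txt_seq_len + img_seq_len)
m[:txt_seq_len, :txt_seq_len] = 1.0                  # 1. txt attends to itself
m[:txt_seq_len, txt_seq_len:] = region.view(1, -1)   # 2. txt -> regional img
m[txt_seq_len:, :txt_seq_len] = region.view(-1, 1)   # 3. regional img -> txt
m[txt_seq_len:, txt_seq_len:] += region.view(-1, 1) @ region.view(1, -1)  # 4. img self-attn within the region
attn_mask = m > 0.5  # boolean: True = attention allowed

Note also that get_double_stream_attn_mask and get_single_stream_attn_mask alternate between this mask and None, so the restriction is only applied on every other transformer block.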
@@ -5,10 +5,10 @@ from einops import rearrange
 from torch import Tensor
 
 
-def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor) -> Tensor:
+def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor, attn_mask: Tensor | None = None) -> Tensor:
     q, k = apply_rope(q, k, pe)
 
-    x = torch.nn.functional.scaled_dot_product_attention(q, k, v)
+    x = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=attn_mask)
     x = rearrange(x, "B H L D -> B L (H D)")
 
     return x
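
The new attn_mask parameter threads straight through to torch.nn.functional.scaled_dot_product_attention, where a boolean mask marks which positions are allowed to attend. A minimal self-contained sketch (shapes illustrative):

import torch
import torch.nn.functional as F

# (batch, heads, seq_len, head_dim)
q = torch.randn(1, 2, 6, 8)
k = torch.randn(1, 2, 6, 8)
v = torch.randn(1, 2, 6, 8)

# Boolean mask broadcast over (seq_len, seq_len): True = allow attention.
attn_mask = torch.ones(6, 6, dtype=torch.bool).tril()

out = F.scaled_dot_product_attention(q, k, v, attn_mask=attn_mask)
assert out.shape == (1, 2, 6, 8)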
@@ -5,7 +5,11 @@ from dataclasses import dataclass
 import torch
 from torch import Tensor, nn
 
-from invokeai.backend.flux.custom_block_processor import CustomDoubleStreamBlockProcessor
+from invokeai.backend.flux.custom_block_processor import (
+    CustomDoubleStreamBlockProcessor,
+    CustomSingleStreamBlockProcessor,
+)
+from invokeai.backend.flux.extensions.regional_prompting_extension import RegionalPromptingExtension
 from invokeai.backend.flux.extensions.xlabs_ip_adapter_extension import XLabsIPAdapterExtension
 from invokeai.backend.flux.modules.layers import (
     DoubleStreamBlock,
@@ -95,6 +99,7 @@ class Flux(nn.Module):
         controlnet_double_block_residuals: list[Tensor] | None,
         controlnet_single_block_residuals: list[Tensor] | None,
         ip_adapter_extensions: list[XLabsIPAdapterExtension],
+        regional_prompting_extension: RegionalPromptingExtension,
     ) -> Tensor:
         if img.ndim != 3 or txt.ndim != 3:
             raise ValueError("Input img and txt tensors must have 3 dimensions.")
@@ -117,7 +122,6 @@ class Flux(nn.Module):
             assert len(controlnet_double_block_residuals) == len(self.double_blocks)
         for block_index, block in enumerate(self.double_blocks):
             assert isinstance(block, DoubleStreamBlock)
-
             img, txt = CustomDoubleStreamBlockProcessor.custom_double_block_forward(
                 timestep_index=timestep_index,
                 total_num_timesteps=total_num_timesteps,
@@ -128,6 +132,7 @@ class Flux(nn.Module):
                 vec=vec,
                 pe=pe,
                 ip_adapter_extensions=ip_adapter_extensions,
+                regional_prompting_extension=regional_prompting_extension,
             )
 
             if controlnet_double_block_residuals is not None:
@@ -140,7 +145,17 @@ class Flux(nn.Module):
             assert len(controlnet_single_block_residuals) == len(self.single_blocks)
 
         for block_index, block in enumerate(self.single_blocks):
-            img = block(img, vec=vec, pe=pe)
+            assert isinstance(block, SingleStreamBlock)
+            img = CustomSingleStreamBlockProcessor.custom_single_block_forward(
+                timestep_index=timestep_index,
+                total_num_timesteps=total_num_timesteps,
+                block_index=block_index,
+                block=block,
+                img=img,
+                vec=vec,
+                pe=pe,
+                regional_prompting_extension=regional_prompting_extension,
+            )
 
             if controlnet_single_block_residuals is not None:
                 img[:, txt.shape[1] :, ...] += controlnet_single_block_residuals[block_index]
@@ -1,32 +1,53 @@
# Initially pulled from https://github.com/black-forest-labs/flux

from torch import Tensor, nn
from transformers import PreTrainedModel, PreTrainedTokenizer


class HFEncoder(nn.Module):
    def __init__(self, encoder: PreTrainedModel, tokenizer: PreTrainedTokenizer, is_clip: bool, max_length: int):
    def __init__(self, encoder: PreTrainedModel, tokenizer: PreTrainedTokenizer, is_clip: bool):
        super().__init__()
        self.max_length = max_length
        self.is_clip = is_clip
        self.output_key = "pooler_output" if self.is_clip else "last_hidden_state"
        self.tokenizer = tokenizer
        self.hf_module = encoder
        self.hf_module = self.hf_module.eval().requires_grad_(False)

    def forward(self, text: list[str]) -> Tensor:
    def forward(self, text: list[str], valid_seq_lens: list[int]) -> Tensor:
        """Encode text into a tensor.

        Args:
            text: A list of text prompts to encode.
            valid_seq_lens: A list of valid sequence lengths. The shortest valid sequence length that can contain the
                text will be used. If the largest valid sequence length cannot contain the text, the encoding will be
                truncated.
        """
        valid_seq_lens = sorted(valid_seq_lens)

        # Perform initial encoding with the maximum valid sequence length.
        batch_encoding = self.tokenizer(
            text,
            truncation=True,
            max_length=self.max_length,
            return_length=False,
            max_length=max(valid_seq_lens),
            return_length=True,
            return_overflowing_tokens=False,
            padding="max_length",
            return_tensors="pt",
        )

        # Find selected_seq_len, the minimum valid sequence length that can contain all of the input tokens.
        seq_len: int = batch_encoding["length"][0].item()
        selected_seq_len = valid_seq_lens[-1]
        for len in valid_seq_lens:
            if len >= seq_len:
                selected_seq_len = len
                break

        input_ids = batch_encoding["input_ids"][..., :selected_seq_len]

        outputs = self.hf_module(
            input_ids=batch_encoding["input_ids"].to(self.hf_module.device),
            input_ids=input_ids.to(self.hf_module.device),
            attention_mask=None,
            output_hidden_states=False,
        )
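The new forward() above picks the smallest tokenizer bucket that fits the prompt, falling back to the largest bucket (with truncation) when nothing fits. A minimal standalone sketch of that selection logic (note the hunk's loop variable len shadows the Python builtin of the same name; the sketch renames it candidate, otherwise the logic is identical):

def select_seq_len(token_count: int, valid_seq_lens: list[int]) -> int:
    # Fall back to the largest bucket; the encoding is truncated in that case.
    valid_seq_lens = sorted(valid_seq_lens)
    selected = valid_seq_lens[-1]
    for candidate in valid_seq_lens:
        if candidate >= token_count:
            selected = candidate
            break
    return selected

assert select_seq_len(100, [256, 512]) == 256  # shortest bucket that can contain the text
assert select_seq_len(600, [256, 512]) == 512  # nothing fits -> largest bucket, truncated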
36  invokeai/backend/flux/text_conditioning.py  Normal file
@@ -0,0 +1,36 @@
from dataclasses import dataclass

import torch

from invokeai.backend.stable_diffusion.diffusion.conditioning_data import Range


@dataclass
class FluxTextConditioning:
    t5_embeddings: torch.Tensor
    clip_embeddings: torch.Tensor
    # If mask is None, the prompt is a global prompt.
    mask: torch.Tensor | None


@dataclass
class FluxRegionalTextConditioning:
    # Concatenated text embeddings.
    # Shape: (1, concatenated_txt_seq_len, 4096)
    t5_embeddings: torch.Tensor
    # Shape: (1, concatenated_txt_seq_len, 3)
    t5_txt_ids: torch.Tensor

    # Global CLIP embeddings.
    # Shape: (1, 768)
    clip_embeddings: torch.Tensor

    # A binary mask indicating the regions of the image that the prompt should be applied to. If None, the prompt is a
    # global prompt.
    # image_masks[i] is the mask for the ith prompt.
    # image_masks[i] has shape (1, image_seq_len) and dtype torch.bool.
    image_masks: list[torch.Tensor | None]

    # List of ranges that represent the embedding ranges for each mask.
    # t5_embedding_ranges[i] contains the range of the t5 embeddings that correspond to image_masks[i].
    t5_embedding_ranges: list[Range]
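The two dataclasses above are related by concatenation: each per-prompt FluxTextConditioning contributes a slice of the concatenated T5 embedding, and a Range records where that slice lives so it can be matched back to its image mask. A minimal sketch of that bookkeeping (assuming Range is a simple (start, end) pair; the helper name is hypothetical, and shapes follow the comments in the dataclass):

import torch

def concat_regional_t5(prompts):  # prompts: list[FluxTextConditioning]
    embeddings, image_masks, ranges, cursor = [], [], [], 0
    for p in prompts:
        seq_len = p.t5_embeddings.shape[1]
        embeddings.append(p.t5_embeddings)
        image_masks.append(p.mask)  # None marks a global prompt
        ranges.append(Range(start=cursor, end=cursor + seq_len))  # assumed constructor
        cursor += seq_len
    # (1, concatenated_txt_seq_len, 4096), aligned index-for-index with ranges / image_masks
    return torch.cat(embeddings, dim=1), image_masks, ranges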
BIN  invokeai/backend/image_util/assets/CIELab_to_UPLab.icc  Normal file
Binary file not shown.
1020  invokeai/backend/image_util/composition.py  Normal file
File diff suppressed because it is too large.
@@ -469,7 +469,7 @@ class ModelProbe(object):
        """
        # scan model
        scan_result = scan_file_path(checkpoint)
        if scan_result.infected_files != 0:
        if scan_result.infected_files != 0 or scan_result.scan_err:
            raise Exception("The model {model_name} is potentially infected by malware. Aborting import.")

@@ -485,6 +485,7 @@ MODEL_NAME_TO_PREPROCESSOR = {
    "lineart anime": "lineart_anime_image_processor",
    "lineart_anime": "lineart_anime_image_processor",
    "lineart": "lineart_image_processor",
    "soft": "hed_image_processor",
    "softedge": "hed_image_processor",
    "hed": "hed_image_processor",
    "shuffle": "content_shuffle_image_processor",

@@ -44,7 +44,7 @@ def _fast_safetensors_reader(path: str) -> Dict[str, torch.Tensor]:
    return checkpoint


def read_checkpoint_meta(path: Union[str, Path], scan: bool = False) -> Dict[str, torch.Tensor]:
def read_checkpoint_meta(path: Union[str, Path], scan: bool = True) -> Dict[str, torch.Tensor]:
    if str(path).endswith(".safetensors"):
        try:
            path_str = path.as_posix() if isinstance(path, Path) else path
@@ -55,7 +55,7 @@ def read_checkpoint_meta(path: Union[str, Path], scan: bool = False) -> Dict[str
    else:
        if scan:
            scan_result = scan_file_path(path)
            if scan_result.infected_files != 0:
            if scan_result.infected_files != 0 or scan_result.scan_err:
                raise Exception(f'The model file "{path}" is potentially infected by malware. Aborting import.')
    if str(path).endswith(".gguf"):
        # The GGUF reader used here uses numpy memmap, so these tensors are not loaded into memory during this function
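Both scan-related hunks tighten the same gate: previously a scan that errored out (scan_err) was treated as clean, whereas now it aborts the import, and read_checkpoint_meta scans by default. (Incidentally, the first hunk's message string lacks an f-prefix, so {model_name} is emitted literally.) A minimal sketch of the resulting behavior, assuming scan_file_path is picklescan's scanner as used in the hunks and with a hypothetical helper name:

from picklescan.scanner import scan_file_path

def ensure_not_infected(path: str) -> None:
    result = scan_file_path(path)
    # A scan error is now treated like a positive detection rather than being ignored.
    if result.infected_files != 0 or result.scan_err:
        raise RuntimeError(f'The model file "{path}" is potentially infected by malware. Aborting import.')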
@@ -58,7 +58,7 @@
    "@dagrejs/dagre": "^1.1.4",
    "@dagrejs/graphlib": "^2.2.4",
    "@fontsource-variable/inter": "^5.1.0",
    "@invoke-ai/ui-library": "^0.0.43",
    "@invoke-ai/ui-library": "^0.0.44",
    "@nanostores/react": "^0.7.3",
    "@reduxjs/toolkit": "2.2.3",
    "@roarr/browser-log-writer": "^1.3.0",
76  invokeai/frontend/web/pnpm-lock.yaml  generated
@@ -24,8 +24,8 @@ dependencies:
      specifier: ^5.1.0
      version: 5.1.0
    '@invoke-ai/ui-library':
      specifier: ^0.0.43
      version: 0.0.43(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.1.0)(@types/react@18.3.11)(i18next@23.15.1)(react-dom@18.3.1)(react@18.3.1)
      specifier: ^0.0.44
      version: 0.0.44(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.1.0)(@types/react@18.3.11)(i18next@23.15.1)(react-dom@18.3.1)(react@18.3.1)
    '@nanostores/react':
      specifier: ^0.7.3
      version: 0.7.3(nanostores@0.11.3)(react@18.3.1)
@@ -515,8 +515,8 @@ packages:
    resolution: {integrity: sha512-MV6D4VLRIHr4PkW4zMyqfrNS1mPlCTiCXwvYGtDFQYr+xHFfonhAuf9WjsSc0nyp2m0OdkSLnzmVKkZFLo25Tg==}
    dev: false

  /@chakra-ui/anatomy@2.3.4:
    resolution: {integrity: sha512-fFIYN7L276gw0Q7/ikMMlZxP7mvnjRaWJ7f3Jsf9VtDOi6eAYIBRrhQe6+SZ0PGmoOkRaBc7gSE5oeIbgFFyrw==}
  /@chakra-ui/anatomy@2.3.5:
    resolution: {integrity: sha512-3im33cUOxCbISjaBlINE2u8BOwJSCdzpjCX0H+0JxK2xz26UaVA5xeI3NYHUoxDnr/QIrgfrllGxS0szYwOcyg==}
    dev: false

  /@chakra-ui/breakpoint-utils@2.0.8:
@@ -573,12 +573,12 @@ packages:
      react: 18.3.1
    dev: false

  /@chakra-ui/hooks@2.4.2(react@18.3.1):
    resolution: {integrity: sha512-LRKiVE1oA7afT5tbbSKAy7Uas2xFHE6IkrQdbhWCHmkHBUtPvjQQDgwtnd4IRZPmoEfNGwoJ/MQpwOM/NRTTwA==}
  /@chakra-ui/hooks@2.4.3(react@18.3.1):
    resolution: {integrity: sha512-Sr2zsoTZw3p7HbrUy4aLpTIkE2XXUelAUgg3NGwMzrmx75bE0qVyiuuTFOuyEzGxYVV2Fe8QtcKKilm6RwzTGg==}
    peerDependencies:
      react: '>=18'
    dependencies:
      '@chakra-ui/utils': 2.2.2(react@18.3.1)
      '@chakra-ui/utils': 2.2.3(react@18.3.1)
      '@zag-js/element-size': 0.31.1
      copy-to-clipboard: 3.3.3
      framesync: 6.1.2
@@ -596,13 +596,13 @@ packages:
      react: 18.3.1
    dev: false

  /@chakra-ui/icons@2.2.4(@chakra-ui/react@2.10.2)(react@18.3.1):
  /@chakra-ui/icons@2.2.4(@chakra-ui/react@2.10.4)(react@18.3.1):
    resolution: {integrity: sha512-l5QdBgwrAg3Sc2BRqtNkJpfuLw/pWRDwwT58J6c4PqQT6wzXxyNa8Q0PForu1ltB5qEiFb1kxr/F/HO1EwNa6g==}
    peerDependencies:
      '@chakra-ui/react': '>=2.0.0'
      react: '>=18'
    dependencies:
      '@chakra-ui/react': 2.10.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1)
      '@chakra-ui/react': 2.10.4(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -825,8 +825,8 @@ packages:
      react: 18.3.1
    dev: false

  /@chakra-ui/react@2.10.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1):
    resolution: {integrity: sha512-TfIHTqTlxTHYJZBtpiR5EZasPUrLYKJxdbHkdOJb5G1OQ+2c5kKl5XA7c2pMtsEptzb7KxAAIB62t3hxdfWp1w==}
  /@chakra-ui/react@2.10.4(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1):
    resolution: {integrity: sha512-XyRWnuZ1Uw7Mlj5pKUGO5/WhnIHP/EOrpy6lGZC1yWlkd0eIfIpYMZ1ALTZx4KPEdbBaes48dgiMT2ROCqLhkA==}
    peerDependencies:
      '@emotion/react': '>=11'
      '@emotion/styled': '>=11'
@@ -834,10 +834,10 @@ packages:
      react: '>=18'
      react-dom: '>=18'
    dependencies:
      '@chakra-ui/hooks': 2.4.2(react@18.3.1)
      '@chakra-ui/styled-system': 2.11.2(react@18.3.1)
      '@chakra-ui/theme': 3.4.6(@chakra-ui/styled-system@2.11.2)(react@18.3.1)
      '@chakra-ui/utils': 2.2.2(react@18.3.1)
      '@chakra-ui/hooks': 2.4.3(react@18.3.1)
      '@chakra-ui/styled-system': 2.12.1(react@18.3.1)
      '@chakra-ui/theme': 3.4.7(@chakra-ui/styled-system@2.12.1)(react@18.3.1)
      '@chakra-ui/utils': 2.2.3(react@18.3.1)
      '@emotion/react': 11.13.3(@types/react@18.3.11)(react@18.3.1)
      '@emotion/styled': 11.13.0(@emotion/react@11.13.3)(@types/react@18.3.11)(react@18.3.1)
      '@popperjs/core': 2.11.8
@@ -868,10 +868,10 @@ packages:
      react: 18.3.1
    dev: false

  /@chakra-ui/styled-system@2.11.2(react@18.3.1):
    resolution: {integrity: sha512-y++z2Uop+hjfZX9mbH88F1ikazPv32asD2er56zMJBemUAzweXnHTpiCQbluEDSUDhqmghVZAdb+5L4XLbsRxA==}
  /@chakra-ui/styled-system@2.12.1(react@18.3.1):
    resolution: {integrity: sha512-DQph1nDiCPtgze7nDe0a36530ByXb5VpPosKGyWMvKocVeZJcDtYG6XM0+V5a0wKuFBXsViBBRIFUTiUesJAcg==}
    dependencies:
      '@chakra-ui/utils': 2.2.2(react@18.3.1)
      '@chakra-ui/utils': 2.2.3(react@18.3.1)
      csstype: 3.1.3
    transitivePeerDependencies:
      - react
@@ -915,14 +915,14 @@ packages:
      color2k: 2.0.3
    dev: false

  /@chakra-ui/theme-tools@2.2.6(@chakra-ui/styled-system@2.11.2)(react@18.3.1):
    resolution: {integrity: sha512-3UhKPyzKbV3l/bg1iQN9PBvffYp+EBOoYMUaeTUdieQRPFzo2jbYR0lNCxqv8h5aGM/k54nCHU2M/GStyi9F2A==}
  /@chakra-ui/theme-tools@2.2.7(@chakra-ui/styled-system@2.12.1)(react@18.3.1):
    resolution: {integrity: sha512-K/VJd0QcnKik7m+qZTkggqNLep6+MPUu8IP5TUpHsnSM5R/RVjsJIR7gO8IZVAIMIGLLTIhGshHxeMekqv6LcQ==}
    peerDependencies:
      '@chakra-ui/styled-system': '>=2.0.0'
    dependencies:
      '@chakra-ui/anatomy': 2.3.4
      '@chakra-ui/styled-system': 2.11.2(react@18.3.1)
      '@chakra-ui/utils': 2.2.2(react@18.3.1)
      '@chakra-ui/anatomy': 2.3.5
      '@chakra-ui/styled-system': 2.12.1(react@18.3.1)
      '@chakra-ui/utils': 2.2.3(react@18.3.1)
      color2k: 2.0.3
    transitivePeerDependencies:
      - react
@@ -948,15 +948,15 @@ packages:
      '@chakra-ui/theme-tools': 2.1.2(@chakra-ui/styled-system@2.9.2)
    dev: false

  /@chakra-ui/theme@3.4.6(@chakra-ui/styled-system@2.11.2)(react@18.3.1):
    resolution: {integrity: sha512-ZwFBLfiMC3URwaO31ONXoKH9k0TX0OW3UjdPF3EQkQpYyrk/fm36GkkzajjtdpWEd7rzDLRsQjPmvwNaSoNDtg==}
  /@chakra-ui/theme@3.4.7(@chakra-ui/styled-system@2.12.1)(react@18.3.1):
    resolution: {integrity: sha512-pfewthgZTFNUYeUwGvhPQO/FTIyf375cFV1AT8N1y0aJiw4KDe7YTGm7p0aFy4AwAjH2ydMgeEx/lua4tx8qyQ==}
    peerDependencies:
      '@chakra-ui/styled-system': '>=2.8.0'
    dependencies:
      '@chakra-ui/anatomy': 2.3.4
      '@chakra-ui/styled-system': 2.11.2(react@18.3.1)
      '@chakra-ui/theme-tools': 2.2.6(@chakra-ui/styled-system@2.11.2)(react@18.3.1)
      '@chakra-ui/utils': 2.2.2(react@18.3.1)
      '@chakra-ui/anatomy': 2.3.5
      '@chakra-ui/styled-system': 2.12.1(react@18.3.1)
      '@chakra-ui/theme-tools': 2.2.7(@chakra-ui/styled-system@2.12.1)(react@18.3.1)
      '@chakra-ui/utils': 2.2.3(react@18.3.1)
    transitivePeerDependencies:
      - react
    dev: false
@@ -981,8 +981,8 @@ packages:
      lodash.mergewith: 4.6.2
    dev: false

  /@chakra-ui/utils@2.2.2(react@18.3.1):
    resolution: {integrity: sha512-jUPLT0JzRMWxpdzH6c+t0YMJYrvc5CLericgITV3zDSXblkfx3DsYXqU11DJTSGZI9dUKzM1Wd0Wswn4eJwvFQ==}
  /@chakra-ui/utils@2.2.3(react@18.3.1):
    resolution: {integrity: sha512-cldoCQuexZ6e07/9hWHKD4l1QXXlM1Nax9tuQOBvVf/EgwNZt3nZu8zZRDFlhAOKCTQDkmpLTTu+eXXjChNQOw==}
    peerDependencies:
      react: '>=16.8.0'
    dependencies:
@@ -1675,20 +1675,20 @@ packages:
      prettier: 3.3.3
    dev: true

  /@invoke-ai/ui-library@0.0.43(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.1.0)(@types/react@18.3.11)(i18next@23.15.1)(react-dom@18.3.1)(react@18.3.1):
    resolution: {integrity: sha512-t3fPYyks07ue3dEBPJuTHbeDLnDckDCOrtvc07mMDbLOnlPEZ0StaeiNGH+oO8qLzAuMAlSTdswgHfzTc2MmPw==}
  /@invoke-ai/ui-library@0.0.44(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.1.0)(@types/react@18.3.11)(i18next@23.15.1)(react-dom@18.3.1)(react@18.3.1):
    resolution: {integrity: sha512-PDseHmdr8oi8cmrpx3UwIYHn4NduAJX2R0pM0pyM54xrCMPMgYiCbC/eOs8Gt4fBc2ziiPZ9UGoW4evnE3YJsg==}
    peerDependencies:
      '@fontsource-variable/inter': ^5.0.16
      react: ^18.2.0
      react-dom: ^18.2.0
    dependencies:
      '@chakra-ui/anatomy': 2.3.4
      '@chakra-ui/icons': 2.2.4(@chakra-ui/react@2.10.2)(react@18.3.1)
      '@chakra-ui/anatomy': 2.2.2
      '@chakra-ui/icons': 2.2.4(@chakra-ui/react@2.10.4)(react@18.3.1)
      '@chakra-ui/layout': 2.3.1(@chakra-ui/system@2.6.2)(react@18.3.1)
      '@chakra-ui/portal': 2.1.0(react-dom@18.3.1)(react@18.3.1)
      '@chakra-ui/react': 2.10.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1)
      '@chakra-ui/styled-system': 2.11.2(react@18.3.1)
      '@chakra-ui/theme-tools': 2.2.6(@chakra-ui/styled-system@2.11.2)(react@18.3.1)
      '@chakra-ui/react': 2.10.4(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1)
      '@chakra-ui/styled-system': 2.9.2
      '@chakra-ui/theme-tools': 2.1.2(@chakra-ui/styled-system@2.9.2)
      '@emotion/react': 11.13.3(@types/react@18.3.11)(react@18.3.1)
      '@emotion/styled': 11.13.0(@emotion/react@11.13.3)(@types/react@18.3.11)(react@18.3.1)
      '@fontsource-variable/inter': 5.1.0
@@ -1443,7 +1443,6 @@
    "deleteReferenceImage": "Referenzbild löschen",
    "referenceImage": "Referenzbild",
    "opacity": "Opazität",
    "resetCanvas": "Leinwand zurücksetzen",
    "removeBookmark": "Lesezeichen entfernen",
    "rasterLayer": "Raster-Ebene",
    "rasterLayers_withCount_visible": "Raster-Ebenen ({{count}})",
@@ -176,7 +176,8 @@
    "reset": "Reset",
    "none": "None",
    "new": "New",
    "generating": "Generating"
    "generating": "Generating",
    "warnings": "Warnings"
  },
  "hrf": {
    "hrf": "High Resolution Fix",
@@ -263,7 +264,8 @@
    "iterations_one": "Iteration",
    "iterations_other": "Iterations",
    "generations_one": "Generation",
    "generations_other": "Generations"
    "generations_other": "Generations",
    "batchSize": "Batch Size"
  },
  "invocationCache": {
    "invocationCache": "Invocation Cache",
@@ -977,6 +979,8 @@
    "zoomOutNodes": "Zoom Out",
    "betaDesc": "This invocation is in beta. Until it is stable, it may have breaking changes during app updates. We plan to support this invocation long-term.",
    "prototypeDesc": "This invocation is a prototype. It may have breaking changes during app updates and may be removed at any time.",
    "internalDesc": "This invocation is used internally by Invoke. It may have breaking changes during app updates and may be removed at any time.",
    "specialDesc": "This invocation some special handling in the app. For example, Batch nodes are used to queue multiple graphs from a single workflow.",
    "imageAccessError": "Unable to find image {{image_name}}, resetting to default",
    "boardAccessError": "Unable to find board {{board_id}}, resetting to default",
    "modelAccessError": "Unable to find model {{key}}, resetting to default",
@@ -1037,6 +1041,7 @@
    "noNodesInGraph": "No nodes in graph",
    "systemDisconnected": "System disconnected",
    "layer": {
      "unsupportedModel": "layer not supported for selected base model",
      "controlAdapterNoModelSelected": "no Control Adapter model selected",
      "controlAdapterIncompatibleBaseModel": "incompatible Control Adapter base model",
      "t2iAdapterIncompatibleBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, bbox width is {{width}}",
@@ -1047,7 +1052,10 @@
      "ipAdapterIncompatibleBaseModel": "incompatible IP Adapter base model",
      "ipAdapterNoImageSelected": "no IP Adapter image selected",
      "rgNoPromptsOrIPAdapters": "no text prompts or IP Adapters",
      "rgNoRegion": "no region selected"
      "rgNegativePromptNotSupported": "negative prompt not supported for selected base model",
      "rgReferenceImagesNotSupported": "regional reference images not supported for selected base model",
      "rgAutoNegativeNotSupported": "auto-negative not supported for selected base model",
      "emptyLayer": "empty layer"
    }
  },
  "maskBlur": "Mask Blur",
@@ -1316,8 +1324,9 @@
  "controlNetBeginEnd": {
    "heading": "Begin / End Step Percentage",
    "paragraphs": [
      "The part of the of the denoising process that will have the Control Adapter applied.",
      "Generally, Control Adapters applied at the start of the process guide composition, and Control Adapters applied at the end guide details."
      "This setting determines which portion of the denoising (generation) process incorporates the guidance from this layer.",
      "• Start Step (%): Specifies when to begin applying the guidance from this layer during the generation process.",
      "• End Step (%): Specifies when to stop applying this layer's guidance and revert general guidance from the model and other settings."
    ]
  },
  "controlNetControlMode": {
@@ -1335,13 +1344,15 @@
    "paragraphs": ["Method to fit Control Adapter's input image size to the output generation size."]
  },
  "ipAdapterMethod": {
    "heading": "Method",
    "paragraphs": ["Method by which to apply the current IP Adapter."]
    "heading": "Mode",
    "paragraphs": ["The mode defines how the reference image will guide the generation process."]
  },
  "controlNetWeight": {
    "heading": "Weight",
    "paragraphs": [
      "Weight of the Control Adapter. Higher weight will lead to larger impacts on the final image."
      "Adjusts how strongly the layer influences the generation process",
      "• Higher Weight (.75-2): Creates a more significant impact on the final result.",
      "• Lower Weight (0-.75): Creates a smaller impact on the final result."
    ]
  },
  "dynamicPrompts": {
@@ -1663,7 +1674,6 @@
    "newControlLayerError": "Problem Creating Control Layer",
    "newRasterLayerOk": "Created Raster Layer",
    "newRasterLayerError": "Problem Creating Raster Layer",
    "newFromImage": "New from Image",
    "pullBboxIntoLayerOk": "Bbox Pulled Into Layer",
    "pullBboxIntoLayerError": "Problem Pulling BBox Into Layer",
    "pullBboxIntoReferenceImageOk": "Bbox Pulled Into ReferenceImage",
@@ -1676,7 +1686,7 @@
    "mergingLayers": "Merging layers",
    "clearHistory": "Clear History",
    "bboxOverlay": "Show Bbox Overlay",
    "resetCanvas": "Reset Canvas",
    "newSession": "New Session",
    "clearCaches": "Clear Caches",
    "recalculateRects": "Recalculate Rects",
    "clipToBbox": "Clip Strokes to Bbox",
@@ -1708,8 +1718,10 @@
    "controlLayer": "Control Layer",
    "inpaintMask": "Inpaint Mask",
    "regionalGuidance": "Regional Guidance",
    "canvasAsRasterLayer": "$t(controlLayers.canvas) as $t(controlLayers.rasterLayer)",
    "canvasAsControlLayer": "$t(controlLayers.canvas) as $t(controlLayers.controlLayer)",
    "asRasterLayer": "As $t(controlLayers.rasterLayer)",
    "asRasterLayerResize": "As $t(controlLayers.rasterLayer) (Resize)",
    "asControlLayer": "As $t(controlLayers.controlLayer)",
    "asControlLayerResize": "As $t(controlLayers.controlLayer) (Resize)",
    "referenceImage": "Reference Image",
    "regionalReferenceImage": "Regional Reference Image",
    "globalReferenceImage": "Global Reference Image",
@@ -1777,6 +1789,7 @@
    "pullBboxIntoLayer": "Pull Bbox into Layer",
    "pullBboxIntoReferenceImage": "Pull Bbox into Reference Image",
    "showProgressOnCanvas": "Show Progress on Canvas",
    "useImage": "Use Image",
    "prompt": "Prompt",
    "negativePrompt": "Negative Prompt",
    "beginEndStepPercentShort": "Begin/End %",
@@ -1785,8 +1798,11 @@
    "newGallerySessionDesc": "This will clear the canvas and all settings except for your model selection. Generations will be sent to the gallery.",
    "newCanvasSession": "New Canvas Session",
    "newCanvasSessionDesc": "This will clear the canvas and all settings except for your model selection. Generations will be staged on the canvas.",
    "resetCanvasLayers": "Reset Canvas Layers",
    "resetGenerationSettings": "Reset Generation Settings",
    "replaceCurrent": "Replace Current",
    "controlLayerEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, or draw on the canvas to get started.",
    "referenceImageEmptyState": "<UploadButton>Upload an image</UploadButton> or drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer to get started.",
    "controlMode": {
      "controlMode": "Control Mode",
      "balanced": "Balanced (recommended)",
@@ -1795,10 +1811,13 @@
      "megaControl": "Mega Control"
    },
    "ipAdapterMethod": {
      "ipAdapterMethod": "IP Adapter Method",
      "ipAdapterMethod": "Mode",
      "full": "Style and Composition",
      "fullDesc": "Applies visual style (colors, textures) & composition (layout, structure).",
      "style": "Style Only",
      "composition": "Composition Only"
      "styleDesc": "Applies visual style (colors, textures) without considering its layout.",
      "composition": "Composition Only",
      "compositionDesc": "Replicates layout & structure while ignoring the reference's style."
    },
    "fill": {
      "fillColor": "Fill Color",
@@ -2114,11 +2133,73 @@
  "whatsNew": {
    "whatsNewInInvoke": "What's New in Invoke",
    "items": [
      "<StrongComponent>SD 3.5</StrongComponent>: Support for SD 3.5 Medium and Large.",
      "<StrongComponent>Canvas</StrongComponent>: Streamlined Control Layer processing and improved default Control settings."
      "<StrongComponent>Workflows</StrongComponent>: Run a workflow for a collection of images using the new <StrongComponent>Image Batch</StrongComponent> node.",
      "<StrongComponent>FLUX</StrongComponent>: Support for XLabs IP Adapter v2."
    ],
    "readReleaseNotes": "Read Release Notes",
    "watchRecentReleaseVideos": "Watch Recent Release Videos",
    "watchUiUpdatesOverview": "Watch UI Updates Overview"
  },
  "supportVideos": {
    "supportVideos": "Support Videos",
    "gettingStarted": "Getting Started",
    "controlCanvas": "Control Canvas",
    "watch": "Watch",
    "studioSessionsDesc1": "Check out the <StudioSessionsPlaylistLink /> for Invoke deep dives.",
    "studioSessionsDesc2": "Join our <DiscordLink /> to participate in the live sessions and ask questions. Sessions are uploaded to the playlist the following week.",
    "videos": {
      "creatingYourFirstImage": {
        "title": "Creating Your First Image",
        "description": "Introduction to creating an image from scratch using Invoke's tools."
      },
      "usingControlLayersAndReferenceGuides": {
        "title": "Using Control Layers and Reference Guides",
        "description": "Learn how to guide your image creation with control layers and reference images."
      },
      "understandingImageToImageAndDenoising": {
        "title": "Understanding Image-to-Image and Denoising",
        "description": "Overview of image-to-image transformations and denoising in Invoke."
      },
      "exploringAIModelsAndConceptAdapters": {
        "title": "Exploring AI Models and Concept Adapters",
        "description": "Dive into AI models and how to use concept adapters for creative control."
      },
      "creatingAndComposingOnInvokesControlCanvas": {
        "title": "Creating and Composing on Invoke's Control Canvas",
        "description": "Learn to compose images using Invoke's control canvas."
      },
      "upscaling": {
        "title": "Upscaling",
        "description": "How to upscale images with Invoke's tools to enhance resolution."
      },
      "howDoIGenerateAndSaveToTheGallery": {
        "title": "How Do I Generate and Save to the Gallery?",
        "description": "Steps to generate and save images to the gallery."
      },
      "howDoIEditOnTheCanvas": {
        "title": "How Do I Edit on the Canvas?",
        "description": "Guide to editing images directly on the canvas."
      },
      "howDoIDoImageToImageTransformation": {
        "title": "How Do I Do Image-to-Image Transformation?",
        "description": "Tutorial on performing image-to-image transformations in Invoke."
      },
      "howDoIUseControlNetsAndControlLayers": {
        "title": "How Do I Use Control Nets and Control Layers?",
        "description": "Learn to apply control layers and controlnets to your images."
      },
      "howDoIUseGlobalIPAdaptersAndReferenceImages": {
        "title": "How Do I Use Global IP Adapters and Reference Images?",
        "description": "Introduction to adding reference images and global IP adapters."
      },
      "howDoIUseInpaintMasks": {
        "title": "How Do I Use Inpaint Masks?",
        "description": "How to apply inpaint masks for image correction and variation."
      },
      "howDoIOutpaint": {
        "title": "How Do I Outpaint?",
        "description": "Guide to outpainting beyond the original image borders."
      }
    }
  }
}
@@ -1985,7 +1985,6 @@
    "inpaintMask_withCount_many": "Remplir les masques",
    "inpaintMask_withCount_other": "Remplir les masques",
    "newImg2ImgCanvasFromImage": "Nouvelle Img2Img à partir de l'image",
    "resetCanvas": "Réinitialiser la Toile",
    "bboxOverlay": "Afficher la superposition des Bounding Box",
    "moveToFront": "Déplacer vers le permier plan",
    "moveToBack": "Déplacer vers l'arrière plan",
@@ -2034,7 +2033,6 @@
      "help2": "Commencez par un point <Bold>Inclure</Bold> au sein de l'objet cible. Ajoutez d'autres points pour affiner la sélection. Moins de points produisent généralement de meilleurs résultats.",
      "help3": "Inversez la sélection pour sélectionner tout sauf l'objet cible."
    },
    "canvasAsControlLayer": "$t(controlLayers.canvas) en tant que $t(controlLayers.controlLayer)",
    "convertRegionalGuidanceTo": "Convertir $t(controlLayers.regionalGuidance) vers",
    "copyRasterLayerTo": "Copier $t(controlLayers.rasterLayer) vers",
    "newControlLayer": "Nouveau $t(controlLayers.controlLayer)",
@@ -2044,8 +2042,7 @@
    "convertInpaintMaskTo": "Convertir $t(controlLayers.inpaintMask) vers",
    "copyControlLayerTo": "Copier $t(controlLayers.controlLayer) vers",
    "newInpaintMask": "Nouveau $t(controlLayers.inpaintMask)",
    "newRasterLayer": "Nouveau $t(controlLayers.rasterLayer)",
    "canvasAsRasterLayer": "$t(controlLayers.canvas) en tant que $t(controlLayers.rasterLayer)"
    "newRasterLayer": "Nouveau $t(controlLayers.rasterLayer)"
  },
  "upscaling": {
    "exceedsMaxSizeDetails": "La limite maximale d'agrandissement est de {{maxUpscaleDimension}}x{{maxUpscaleDimension}} pixels. Veuillez essayer une image plus petite ou réduire votre sélection d'échelle.",
@@ -1750,7 +1750,6 @@
    "newRegionalReferenceImageError": "Problema nella creazione dell'immagine di riferimento regionale",
    "newControlLayerOk": "Livello di controllo creato",
    "bboxOverlay": "Mostra sovrapposizione riquadro",
    "resetCanvas": "Reimposta la tela",
    "outputOnlyMaskedRegions": "In uscita solo le regioni generate",
    "enableAutoNegative": "Abilita Auto Negativo",
    "disableAutoNegative": "Disabilita Auto Negativo",
@@ -2036,8 +2035,6 @@
    "convertControlLayerTo": "Converti $t(controlLayers.controlLayer) in",
    "newRasterLayer": "Nuovo $t(controlLayers.rasterLayer)",
    "newRegionalGuidance": "Nuova $t(controlLayers.regionalGuidance)",
    "canvasAsRasterLayer": "$t(controlLayers.canvas) come $t(controlLayers.rasterLayer)",
    "canvasAsControlLayer": "$t(controlLayers.canvas) come $t(controlLayers.controlLayer)",
    "convertInpaintMaskTo": "Converti $t(controlLayers.inpaintMask) in",
    "copyRegionalGuidanceTo": "Copia $t(controlLayers.regionalGuidance) in",
    "convertRasterLayerTo": "Converti $t(controlLayers.rasterLayer) in",
@@ -2046,7 +2043,6 @@
    "newInpaintMask": "Nuova $t(controlLayers.inpaintMask)",
    "replaceCurrent": "Sostituisci corrente",
    "mergeDown": "Unire in basso",
    "newFromImage": "Nuovo da Immagine",
    "mergingLayers": "Unione dei livelli",
    "controlLayerEmptyState": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton> su questo livello oppure disegna sulla tela per iniziare."
  },
@@ -637,7 +637,6 @@
      "cancel": "キャンセル",
      "reset": "リセット"
    },
    "resetCanvas": "キャンバスをリセット",
    "cropLayerToBbox": "レイヤーをバウンディングボックスでクロップ",
    "convertInpaintMaskTo": "$t(controlLayers.inpaintMask)を変換",
    "regionalGuidance_withCount_other": "領域ガイダンス",
@@ -1660,7 +1660,6 @@
    "clearCaches": "Очистить кэши",
    "recalculateRects": "Пересчитать прямоугольники",
    "saveBboxToGallery": "Сохранить рамку в галерею",
    "resetCanvas": "Сбросить холст",
    "canvas": "Холст",
    "global": "Глобальный",
    "newGlobalReferenceImageError": "Проблема с созданием глобального эталонного изображения",
@@ -1601,11 +1601,9 @@
    "bookmark": "Đánh Dấu Để Đổi Nhanh",
    "saveCanvasToGallery": "Lưu Canvas Vào Thư Viện",
    "cropLayerToBbox": "Xén Layer Vào Hộp Giới Hạn",
    "newFromImage": "Mới Từ Ảnh",
    "mergeDown": "Gộp Xuống",
    "mergeVisibleError": "Lỗi khi gộp layer",
    "bboxOverlay": "Hiển Thị Lớp Phủ Trên Hộp Giới Hạn",
    "resetCanvas": "Khởi Động Lại Canvas",
    "duplicate": "Nhân Bản",
    "moveForward": "Chuyển Lên Đầu",
    "fitBboxToLayers": "Xếp Vừa Hộp Giới Hạn Vào Layer",
@@ -1643,7 +1641,6 @@
    "replaceCurrent": "Thay Đổi Cái Hiện Tại",
    "controlLayers_withCount_visible": "Layer Điều Khiển Được ({{count}})",
    "hidingType": "Ẩn {{type}}",
    "canvasAsRasterLayer": "Biến $t(controlLayers.canvas) Thành $t(controlLayers.rasterLayer)",
    "newImg2ImgCanvasFromImage": "Chuyển Đổi Ảnh Sang Ảnh Mới Từ Ảnh",
    "copyToClipboard": "Sao Chép Vào Clipboard",
    "logDebugInfo": "Thông Tin Log Gỡ Lỗi",
@@ -1670,7 +1667,6 @@
    "sendToGallery": "Chuyển Tới Thư Viện",
    "unlocked": "Mở Khoá",
    "addReferenceImage": "Thêm $t(controlLayers.referenceImage)",
    "canvasAsControlLayer": "Biến $t(controlLayers.canvas) Thành $t(controlLayers.controlLayer)",
    "sendingToCanvas": "Chuyển Ảnh Tạo Sinh Vào Canvas",
    "sendingToGallery": "Chuyển Ảnh Tạo Sinh Vào Thư Viện",
    "viewProgressOnCanvas": "Xem quá trình xử lý và ảnh đầu ra trong <Btn>Canvas</Btn>.",
@@ -1720,8 +1720,6 @@
    "sendToCanvas": "发送到画布",
    "controlLayers_withCount_visible": "控制图层({{count}} 个)",
    "rasterLayers_withCount_visible": "栅格图层({{count}} 个)",
    "canvasAsRasterLayer": "将 $t(controlLayers.canvas) 转换为 $t(controlLayers.rasterLayer)",
    "canvasAsControlLayer": "将 $t(controlLayers.canvas) 转换为 $t(controlLayers.controlLayer)",
    "convertRegionalGuidanceTo": "将 $t(controlLayers.regionalGuidance) 转换为",
    "newInpaintMask": "新建 $t(controlLayers.inpaintMask)",
    "regionIsEmpty": "选定区域为空",
@@ -1760,11 +1758,9 @@
    "pullBboxIntoLayerError": "将边界框导入图层时出现问题",
    "pullBboxIntoLayerOk": "边界框已导入到图层",
    "sendToCanvasDesc": "按下“Invoke”按钮会将您的工作进度暂存到画布上。",
    "resetCanvas": "重置画布",
    "sendToGallery": "发送到图库",
    "sendToGalleryDesc": "按下“Invoke”键会生成并保存一张唯一的图像到您的图库中。",
    "rasterLayer_withCount_other": "栅格图层",
    "newFromImage": "从图像创建新内容",
    "mergeDown": "向下合并",
    "clearCaches": "清除缓存",
    "recalculateRects": "重新计算矩形",
@@ -27,6 +27,7 @@ import { ClearQueueConfirmationsAlertDialog } from 'features/queue/components/Cl
import { DeleteStylePresetDialog } from 'features/stylePresets/components/DeleteStylePresetDialog';
import { StylePresetModal } from 'features/stylePresets/components/StylePresetForm/StylePresetModal';
import RefreshAfterResetModal from 'features/system/components/SettingsModal/RefreshAfterResetModal';
import { VideosModal } from 'features/system/components/VideosModal/VideosModal';
import { configChanged } from 'features/system/store/configSlice';
import { selectLanguage } from 'features/system/store/systemSelectors';
import { AppContent } from 'features/ui/components/AppContent';
@@ -108,6 +109,7 @@ const App = ({ config = DEFAULT_CONFIG, studioInitAction }: Props) => {
      <NewCanvasSessionDialog />
      <ImageContextMenu />
      <FullscreenDropzone />
      <VideosModal />
    </ErrorBoundary>
  );
};
@@ -1,3 +1,4 @@
import { useStore } from '@nanostores/react';
import { useAppStore } from 'app/store/storeHooks';
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
import { withResultAsync } from 'common/util/result';
@@ -9,6 +10,7 @@ import { imageDTOToImageObject } from 'features/controlLayers/store/util';
import { $imageViewer } from 'features/gallery/components/ImageViewer/useImageViewer';
import { sentImageToCanvas } from 'features/gallery/store/actions';
import { parseAndRecallAllMetadata } from 'features/metadata/util/handlers';
import { $hasTemplates } from 'features/nodes/store/nodesSlice';
import { $isWorkflowListMenuIsOpen } from 'features/nodes/store/workflowListMenu';
import { $isStylePresetsMenuOpen, activeStylePresetIdChanged } from 'features/stylePresets/store/stylePresetSlice';
import { toast } from 'features/toast/toast';
@@ -51,6 +53,7 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
  const { t } = useTranslation();
  // Use a ref to ensure that we only perform the action once
  const didInit = useRef(false);
  const didParseOpenAPISchema = useStore($hasTemplates);
  const store = useAppStore();
  const { getAndLoadWorkflow } = useGetAndLoadLibraryWorkflow();

@@ -174,7 +177,7 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
  );

  useEffect(() => {
    if (didInit.current || !action) {
    if (didInit.current || !action || !didParseOpenAPISchema) {
      return;
    }

@@ -187,22 +190,29 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
      case 'selectStylePreset':
        handleSelectStylePreset(action.data.stylePresetId);
        break;

      case 'sendToCanvas':
        handleSendToCanvas(action.data.imageName);
        break;

      case 'useAllParameters':
        handleUseAllMetadata(action.data.imageName);
        break;

      case 'goToDestination':
        handleGoToDestination(action.data.destination);
        break;

      default:
        break;
    }
  }, [
    handleSendToCanvas,
    handleUseAllMetadata,
    action,
    handleLoadWorkflow,
    handleSelectStylePreset,
    handleGoToDestination,
    handleLoadWorkflow,
    didParseOpenAPISchema,
  ]);
};
@@ -4,7 +4,7 @@ import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'
import { buildAdHocPostProcessingGraph } from 'features/nodes/util/graph/buildAdHocPostProcessingGraph';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { queueApi } from 'services/api/endpoints/queue';
import { enqueueMutationFixedCacheKeyOptions, queueApi } from 'services/api/endpoints/queue';
import type { BatchConfig, ImageDTO } from 'services/api/types';
import type { JsonObject } from 'type-fest';

@@ -32,9 +32,7 @@ export const addAdHocPostProcessingRequestedListener = (startAppListening: AppSt

    try {
      const req = dispatch(
        queueApi.endpoints.enqueueBatch.initiate(enqueueBatchArg, {
          fixedCacheKey: 'enqueueBatch',
        })
        queueApi.endpoints.enqueueBatch.initiate(enqueueBatchArg, enqueueMutationFixedCacheKeyOptions)
      );

      const enqueueResult = await req.unwrap();
@@ -13,7 +13,7 @@ import { buildSDXLGraph } from 'features/nodes/util/graph/generation/buildSDXLGr
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import { toast } from 'features/toast/toast';
import { serializeError } from 'serialize-error';
import { queueApi } from 'services/api/endpoints/queue';
import { enqueueMutationFixedCacheKeyOptions, queueApi } from 'services/api/endpoints/queue';
import type { Invocation } from 'services/api/types';
import { assert, AssertionError } from 'tsafe';
import type { JsonObject } from 'type-fest';
@@ -91,9 +91,7 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)
    }

    const req = dispatch(
      queueApi.endpoints.enqueueBatch.initiate(prepareBatchResult.value, {
        fixedCacheKey: 'enqueueBatch',
      })
      queueApi.endpoints.enqueueBatch.initiate(prepareBatchResult.value, enqueueMutationFixedCacheKeyOptions)
    );
    req.reset();
@@ -6,7 +6,7 @@ import { isImageFieldCollectionInputInstance } from 'features/nodes/types/field'
import { isInvocationNode } from 'features/nodes/types/invocation';
import { buildNodesGraph } from 'features/nodes/util/graph/buildNodesGraph';
import { buildWorkflowWithValidation } from 'features/nodes/util/workflow/buildWorkflow';
import { queueApi } from 'services/api/endpoints/queue';
import { enqueueMutationFixedCacheKeyOptions, queueApi } from 'services/api/endpoints/queue';
import type { Batch, BatchConfig } from 'services/api/types';

const log = logger('workflows');
@@ -70,11 +70,7 @@ export const addEnqueueRequestedNodes = (startAppListening: AppStartListening) =
      prepend: action.payload.prepend,
    };

    const req = dispatch(
      queueApi.endpoints.enqueueBatch.initiate(batchConfig, {
        fixedCacheKey: 'enqueueBatch',
      })
    );
    const req = dispatch(queueApi.endpoints.enqueueBatch.initiate(batchConfig, enqueueMutationFixedCacheKeyOptions));
    try {
      await req.unwrap();
    } finally {
@@ -2,7 +2,7 @@ import { enqueueRequested } from 'app/store/actions';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
import { buildMultidiffusionUpscaleGraph } from 'features/nodes/util/graph/buildMultidiffusionUpscaleGraph';
import { queueApi } from 'services/api/endpoints/queue';
import { enqueueMutationFixedCacheKeyOptions, queueApi } from 'services/api/endpoints/queue';

export const addEnqueueRequestedUpscale = (startAppListening: AppStartListening) => {
  startAppListening({
@@ -16,11 +16,7 @@ export const addEnqueueRequestedUpscale = (startAppListening: AppStartListening)

    const batchConfig = prepareLinearUIBatch(state, g, prepend, noise, posCond, 'upscaling', 'gallery');

    const req = dispatch(
      queueApi.endpoints.enqueueBatch.initiate(batchConfig, {
        fixedCacheKey: 'enqueueBatch',
      })
    );
    const req = dispatch(queueApi.endpoints.enqueueBatch.initiate(batchConfig, enqueueMutationFixedCacheKeyOptions));
    try {
      await req.unwrap();
    } finally {
@@ -25,9 +25,7 @@ export type AppFeature =
  | 'invocationCache'
  | 'bulkDownload'
  | 'starterModels'
  | 'hfToken'
  | 'invocationProgressAlert';

  | 'hfToken';
/**
 * A disable-able Stable Diffusion feature
 */
@@ -0,0 +1,42 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import {
  useNewCanvasSession,
  useNewGallerySession,
} from 'features/controlLayers/components/NewSessionConfirmationAlertDialog';
import { canvasReset } from 'features/controlLayers/store/actions';
import { paramsReset } from 'features/controlLayers/store/paramsSlice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiArrowsCounterClockwiseBold, PiFilePlusBold } from 'react-icons/pi';

export const SessionMenuItems = memo(() => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const { newGallerySessionWithDialog } = useNewGallerySession();
  const { newCanvasSessionWithDialog } = useNewCanvasSession();
  const resetCanvasLayers = useCallback(() => {
    dispatch(canvasReset());
  }, [dispatch]);
  const resetGenerationSettings = useCallback(() => {
    dispatch(paramsReset());
  }, [dispatch]);
  return (
    <>
      <MenuItem icon={<PiFilePlusBold />} onClick={newGallerySessionWithDialog}>
        {t('controlLayers.newGallerySession')}
      </MenuItem>
      <MenuItem icon={<PiFilePlusBold />} onClick={newCanvasSessionWithDialog}>
        {t('controlLayers.newCanvasSession')}
      </MenuItem>
      <MenuItem icon={<PiArrowsCounterClockwiseBold />} onClick={resetCanvasLayers}>
        {t('controlLayers.resetCanvasLayers')}
      </MenuItem>
      <MenuItem icon={<PiArrowsCounterClockwiseBold />} onClick={resetGenerationSettings}>
        {t('controlLayers.resetGenerationSettings')}
      </MenuItem>
    </>
  );
});

SessionMenuItems.displayName = 'SessionMenuItems';
@@ -46,7 +46,7 @@ const REGION_TARGETS: Record<FocusRegionName, Set<HTMLElement>> = {
/**
 * The currently-focused region or `null` if no region is focused.
 */
const $focusedRegion = atom<FocusRegionName | null>(null);
export const $focusedRegion = atom<FocusRegionName | null>(null);

/**
 * A map of focus regions to atoms that indicate if that region is focused.
@@ -1,387 +0,0 @@
|
||||
import { useStore } from '@nanostores/react';
|
||||
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
|
||||
import { $true } from 'app/store/nanostores/util';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import { useCanvasManagerSafe } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
|
||||
import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice';
|
||||
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
|
||||
import { selectDynamicPromptsSlice } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
|
||||
import { getShouldProcessPrompt } from 'features/dynamicPrompts/util/getShouldProcessPrompt';
|
||||
import { $templates } from 'features/nodes/store/nodesSlice';
|
||||
import { selectNodesSlice } from 'features/nodes/store/selectors';
|
||||
import type { Templates } from 'features/nodes/store/types';
|
||||
import { selectWorkflowSettingsSlice } from 'features/nodes/store/workflowSettingsSlice';
|
||||
import { isImageFieldCollectionInputInstance, isImageFieldCollectionInputTemplate } from 'features/nodes/types/field';
|
||||
import { isInvocationNode } from 'features/nodes/types/invocation';
|
||||
import { selectUpscaleSlice } from 'features/parameters/store/upscaleSlice';
|
||||
import { selectConfigSlice } from 'features/system/store/configSlice';
|
||||
import { selectSystemSlice } from 'features/system/store/systemSlice';
|
||||
import { selectActiveTab } from 'features/ui/store/uiSelectors';
|
||||
import i18n from 'i18next';
|
||||
import { forEach, upperFirst } from 'lodash-es';
|
||||
import { useMemo } from 'react';
|
||||
import { getConnectedEdges } from 'reactflow';
|
||||
import { $isConnected } from 'services/events/stores';
|
||||
|
||||
const LAYER_TYPE_TO_TKEY = {
|
||||
reference_image: 'controlLayers.referenceImage',
|
||||
inpaint_mask: 'controlLayers.inpaintMask',
|
||||
regional_guidance: 'controlLayers.regionalGuidance',
|
||||
raster_layer: 'controlLayers.rasterLayer',
|
||||
control_layer: 'controlLayers.controlLayer',
|
||||
} as const;
|
||||
|
||||
const createSelector = (arg: {
|
||||
templates: Templates;
|
||||
isConnected: boolean;
|
||||
canvasIsFiltering: boolean;
|
||||
canvasIsTransforming: boolean;
|
||||
canvasIsRasterizing: boolean;
|
||||
canvasIsCompositing: boolean;
|
||||
canvasIsSelectingObject: boolean;
|
||||
}) => {
|
||||
const {
|
||||
templates,
|
||||
isConnected,
|
||||
canvasIsFiltering,
|
||||
canvasIsTransforming,
|
||||
canvasIsRasterizing,
|
||||
canvasIsCompositing,
|
||||
canvasIsSelectingObject,
|
||||
} = arg;
|
||||
return createMemoizedSelector(
|
||||
[
|
||||
selectSystemSlice,
|
||||
selectNodesSlice,
|
||||
selectWorkflowSettingsSlice,
|
||||
selectDynamicPromptsSlice,
|
||||
selectCanvasSlice,
|
||||
selectParamsSlice,
|
||||
selectUpscaleSlice,
|
||||
selectConfigSlice,
|
||||
selectActiveTab,
|
||||
],
|
||||
(system, nodes, workflowSettings, dynamicPrompts, canvas, params, upscale, config, activeTabName) => {
|
||||
const { bbox } = canvas;
|
||||
const { model, positivePrompt } = params;
|
||||
|
||||
const reasons: { prefix?: string; content: string }[] = [];
|
||||
|
||||
// Cannot generate if not connected
|
||||
if (!isConnected) {
|
||||
reasons.push({ content: i18n.t('parameters.invoke.systemDisconnected') });
|
||||
}
|
||||
|
||||
if (activeTabName === 'workflows') {
|
||||
if (workflowSettings.shouldValidateGraph) {
|
||||
if (!nodes.nodes.length) {
|
||||
reasons.push({ content: i18n.t('parameters.invoke.noNodesInGraph') });
|
||||
}
|
||||
|
||||
nodes.nodes.forEach((node) => {
|
||||
if (!isInvocationNode(node)) {
|
||||
return;
|
||||
}
|
||||
|
||||
const nodeTemplate = templates[node.data.type];
|
||||
|
||||
if (!nodeTemplate) {
|
||||
// Node type not found
|
||||
reasons.push({ content: i18n.t('parameters.invoke.missingNodeTemplate') });
|
||||
return;
|
||||
}
|
||||
|
||||
const connectedEdges = getConnectedEdges([node], nodes.edges);
|
||||
|
||||
forEach(node.data.inputs, (field) => {
|
||||
const fieldTemplate = nodeTemplate.inputs[field.name];
|
||||
const hasConnection = connectedEdges.some(
|
||||
(edge) => edge.target === node.id && edge.targetHandle === field.name
|
||||
);
|
||||
|
||||
if (!fieldTemplate) {
|
||||
reasons.push({ content: i18n.t('parameters.invoke.missingFieldTemplate') });
|
||||
return;
|
||||
}
|
||||
|
||||
const baseTKeyOptions = {
|
||||
nodeLabel: node.data.label || nodeTemplate.title,
|
||||
fieldLabel: field.label || fieldTemplate.title,
|
||||
};
|
||||
|
||||
if (fieldTemplate.required && field.value === undefined && !hasConnection) {
|
||||
reasons.push({ content: i18n.t('parameters.invoke.missingInputForField', baseTKeyOptions) });
|
||||
return;
|
||||
} else if (
|
||||
field.value &&
|
||||
isImageFieldCollectionInputInstance(field) &&
|
||||
isImageFieldCollectionInputTemplate(fieldTemplate)
|
||||
) {
|
||||
// Image collections may have min or max items to validate
|
||||
// TODO(psyche): generalize this to other collection types
|
||||
if (fieldTemplate.minItems !== undefined && fieldTemplate.minItems > 0 && field.value.length === 0) {
|
||||
reasons.push({ content: i18n.t('parameters.invoke.collectionEmpty', baseTKeyOptions) });
|
||||
return;
|
||||
}
|
||||
if (fieldTemplate.minItems !== undefined && field.value.length < fieldTemplate.minItems) {
|
||||
reasons.push({
|
||||
content: i18n.t('parameters.invoke.collectionTooFewItems', {
|
||||
...baseTKeyOptions,
|
||||
size: field.value.length,
|
||||
minItems: fieldTemplate.minItems,
|
||||
}),
|
||||
});
|
||||
return;
|
||||
}
|
||||
if (fieldTemplate.maxItems !== undefined && field.value.length > fieldTemplate.maxItems) {
|
||||
reasons.push({
|
||||
content: i18n.t('parameters.invoke.collectionTooManyItems', {
|
||||
...baseTKeyOptions,
|
||||
size: field.value.length,
|
||||
maxItems: fieldTemplate.maxItems,
|
||||
}),
|
||||
});
|
||||
return;
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
} else if (activeTabName === 'upscaling') {
|
||||
if (!upscale.upscaleInitialImage) {
|
||||
reasons.push({ content: i18n.t('upscaling.missingUpscaleInitialImage') });
|
||||
} else if (config.maxUpscaleDimension) {
|
||||
const { width, height } = upscale.upscaleInitialImage;
|
||||
const { scale } = upscale;
|
||||
|
||||
const maxPixels = config.maxUpscaleDimension ** 2;
|
||||
const upscaledPixels = width * scale * height * scale;
|
||||
|
||||
if (upscaledPixels > maxPixels) {
|
||||
reasons.push({ content: i18n.t('upscaling.exceedsMaxSize') });
|
||||
}
|
||||
}
|
||||
if (model && !['sd-1', 'sdxl'].includes(model.base)) {
|
||||
// When we are using an upsupported model, do not add the other warnings
|
||||
reasons.push({ content: i18n.t('upscaling.incompatibleBaseModel') });
|
||||
} else {
|
||||
// Using a compatible model, add all warnings
|
||||
if (!model) {
|
||||
reasons.push({ content: i18n.t('parameters.invoke.noModelSelected') });
|
||||
}
|
||||
if (!upscale.upscaleModel) {
|
||||
reasons.push({ content: i18n.t('upscaling.missingUpscaleModel') });
|
||||
}
|
||||
if (!upscale.tileControlnetModel) {
|
||||
reasons.push({ content: i18n.t('upscaling.missingTileControlNetModel') });
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (canvasIsFiltering) {
|
||||
reasons.push({ content: i18n.t('parameters.invoke.canvasIsFiltering') });
|
||||
}
|
||||
if (canvasIsTransforming) {
|
||||
reasons.push({ content: i18n.t('parameters.invoke.canvasIsTransforming') });
|
||||
}
|
||||
if (canvasIsRasterizing) {
|
||||
reasons.push({ content: i18n.t('parameters.invoke.canvasIsRasterizing') });
|
||||
}
|
||||
if (canvasIsCompositing) {
|
||||
reasons.push({ content: i18n.t('parameters.invoke.canvasIsCompositing') });
|
||||
}
|
||||
if (canvasIsSelectingObject) {
|
||||
reasons.push({ content: i18n.t('parameters.invoke.canvasIsSelectingObject') });
|
||||
}
|
||||
|
||||
if (dynamicPrompts.prompts.length === 0 && getShouldProcessPrompt(positivePrompt)) {
|
||||
reasons.push({ content: i18n.t('parameters.invoke.noPrompts') });
|
||||
}
|
||||
|
||||
if (!model) {
|
||||
reasons.push({ content: i18n.t('parameters.invoke.noModelSelected') });
|
||||
}
|
||||
|
||||
if (model?.base === 'flux') {
|
||||
if (!params.t5EncoderModel) {
|
||||
reasons.push({ content: i18n.t('parameters.invoke.noT5EncoderModelSelected') });
|
||||
}
|
||||
if (!params.clipEmbedModel) {
|
||||
reasons.push({ content: i18n.t('parameters.invoke.noCLIPEmbedModelSelected') });
|
||||
}
|
||||
if (!params.fluxVAE) {
|
||||
reasons.push({ content: i18n.t('parameters.invoke.noFLUXVAEModelSelected') });
|
||||
}
|
||||
if (bbox.scaleMethod === 'none') {
|
||||
if (bbox.rect.width % 16 !== 0) {
|
||||
reasons.push({
|
||||
content: i18n.t('parameters.invoke.fluxModelIncompatibleBboxWidth', { width: bbox.rect.width }),
|
||||
});
|
||||
}
|
||||
if (bbox.rect.height % 16 !== 0) {
|
||||
reasons.push({
|
||||
content: i18n.t('parameters.invoke.fluxModelIncompatibleBboxHeight', { height: bbox.rect.height }),
|
||||
});
|
||||
}
|
||||
} else {
|
||||
if (bbox.scaledSize.width % 16 !== 0) {
|
||||
reasons.push({
|
||||
content: i18n.t('parameters.invoke.fluxModelIncompatibleScaledBboxWidth', {
|
||||
width: bbox.scaledSize.width,
|
||||
}),
|
||||
});
|
||||
}
|
||||
if (bbox.scaledSize.height % 16 !== 0) {
|
||||
reasons.push({
|
||||
content: i18n.t('parameters.invoke.fluxModelIncompatibleScaledBboxHeight', {
|
||||
height: bbox.scaledSize.height,
|
||||
}),
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
canvas.controlLayers.entities
|
||||
.filter((controlLayer) => controlLayer.isEnabled)
|
||||
.forEach((controlLayer, i) => {
|
||||
const layerLiteral = i18n.t('controlLayers.layer_one');
        const layerNumber = i + 1;
        const layerType = i18n.t(LAYER_TYPE_TO_TKEY['control_layer']);
        const prefix = `${layerLiteral} #${layerNumber} (${layerType})`;
        const problems: string[] = [];
        // Must have model
        if (!controlLayer.controlAdapter.model) {
          problems.push(i18n.t('parameters.invoke.layer.controlAdapterNoModelSelected'));
        }
        // Model base must match
        if (controlLayer.controlAdapter.model?.base !== model?.base) {
          problems.push(i18n.t('parameters.invoke.layer.controlAdapterIncompatibleBaseModel'));
        }
        if (problems.length) {
          const content = upperFirst(problems.join(', '));
          reasons.push({ prefix, content });
        }
      });

    canvas.referenceImages.entities
      .filter((entity) => entity.isEnabled)
      .forEach((entity, i) => {
        const layerLiteral = i18n.t('controlLayers.layer_one');
        const layerNumber = i + 1;
        const layerType = i18n.t(LAYER_TYPE_TO_TKEY[entity.type]);
        const prefix = `${layerLiteral} #${layerNumber} (${layerType})`;
        const problems: string[] = [];

        // Must have model
        if (!entity.ipAdapter.model) {
          problems.push(i18n.t('parameters.invoke.layer.ipAdapterNoModelSelected'));
        }
        // Model base must match
        if (entity.ipAdapter.model?.base !== model?.base) {
          problems.push(i18n.t('parameters.invoke.layer.ipAdapterIncompatibleBaseModel'));
        }
        // Must have an image
        if (!entity.ipAdapter.image) {
          problems.push(i18n.t('parameters.invoke.layer.ipAdapterNoImageSelected'));
        }

        if (problems.length) {
          const content = upperFirst(problems.join(', '));
          reasons.push({ prefix, content });
        }
      });

    canvas.regionalGuidance.entities
      .filter((entity) => entity.isEnabled)
      .forEach((entity, i) => {
        const layerLiteral = i18n.t('controlLayers.layer_one');
        const layerNumber = i + 1;
        const layerType = i18n.t(LAYER_TYPE_TO_TKEY[entity.type]);
        const prefix = `${layerLiteral} #${layerNumber} (${layerType})`;
        const problems: string[] = [];
        // Must have a region
        if (entity.objects.length === 0) {
          problems.push(i18n.t('parameters.invoke.layer.rgNoRegion'));
        }
        // Must have at least 1 prompt or IP Adapter
        if (
          entity.positivePrompt === null &&
          entity.negativePrompt === null &&
          entity.referenceImages.length === 0
        ) {
          problems.push(i18n.t('parameters.invoke.layer.rgNoPromptsOrIPAdapters'));
        }
        entity.referenceImages.forEach(({ ipAdapter }) => {
          // Must have model
          if (!ipAdapter.model) {
            problems.push(i18n.t('parameters.invoke.layer.ipAdapterNoModelSelected'));
          }
          // Model base must match
          if (ipAdapter.model?.base !== model?.base) {
            problems.push(i18n.t('parameters.invoke.layer.ipAdapterIncompatibleBaseModel'));
          }
          // Must have an image
          if (!ipAdapter.image) {
            problems.push(i18n.t('parameters.invoke.layer.ipAdapterNoImageSelected'));
          }
        });

        if (problems.length) {
          const content = upperFirst(problems.join(', '));
          reasons.push({ prefix, content });
        }
      });

    canvas.rasterLayers.entities
      .filter((entity) => entity.isEnabled)
      .forEach((entity, i) => {
        const layerLiteral = i18n.t('controlLayers.layer_one');
        const layerNumber = i + 1;
        const layerType = i18n.t(LAYER_TYPE_TO_TKEY[entity.type]);
        const prefix = `${layerLiteral} #${layerNumber} (${layerType})`;
        const problems: string[] = [];

        if (problems.length) {
          const content = upperFirst(problems.join(', '));
          reasons.push({ prefix, content });
        }
      });
  }

  return { isReady: !reasons.length, reasons };
}
);
};

export const useIsReadyToEnqueue = () => {
  const templates = useStore($templates);
  const isConnected = useStore($isConnected);
  const canvasManager = useCanvasManagerSafe();
  const canvasIsFiltering = useStore(canvasManager?.stateApi.$isFiltering ?? $true);
  const canvasIsTransforming = useStore(canvasManager?.stateApi.$isTransforming ?? $true);
  const canvasIsRasterizing = useStore(canvasManager?.stateApi.$isRasterizing ?? $true);
  const canvasIsSelectingObject = useStore(canvasManager?.stateApi.$isSegmenting ?? $true);
  const canvasIsCompositing = useStore(canvasManager?.compositor.$isBusy ?? $true);
  const selector = useMemo(
    () =>
      createSelector({
        templates,
        isConnected,
        canvasIsFiltering,
        canvasIsTransforming,
        canvasIsRasterizing,
        canvasIsCompositing,
        canvasIsSelectingObject,
      }),
    [
      templates,
      isConnected,
      canvasIsFiltering,
      canvasIsTransforming,
      canvasIsRasterizing,
      canvasIsCompositing,
      canvasIsSelectingObject,
    ]
  );
  const value = useAppSelector(selector);
  return value;
};
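For orientation, a minimal sketch of how a component might consume this hook; the component and its tooltip wiring are illustrative assumptions, not part of this change, and `useIsReadyToEnqueue` is assumed to be in scope:

import { Button, Tooltip } from '@invoke-ai/ui-library';

// Illustrative only: disable the invoke action and surface the collected reasons.
const InvokeButtonExample = () => {
  const { isReady, reasons } = useIsReadyToEnqueue();
  const label = reasons.map((r) => (r.prefix ? `${r.prefix}: ${r.content}` : r.content)).join('; ');
  return (
    <Tooltip label={label} isDisabled={isReady}>
      <Button isDisabled={!isReady}>Invoke</Button>
    </Tooltip>
  );
};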
@@ -63,7 +63,7 @@ export const CanvasAddEntityButtons = memo(() => {
          justifyContent="flex-start"
          leftIcon={<PiPlusBold />}
          onClick={addRegionalGuidance}
          isDisabled={isFLUX || isSD3}
          isDisabled={isSD3}
        >
          {t('controlLayers.regionalGuidance')}
        </Button>

@@ -2,7 +2,6 @@ import { Alert, AlertDescription, AlertIcon, AlertTitle } from '@invoke-ai/ui-li
import { useStore } from '@nanostores/react';
import { useAppSelector } from 'app/store/storeHooks';
import { useDeferredModelLoadingInvocationProgressMessage } from 'features/controlLayers/hooks/useDeferredModelLoadingInvocationProgressMessage';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { selectIsLocal } from 'features/system/store/configSlice';
import { selectSystemShouldShowInvocationProgressDetail } from 'features/system/store/systemSlice';
import { memo } from 'react';

@@ -44,20 +43,14 @@ const CanvasAlertsInvocationProgressContentCommercial = memo(() => {
CanvasAlertsInvocationProgressContentCommercial.displayName = 'CanvasAlertsInvocationProgressContentCommercial';

export const CanvasAlertsInvocationProgress = memo(() => {
  const isProgressMessageAlertEnabled = useFeatureStatus('invocationProgressAlert');
  const shouldShowInvocationProgressDetail = useAppSelector(selectSystemShouldShowInvocationProgressDetail);
  const isLocal = useAppSelector(selectIsLocal);

  // The alert is disabled at the system level
  if (!isProgressMessageAlertEnabled) {
    return null;
  }

  if (!isLocal) {
    return <CanvasAlertsInvocationProgressContentCommercial />;
  }

  // The alert is disabled at the user level
  // OSS user setting
  if (!shouldShowInvocationProgressDetail) {
    return null;
  }

@@ -49,7 +49,7 @@ export const EntityListGlobalActionBarAddLayerMenu = memo(() => {
      <MenuItem icon={<PiPlusBold />} onClick={addInpaintMask}>
        {t('controlLayers.inpaintMask')}
      </MenuItem>
      <MenuItem icon={<PiPlusBold />} onClick={addRegionalGuidance} isDisabled={isFLUX || isSD3}>
      <MenuItem icon={<PiPlusBold />} onClick={addRegionalGuidance} isDisabled={isSD3}>
        {t('controlLayers.regionalGuidance')}
      </MenuItem>
      <MenuItem icon={<PiPlusBold />} onClick={addRegionalReferenceImage} isDisabled={isFLUX || isSD3}>

@@ -1,8 +1,10 @@
import type { ComboboxOnChange } from '@invoke-ai/ui-library';
import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import type { IPMethodV2 } from 'features/controlLayers/store/types';
import { isIPMethodV2 } from 'features/controlLayers/store/types';
import { selectSystemShouldEnableModelDescriptions } from 'features/system/store/systemSlice';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { assert } from 'tsafe';

@@ -14,13 +16,27 @@ type Props = {

export const IPAdapterMethod = memo(({ method, onChange }: Props) => {
  const { t } = useTranslation();
  const shouldShowModelDescriptions = useAppSelector(selectSystemShouldEnableModelDescriptions);

  const options: { label: string; value: IPMethodV2 }[] = useMemo(
    () => [
      { label: t('controlLayers.ipAdapterMethod.full'), value: 'full' },
      { label: t('controlLayers.ipAdapterMethod.style'), value: 'style' },
      { label: t('controlLayers.ipAdapterMethod.composition'), value: 'composition' },
      {
        label: t('controlLayers.ipAdapterMethod.full'),
        value: 'full',
        description: shouldShowModelDescriptions ? t('controlLayers.ipAdapterMethod.fullDesc') : undefined,
      },
      {
        label: t('controlLayers.ipAdapterMethod.style'),
        value: 'style',
        description: shouldShowModelDescriptions ? t('controlLayers.ipAdapterMethod.styleDesc') : undefined,
      },
      {
        label: t('controlLayers.ipAdapterMethod.composition'),
        value: 'composition',
        description: shouldShowModelDescriptions ? t('controlLayers.ipAdapterMethod.compositionDesc') : undefined,
      },
    ],
    [t]
    [t, shouldShowModelDescriptions]
  );
  const _onChange = useCallback<ComboboxOnChange>(
    (v) => {

@@ -5,6 +5,7 @@ import { BeginEndStepPct } from 'features/controlLayers/components/common/BeginE
import { CanvasEntitySettingsWrapper } from 'features/controlLayers/components/common/CanvasEntitySettingsWrapper';
import { Weight } from 'features/controlLayers/components/common/Weight';
import { IPAdapterMethod } from 'features/controlLayers/components/IPAdapter/IPAdapterMethod';
import { IPAdapterSettingsEmptyState } from 'features/controlLayers/components/IPAdapter/IPAdapterSettingsEmptyState';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { usePullBboxIntoGlobalReferenceImage } from 'features/controlLayers/hooks/saveCanvasHooks';
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';

@@ -17,7 +18,7 @@ import {
  referenceImageIPAdapterWeightChanged,
} from 'features/controlLayers/store/canvasSlice';
import { selectIsFLUX } from 'features/controlLayers/store/paramsSlice';
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
import { selectCanvasSlice, selectEntity, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
import type { CanvasEntityIdentifier, CLIPVisionModelV2, IPMethodV2 } from 'features/controlLayers/store/types';
import type { SetGlobalReferenceImageDndTargetData } from 'features/dnd/dnd';
import { setGlobalReferenceImageDndTarget } from 'features/dnd/dnd';

@@ -35,7 +36,7 @@ const buildSelectIPAdapter = (entityIdentifier: CanvasEntityIdentifier<'referenc
  (canvas) => selectEntityOrThrow(canvas, entityIdentifier, 'IPAdapterSettings').ipAdapter
);

export const IPAdapterSettings = memo(() => {
const IPAdapterSettingsContent = memo(() => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const entityIdentifier = useEntityIdentifierContext('reference_image');

@@ -134,4 +135,25 @@ export const IPAdapterSettings = memo(() => {
  );
});

IPAdapterSettingsContent.displayName = 'IPAdapterSettingsContent';

const buildSelectIPAdapterHasImage = (entityIdentifier: CanvasEntityIdentifier<'reference_image'>) =>
  createSelector(selectCanvasSlice, (canvas) => {
    const referenceImage = selectEntity(canvas, entityIdentifier);
    return !!referenceImage && referenceImage.ipAdapter.image !== null;
  });

export const IPAdapterSettings = memo(() => {
  const entityIdentifier = useEntityIdentifierContext('reference_image');

  const selectIPAdapterHasImage = useMemo(() => buildSelectIPAdapterHasImage(entityIdentifier), [entityIdentifier]);
  const hasImage = useAppSelector(selectIPAdapterHasImage);

  if (!hasImage) {
    return <IPAdapterSettingsEmptyState />;
  }

  return <IPAdapterSettingsContent />;
});

IPAdapterSettings.displayName = 'IPAdapterSettings';
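The wrapper added above gates the full settings panel behind a cheap, memoized boolean selector, so the empty state renders without subscribing to the rest of the IP adapter state. The same pattern reappears below for regional guidance reference images. A distilled sketch; `buildSelectHasData`, `Content`, and `EmptyState` are illustrative placeholders, not real exports:

// Illustrative only: render an empty state until a per-entity selector reports data.
const GatedPanel = memo(({ id }: { id: string }) => {
  const selectHasData = useMemo(() => buildSelectHasData(id), [id]); // hypothetical selector factory
  const hasData = useAppSelector(selectHasData);
  return hasData ? <Content id={id} /> : <EmptyState id={id} />;
});
GatedPanel.displayName = 'GatedPanel';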
@@ -0,0 +1,64 @@
import { Button, Flex, Text } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { useImageUploadButton } from 'common/hooks/useImageUploadButton';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
import type { SetGlobalReferenceImageDndTargetData } from 'features/dnd/dnd';
import { setGlobalReferenceImageDndTarget } from 'features/dnd/dnd';
import { DndDropTarget } from 'features/dnd/DndDropTarget';
import { setGlobalReferenceImage } from 'features/imageActions/actions';
import { activeTabCanvasRightPanelChanged } from 'features/ui/store/uiSlice';
import { memo, useCallback, useMemo } from 'react';
import { Trans, useTranslation } from 'react-i18next';
import type { ImageDTO } from 'services/api/types';

export const IPAdapterSettingsEmptyState = memo(() => {
  const { t } = useTranslation();
  const entityIdentifier = useEntityIdentifierContext('reference_image');
  const dispatch = useAppDispatch();
  const isBusy = useCanvasIsBusy();
  const onUpload = useCallback(
    (imageDTO: ImageDTO) => {
      setGlobalReferenceImage({ imageDTO, entityIdentifier, dispatch });
    },
    [dispatch, entityIdentifier]
  );
  const uploadApi = useImageUploadButton({ onUpload, allowMultiple: false });
  const onClickGalleryButton = useCallback(() => {
    dispatch(activeTabCanvasRightPanelChanged('gallery'));
  }, [dispatch]);

  const dndTargetData = useMemo<SetGlobalReferenceImageDndTargetData>(
    () => setGlobalReferenceImageDndTarget.getData({ entityIdentifier }),
    [entityIdentifier]
  );

  const components = useMemo(
    () => ({
      UploadButton: (
        <Button isDisabled={isBusy} size="sm" variant="link" color="base.300" {...uploadApi.getUploadButtonProps()} />
      ),
      GalleryButton: (
        <Button onClick={onClickGalleryButton} isDisabled={isBusy} size="sm" variant="link" color="base.300" />
      ),
    }),
    [isBusy, onClickGalleryButton, uploadApi]
  );

  return (
    <Flex flexDir="column" gap={3} position="relative" w="full" p={4}>
      <Text textAlign="center" color="base.300">
        <Trans i18nKey="controlLayers.referenceImageEmptyState" components={components} />
      </Text>
      <input {...uploadApi.getUploadInputProps()} />
      <DndDropTarget
        dndTarget={setGlobalReferenceImageDndTarget}
        dndTargetData={dndTargetData}
        label={t('controlLayers.useImage')}
        isDisabled={isBusy}
      />
    </Flex>
  );
});

IPAdapterSettingsEmptyState.displayName = 'IPAdapterSettingsEmptyState';

@@ -1,27 +1,28 @@
import { IconButton, Tooltip } from '@invoke-ai/ui-library';
import type { IconButtonProps } from '@invoke-ai/ui-library';
import { IconButton } from '@invoke-ai/ui-library';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiTrashSimpleFill } from 'react-icons/pi';
import { PiXBold } from 'react-icons/pi';

type Props = {
type Props = Omit<IconButtonProps, 'aria-label'> & {
  onDelete: () => void;
};

export const RegionalGuidanceDeletePromptButton = memo(({ onDelete }: Props) => {
export const RegionalGuidanceDeletePromptButton = memo(({ onDelete, ...rest }: Props) => {
  const { t } = useTranslation();
  return (
    <Tooltip label={t('controlLayers.deletePrompt')}>
      <IconButton
        variant="link"
        aria-label={t('controlLayers.deletePrompt')}
        icon={<PiTrashSimpleFill />}
        onClick={onDelete}
        flexGrow={0}
        size="sm"
        p={0}
        colorScheme="error"
      />
    </Tooltip>
    <IconButton
      tooltip={t('common.delete')}
      variant="link"
      aria-label={t('common.delete')}
      icon={<PiXBold />}
      onClick={onDelete}
      flexGrow={0}
      size="sm"
      p={0}
      colorScheme="error"
      {...rest}
    />
  );
});

@@ -6,6 +6,7 @@ import { Weight } from 'features/controlLayers/components/common/Weight';
import { IPAdapterImagePreview } from 'features/controlLayers/components/IPAdapter/IPAdapterImagePreview';
import { IPAdapterMethod } from 'features/controlLayers/components/IPAdapter/IPAdapterMethod';
import { IPAdapterModel } from 'features/controlLayers/components/IPAdapter/IPAdapterModel';
import { RegionalGuidanceIPAdapterSettingsEmptyState } from 'features/controlLayers/components/RegionalGuidance/RegionalGuidanceIPAdapterSettingsEmptyState';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { usePullBboxIntoRegionalGuidanceReferenceImage } from 'features/controlLayers/hooks/saveCanvasHooks';
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';

@@ -19,12 +20,12 @@ import {
  rgIPAdapterWeightChanged,
} from 'features/controlLayers/store/canvasSlice';
import { selectCanvasSlice, selectRegionalGuidanceReferenceImage } from 'features/controlLayers/store/selectors';
import type { CLIPVisionModelV2, IPMethodV2 } from 'features/controlLayers/store/types';
import type { CanvasEntityIdentifier, CLIPVisionModelV2, IPMethodV2 } from 'features/controlLayers/store/types';
import type { SetRegionalGuidanceReferenceImageDndTargetData } from 'features/dnd/dnd';
import { setRegionalGuidanceReferenceImageDndTarget } from 'features/dnd/dnd';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiBoundingBoxBold, PiTrashSimpleFill } from 'react-icons/pi';
import { PiBoundingBoxBold, PiXBold } from 'react-icons/pi';
import type { ImageDTO, IPAdapterModelConfig } from 'services/api/types';
import { assert } from 'tsafe';

@@ -32,7 +33,7 @@ type Props = {
  referenceImageId: string;
};

export const RegionalGuidanceIPAdapterSettings = memo(({ referenceImageId }: Props) => {
const RegionalGuidanceIPAdapterSettingsContent = memo(({ referenceImageId }: Props) => {
  const entityIdentifier = useEntityIdentifierContext('regional_guidance');
  const { t } = useTranslation();
  const dispatch = useAppDispatch();

@@ -115,7 +116,7 @@ export const RegionalGuidanceIPAdapterSettings = memo(({ referenceImageId }: Pro
          size="sm"
          variant="link"
          alignSelf="stretch"
          icon={<PiTrashSimpleFill />}
          icon={<PiXBold />}
          tooltip={t('controlLayers.deleteReferenceImage')}
          aria-label={t('controlLayers.deleteReferenceImage')}
          onClick={onDeleteIPAdapter}

@@ -161,4 +162,31 @@ export const RegionalGuidanceIPAdapterSettings = memo(({ referenceImageId }: Pro
  );
});

RegionalGuidanceIPAdapterSettingsContent.displayName = 'RegionalGuidanceIPAdapterSettingsContent';

const buildSelectIPAdapterHasImage = (
  entityIdentifier: CanvasEntityIdentifier<'regional_guidance'>,
  referenceImageId: string
) =>
  createSelector(selectCanvasSlice, (canvas) => {
    const referenceImage = selectRegionalGuidanceReferenceImage(canvas, entityIdentifier, referenceImageId);
    return !!referenceImage && referenceImage.ipAdapter.image !== null;
  });

export const RegionalGuidanceIPAdapterSettings = memo(({ referenceImageId }: Props) => {
  const entityIdentifier = useEntityIdentifierContext('regional_guidance');

  const selectIPAdapterHasImage = useMemo(
    () => buildSelectIPAdapterHasImage(entityIdentifier, referenceImageId),
    [entityIdentifier, referenceImageId]
  );
  const hasImage = useAppSelector(selectIPAdapterHasImage);

  if (!hasImage) {
    return <RegionalGuidanceIPAdapterSettingsEmptyState referenceImageId={referenceImageId} />;
  }

  return <RegionalGuidanceIPAdapterSettingsContent referenceImageId={referenceImageId} />;
});

RegionalGuidanceIPAdapterSettings.displayName = 'RegionalGuidanceIPAdapterSettings';

@@ -0,0 +1,82 @@
import { Button, Flex, Text } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { useImageUploadButton } from 'common/hooks/useImageUploadButton';
import { RegionalGuidanceDeletePromptButton } from 'features/controlLayers/components/RegionalGuidance/RegionalGuidanceDeletePromptButton';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
import { rgIPAdapterDeleted } from 'features/controlLayers/store/canvasSlice';
import type { SetRegionalGuidanceReferenceImageDndTargetData } from 'features/dnd/dnd';
import { setRegionalGuidanceReferenceImageDndTarget } from 'features/dnd/dnd';
import { DndDropTarget } from 'features/dnd/DndDropTarget';
import { setRegionalGuidanceReferenceImage } from 'features/imageActions/actions';
import { activeTabCanvasRightPanelChanged } from 'features/ui/store/uiSlice';
import { memo, useCallback, useMemo } from 'react';
import { Trans, useTranslation } from 'react-i18next';
import type { ImageDTO } from 'services/api/types';

type Props = {
  referenceImageId: string;
};

export const RegionalGuidanceIPAdapterSettingsEmptyState = memo(({ referenceImageId }: Props) => {
  const { t } = useTranslation();
  const entityIdentifier = useEntityIdentifierContext('regional_guidance');
  const dispatch = useAppDispatch();
  const isBusy = useCanvasIsBusy();
  const onUpload = useCallback(
    (imageDTO: ImageDTO) => {
      setRegionalGuidanceReferenceImage({ imageDTO, entityIdentifier, referenceImageId, dispatch });
    },
    [dispatch, entityIdentifier, referenceImageId]
  );
  const uploadApi = useImageUploadButton({ onUpload, allowMultiple: false });
  const onClickGalleryButton = useCallback(() => {
    dispatch(activeTabCanvasRightPanelChanged('gallery'));
  }, [dispatch]);
  const onDeleteIPAdapter = useCallback(() => {
    dispatch(rgIPAdapterDeleted({ entityIdentifier, referenceImageId }));
  }, [dispatch, entityIdentifier, referenceImageId]);

  const dndTargetData = useMemo<SetRegionalGuidanceReferenceImageDndTargetData>(
    () =>
      setRegionalGuidanceReferenceImageDndTarget.getData({
        entityIdentifier,
        referenceImageId,
      }),
    [entityIdentifier, referenceImageId]
  );

  return (
    <Flex flexDir="column" gap={3} position="relative" w="full" p={4}>
      <RegionalGuidanceDeletePromptButton onDelete={onDeleteIPAdapter} position="absolute" top={0} insetInlineEnd={0} />
      <Text textAlign="center" color="base.300">
        <Trans
          i18nKey="controlLayers.referenceImageEmptyState"
          components={{
            UploadButton: (
              <Button
                isDisabled={isBusy}
                size="sm"
                variant="link"
                color="base.300"
                {...uploadApi.getUploadButtonProps()}
              />
            ),
            GalleryButton: (
              <Button onClick={onClickGalleryButton} isDisabled={isBusy} size="sm" variant="link" color="base.300" />
            ),
          }}
        />
      </Text>
      <input {...uploadApi.getUploadInputProps()} />
      <DndDropTarget
        dndTarget={setRegionalGuidanceReferenceImageDndTarget}
        dndTargetData={dndTargetData}
        label={t('controlLayers.useImage')}
        isDisabled={isBusy}
      />
    </Flex>
  );
});

RegionalGuidanceIPAdapterSettingsEmptyState.displayName = 'RegionalGuidanceIPAdapterSettingsEmptyState';

@@ -1,4 +1,6 @@
import { IconButton } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { $authToken } from 'app/store/nanostores/authToken';
import { useAppSelector } from 'app/store/storeHooks';
import { withResultAsync } from 'common/util/result';
import { selectSelectedImage } from 'features/controlLayers/store/canvasStagingAreaSlice';

@@ -14,6 +16,7 @@ const TOAST_ID = 'SAVE_STAGING_AREA_IMAGE_TO_GALLERY';
export const StagingAreaToolbarSaveSelectedToGalleryButton = memo(() => {
  const autoAddBoardId = useAppSelector(selectAutoAddBoardId);
  const selectedImage = useAppSelector(selectSelectedImage);
  const authToken = useStore($authToken);

  const { t } = useTranslation();

@@ -26,7 +29,14 @@ export const StagingAreaToolbarSaveSelectedToGalleryButton = memo(() => {
    // the gallery without borking the canvas, which may need this image to exist.
    const result = await withResultAsync(async () => {
      // Download the image
      const res = await fetch(selectedImage.imageDTO.image_url);
      const requestOpts = authToken
        ? {
            headers: {
              Authorization: `Bearer ${authToken}`,
            },
          }
        : {};
      const res = await fetch(selectedImage.imageDTO.image_url, requestOpts);
      const blob = await res.blob();
      // Create a new file with the same name, which we will upload
      const file = new File([blob], `copy_of_${selectedImage.imageDTO.image_name}`, { type: 'image/png' });
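The edit above threads an optional bearer token into the image download. The same idea as a standalone helper; the `withAuth` name is illustrative, not from this change:

// Illustrative helper: build fetch options that carry an Authorization header
// only when a token is present.
const withAuth = (token: string | null): RequestInit =>
  token ? { headers: { Authorization: `Bearer ${token}` } } : {};

// Usage sketch: const res = await fetch(imageUrl, withAuth(authToken));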
@@ -56,7 +66,7 @@ export const StagingAreaToolbarSaveSelectedToGalleryButton = memo(() => {
        status: 'error',
      });
    }
  }, [autoAddBoardId, selectedImage, t]);
  }, [autoAddBoardId, selectedImage, t, authToken]);

  return (
    <IconButton

@@ -4,8 +8,8 @@ import { CanvasSettingsPopover } from 'features/controlLayers/components/Setting
import { ToolColorPicker } from 'features/controlLayers/components/Tool/ToolFillColorPicker';
import { ToolSettings } from 'features/controlLayers/components/Tool/ToolSettings';
import { CanvasToolbarFitBboxToLayersButton } from 'features/controlLayers/components/Toolbar/CanvasToolbarFitBboxToLayersButton';
import { CanvasToolbarNewSessionMenuButton } from 'features/controlLayers/components/Toolbar/CanvasToolbarNewSessionMenuButton';
import { CanvasToolbarRedoButton } from 'features/controlLayers/components/Toolbar/CanvasToolbarRedoButton';
import { CanvasToolbarResetCanvasButton } from 'features/controlLayers/components/Toolbar/CanvasToolbarResetCanvasButton';
import { CanvasToolbarResetViewButton } from 'features/controlLayers/components/Toolbar/CanvasToolbarResetViewButton';
import { CanvasToolbarSaveToGalleryButton } from 'features/controlLayers/components/Toolbar/CanvasToolbarSaveToGalleryButton';
import { CanvasToolbarScale } from 'features/controlLayers/components/Toolbar/CanvasToolbarScale';

@@ -43,7 +43,7 @@ export const CanvasToolbar = memo(() => {
        <CanvasToolbarSaveToGalleryButton />
        <CanvasToolbarUndoButton />
        <CanvasToolbarRedoButton />
        <CanvasToolbarResetCanvasButton />
        <CanvasToolbarNewSessionMenuButton />
        <CanvasSettingsPopover />
      </Flex>
    </Flex>

@@ -0,0 +1,25 @@
import { IconButton, Menu, MenuButton, MenuList } from '@invoke-ai/ui-library';
import { SessionMenuItems } from 'common/components/SessionMenuItems';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiFilePlusBold } from 'react-icons/pi';

export const CanvasToolbarNewSessionMenuButton = memo(() => {
  const { t } = useTranslation();
  return (
    <Menu placement="bottom-end">
      <MenuButton
        as={IconButton}
        aria-label={t('controlLayers.newSession')}
        icon={<PiFilePlusBold />}
        variant="link"
        alignSelf="stretch"
      />
      <MenuList>
        <SessionMenuItems />
      </MenuList>
    </Menu>
  );
});

CanvasToolbarNewSessionMenuButton.displayName = 'CanvasToolbarNewSessionMenuButton';

@@ -1,30 +0,0 @@
import { IconButton } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { canvasReset } from 'features/controlLayers/store/actions';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiTrashBold } from 'react-icons/pi';

export const CanvasToolbarResetCanvasButton = memo(() => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const canvasManager = useCanvasManager();
  const onClick = useCallback(() => {
    dispatch(canvasReset());
    canvasManager.stage.fitLayersToStage();
  }, [canvasManager.stage, dispatch]);
  return (
    <IconButton
      aria-label={t('controlLayers.resetCanvas')}
      tooltip={t('controlLayers.resetCanvas')}
      onClick={onClick}
      colorScheme="error"
      icon={<PiTrashBold />}
      variant="link"
      alignSelf="stretch"
    />
  );
});

CanvasToolbarResetCanvasButton.displayName = 'CanvasToolbarResetCanvasButton';

@@ -1,6 +1,7 @@
import { Flex } from '@invoke-ai/ui-library';
import { CanvasEntityDeleteButton } from 'features/controlLayers/components/common/CanvasEntityDeleteButton';
import { CanvasEntityEnabledToggle } from 'features/controlLayers/components/common/CanvasEntityEnabledToggle';
import { CanvasEntityHeaderWarnings } from 'features/controlLayers/components/common/CanvasEntityHeaderWarnings';
import { CanvasEntityIsBookmarkedForQuickSwitchToggle } from 'features/controlLayers/components/common/CanvasEntityIsBookmarkedForQuickSwitchToggle';
import { CanvasEntityIsLockedToggle } from 'features/controlLayers/components/common/CanvasEntityIsLockedToggle';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';

@@ -11,6 +12,7 @@ export const CanvasEntityHeaderCommonActions = memo(() => {

  return (
    <Flex alignSelf="stretch">
      <CanvasEntityHeaderWarnings />
      <CanvasEntityIsBookmarkedForQuickSwitchToggle />
      {entityIdentifier.type !== 'reference_image' && <CanvasEntityIsLockedToggle />}
      <CanvasEntityEnabledToggle />

@@ -0,0 +1,101 @@
import { Flex, IconButton, ListItem, Text, UnorderedList } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { EMPTY_ARRAY } from 'app/store/constants';
import { useAppSelector } from 'app/store/storeHooks';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { useEntityIsEnabled } from 'features/controlLayers/hooks/useEntityIsEnabled';
import { selectModel } from 'features/controlLayers/store/paramsSlice';
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
import {
  getControlLayerWarnings,
  getGlobalReferenceImageWarnings,
  getInpaintMaskWarnings,
  getRasterLayerWarnings,
  getRegionalGuidanceWarnings,
} from 'features/controlLayers/store/validators';
import type { TFunction } from 'i18next';
import { upperFirst } from 'lodash-es';
import { memo, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiWarningBold } from 'react-icons/pi';
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';

const buildSelectWarnings = (entityIdentifier: CanvasEntityIdentifier, t: TFunction) => {
  return createSelector(selectCanvasSlice, selectModel, (canvas, model) => {
    // This component is used within a <CanvasEntityStateGate /> so we can safely assume that the entity exists.
    // Should never throw.
    const entity = selectEntityOrThrow(canvas, entityIdentifier, 'CanvasEntityHeaderWarnings');

    let warnings: string[] = [];

    const entityType = entity.type;

    if (entityType === 'control_layer') {
      warnings = getControlLayerWarnings(entity, model);
    } else if (entityType === 'regional_guidance') {
      warnings = getRegionalGuidanceWarnings(entity, model);
    } else if (entityType === 'inpaint_mask') {
      warnings = getInpaintMaskWarnings(entity, model);
    } else if (entityType === 'raster_layer') {
      warnings = getRasterLayerWarnings(entity, model);
    } else if (entityType === 'reference_image') {
      warnings = getGlobalReferenceImageWarnings(entity, model);
    } else {
      assert<Equals<typeof entityType, never>>(false, 'Unexpected entity type');
    }

    // Return a stable reference if there are no warnings
    if (warnings.length === 0) {
      return EMPTY_ARRAY;
    }

    return warnings.map((w) => t(w)).map(upperFirst);
  });
};

export const CanvasEntityHeaderWarnings = memo(() => {
  const entityIdentifier = useEntityIdentifierContext();
  const { t } = useTranslation();
  const isEnabled = useEntityIsEnabled(entityIdentifier);
  const selectWarnings = useMemo(() => buildSelectWarnings(entityIdentifier, t), [entityIdentifier, t]);
  const warnings = useAppSelector(selectWarnings);

  if (warnings.length === 0) {
    return null;
  }

  return (
    // Using IconButton here because it matches the styling of the actual buttons in the header without any
    // finagling, but it's not a button
    <IconButton
      as="span"
      size="sm"
      variant="link"
      alignSelf="stretch"
      aria-label="warnings"
      tooltip={<TooltipContent warnings={warnings} />}
      icon={<PiWarningBold />}
      colorScheme="warning"
      isDisabled={!isEnabled}
    />
  );
});

CanvasEntityHeaderWarnings.displayName = 'CanvasEntityHeaderWarnings';

const TooltipContent = memo((props: { warnings: string[] }) => {
  const { t } = useTranslation();
  return (
    <Flex flexDir="column">
      <Text>{t('common.warnings')}:</Text>
      <UnorderedList>
        {props.warnings.map((warning, index) => (
          <ListItem key={index}>{warning}</ListItem>
        ))}
      </UnorderedList>
    </Flex>
  );
});
TooltipContent.displayName = 'TooltipContent';
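The `assert<Equals<typeof entityType, never>>(false, ...)` branch above makes the if/else chain exhaustive at compile time: if a new entity type is added without a matching branch, the variable no longer narrows to `never` and the build fails. A self-contained sketch of the same tsafe pattern:

import type { Equals } from 'tsafe';
import { assert } from 'tsafe';

type Shape = 'circle' | 'square';

const area = (shape: Shape, size: number): number => {
  if (shape === 'circle') {
    return Math.PI * size * size;
  } else if (shape === 'square') {
    return size * size;
  } else {
    // Adding a new Shape member without a branch above makes `shape` non-never
    // here, so this assert stops type-checking.
    assert<Equals<typeof shape, never>>(false, 'Unexpected shape');
  }
};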
@@ -9,6 +9,7 @@ import {
  getEmptyRect,
  getKonvaNodeDebugAttrs,
  getPrefixedId,
  offsetCoord,
} from 'features/controlLayers/konva/util';
import { selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
import type { Coordinate, Rect, RectWithRotation } from 'features/controlLayers/store/types';

@@ -558,6 +559,25 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
    this.manager.stateApi.setEntityPosition({ entityIdentifier: this.parent.entityIdentifier, position });
  };

  nudgeBy = (offset: Coordinate) => {
    // We can immediately move both the proxy rect and layer objects so we don't have to wait for a redux round-trip,
    // which can take up to 2ms in my testing. This is optional, but can make the interaction feel more responsive,
    // especially on lower-end devices.
    // Get the relative position of the layer's objects, according to konva
    const position = this.konva.proxyRect.position();
    // Offset the position by the nudge amount
    const newPosition = offsetCoord(position, offset);
    // Set the new position of the proxy rect - this doesn't move the layer objects - only the outline rect
    this.konva.proxyRect.setAttrs(newPosition);
    // Sync the layer objects with the proxy rect - moves them to the new position
    this.syncObjectGroupWithProxyRect();

    // Push to redux. The state change will do a round-trip, and eventually make it back to the canvas classes, at
    // which point the layer will be moved to the new position.
    this.manager.stateApi.moveEntityBy({ entityIdentifier: this.parent.entityIdentifier, offset });
    this.log.trace({ offset }, 'Nudged');
  };

  syncObjectGroupWithProxyRect = () => {
    this.parent.renderer.konva.objectGroup.setAttrs({
      x: this.konva.proxyRect.x(),

@@ -20,7 +20,8 @@ import {
  controlLayerAdded,
  entityBrushLineAdded,
  entityEraserLineAdded,
  entityMoved,
  entityMovedBy,
  entityMovedTo,
  entityRasterized,
  entityRectAdded,
  entityReset,

@@ -40,7 +41,8 @@ import type {
  EntityBrushLineAddedPayload,
  EntityEraserLineAddedPayload,
  EntityIdentifierPayload,
  EntityMovedPayload,
  EntityMovedByPayload,
  EntityMovedToPayload,
  EntityRasterizedPayload,
  EntityRectAddedPayload,
  Rect,

@@ -51,7 +53,7 @@ import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import { atom, computed } from 'nanostores';
import type { Logger } from 'roarr';
import { getImageDTO } from 'services/api/endpoints/images';
import { queueApi } from 'services/api/endpoints/queue';
import { enqueueMutationFixedCacheKeyOptions, queueApi } from 'services/api/endpoints/queue';
import type { BatchConfig, ImageDTO, S } from 'services/api/types';
import { QueueError } from 'services/events/errors';
import type { Param0 } from 'tsafe';

@@ -139,8 +141,15 @@ export class CanvasStateApiModule extends CanvasModuleBase {
  /**
   * Updates an entity's position, pushing state to redux.
   */
  setEntityPosition = (arg: EntityMovedPayload) => {
    this.store.dispatch(entityMoved(arg));
  setEntityPosition = (arg: EntityMovedToPayload) => {
    this.store.dispatch(entityMovedTo(arg));
  };

  /**
   * Moves an entity by the given offset, pushing state to redux.
   */
  moveEntityBy = (arg: EntityMovedByPayload) => {
    this.store.dispatch(entityMovedBy(arg));
  };

  /**

@@ -402,7 +411,7 @@ export class CanvasStateApiModule extends CanvasModuleBase {
      queueApi.endpoints.enqueueBatch.initiate(batch, {
        // Use the same cache key for all enqueueBatch requests, so that all consumers of this query get the same status
        // updates.
        fixedCacheKey: 'enqueueBatch',
        ...enqueueMutationFixedCacheKeyOptions,
        // We do not need RTK to track this request in the store
        track: false,
      })

@@ -1,9 +1,24 @@
import { $focusedRegion } from 'common/hooks/focus';
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { CanvasModuleBase } from 'features/controlLayers/konva/CanvasModuleBase';
import type { CanvasToolModule } from 'features/controlLayers/konva/CanvasTool/CanvasToolModule';
import { getPrefixedId } from 'features/controlLayers/konva/util';
import type { Coordinate } from 'features/controlLayers/store/types';
import type { Logger } from 'roarr';

type CanvasMoveToolModuleConfig = {
  /**
   * The number of pixels to nudge the entity by when moving with the arrow keys.
   */
  NUDGE_PX: number;
};

const DEFAULT_CONFIG: CanvasMoveToolModuleConfig = {
  NUDGE_PX: 1,
};

type NudgeKey = 'ArrowLeft' | 'ArrowRight' | 'ArrowUp' | 'ArrowDown';

export class CanvasMoveToolModule extends CanvasModuleBase {
  readonly type = 'move_tool';
  readonly id: string;

@@ -12,6 +27,9 @@ export class CanvasMoveToolModule extends CanvasModuleBase {
  readonly manager: CanvasManager;
  readonly log: Logger;

  config: CanvasMoveToolModuleConfig = DEFAULT_CONFIG;
  nudgeOffsets: Record<NudgeKey, Coordinate>;

  constructor(parent: CanvasToolModule) {
    super();
    this.id = getPrefixedId(this.type);

@@ -19,8 +37,18 @@ export class CanvasMoveToolModule extends CanvasModuleBase {
    this.manager = this.parent.manager;
    this.path = this.manager.buildPath(this);
    this.log = this.manager.buildLogger(this);

    this.log.debug('Creating module');

    this.nudgeOffsets = {
      ArrowLeft: { x: -this.config.NUDGE_PX, y: 0 },
      ArrowRight: { x: this.config.NUDGE_PX, y: 0 },
      ArrowUp: { x: 0, y: -this.config.NUDGE_PX },
      ArrowDown: { x: 0, y: this.config.NUDGE_PX },
    };
  }

  isNudgeKey(key: string): key is NudgeKey {
    return this.nudgeOffsets[key as NudgeKey] !== undefined;
  }

  syncCursorStyle = () => {

@@ -32,4 +60,45 @@ export class CanvasMoveToolModule extends CanvasModuleBase {
      selectedEntity.transformer.syncCursorStyle();
    }
  };

  nudge = (nudgeKey: NudgeKey) => {
    if ($focusedRegion.get() !== 'canvas') {
      return;
    }

    const selectedEntity = this.manager.stateApi.getSelectedEntityAdapter();

    if (!selectedEntity) {
      return;
    }

    if (
      selectedEntity.$isDisabled.get() ||
      selectedEntity.$isEmpty.get() ||
      selectedEntity.$isLocked.get() ||
      selectedEntity.$isEntityTypeHidden.get()
    ) {
      return;
    }

    const isBusy = this.manager.$isBusy.get();
    const isMoveToolSelected = this.parent.$tool.get() === 'move';
    const isThisEntityTransforming = this.manager.stateApi.$transformingAdapter.get() === selectedEntity;

    if (isBusy) {
      // When the canvas is busy, we shouldn't allow nudging - except when the canvas is busy transforming the selected
      // entity. Nudging is allowed during transformation, regardless of the selected tool.
      if (!isThisEntityTransforming) {
        return;
      }
    } else {
      // Otherwise, the canvas is not busy, and we should only allow nudging when the move tool is selected.
      if (!isMoveToolSelected) {
        return;
      }
    }

    const offset = this.nudgeOffsets[nudgeKey];
    selectedEntity.transformer.nudgeBy(offset);
  };
}
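`nudgeOffsets` doubles as both the lookup table and the type guard for its own keys: `isNudgeKey` narrows an arbitrary `KeyboardEvent.key` to the four arrow keys by probing the record. A self-contained sketch of the pattern:

type Coordinate = { x: number; y: number };
type NudgeKey = 'ArrowLeft' | 'ArrowRight' | 'ArrowUp' | 'ArrowDown';

const NUDGE_OFFSETS: Record<NudgeKey, Coordinate> = {
  ArrowLeft: { x: -1, y: 0 },
  ArrowRight: { x: 1, y: 0 },
  ArrowUp: { x: 0, y: -1 },
  ArrowDown: { x: 0, y: 1 },
};

// Narrow a plain string to NudgeKey using the table itself.
const isNudgeKey = (key: string): key is NudgeKey => key in NUDGE_OFFSETS;

const onKeyDown = (e: KeyboardEvent) => {
  if (isNudgeKey(e.key)) {
    const { x, y } = NUDGE_OFFSETS[e.key]; // e.key is typed as NudgeKey here
    console.log(`nudge by (${x}, ${y})`);
  }
};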
@@ -528,11 +528,16 @@ export class CanvasToolModule extends CanvasModuleBase {
  };

  onKeyDown = (e: KeyboardEvent) => {
    if (e.repeat) {
    if (e.target instanceof HTMLInputElement || e.target instanceof HTMLTextAreaElement) {
      return;
    }

    if (e.target instanceof HTMLInputElement || e.target instanceof HTMLTextAreaElement) {
    // Handle nudging - must be before repeat, as we may want to catch repeating keys
    if (this.tools.move.isNudgeKey(e.key)) {
      this.tools.move.nudge(e.key);
    }

    if (e.repeat) {
      return;
    }

@@ -18,6 +18,7 @@ import type {
  CanvasEntityType,
  CanvasInpaintMaskState,
  CanvasMetadata,
  EntityMovedByPayload,
  FillStyle,
  RegionalGuidanceReferenceImageState,
  RgbColor,

@@ -51,7 +52,7 @@ import type {
  EntityBrushLineAddedPayload,
  EntityEraserLineAddedPayload,
  EntityIdentifierPayload,
  EntityMovedPayload,
  EntityMovedToPayload,
  EntityRasterizedPayload,
  EntityRectAddedPayload,
  IPMethodV2,

@@ -1201,7 +1202,7 @@ export const canvasSlice = createSlice({
      }
      entity.fill.style = style;
    },
    entityMoved: (state, action: PayloadAction<EntityMovedPayload>) => {
    entityMovedTo: (state, action: PayloadAction<EntityMovedToPayload>) => {
      const { entityIdentifier, position } = action.payload;
      const entity = selectEntity(state, entityIdentifier);
      if (!entity) {

@@ -1212,6 +1213,20 @@ export const canvasSlice = createSlice({
        entity.position = position;
      }
    },
    entityMovedBy: (state, action: PayloadAction<EntityMovedByPayload>) => {
      const { entityIdentifier, offset } = action.payload;
      const entity = selectEntity(state, entityIdentifier);
      if (!entity) {
        return;
      }

      if (!isRenderableEntity(entity)) {
        return;
      }

      entity.position.x += offset.x;
      entity.position.y += offset.y;
    },
    entityRasterized: (state, action: PayloadAction<EntityRasterizedPayload>) => {
      const { entityIdentifier, imageObject, position, replaceObjects, isSelected } = action.payload;
      const entity = selectEntity(state, entityIdentifier);

@@ -1505,7 +1520,8 @@
  entityIsLockedToggled,
  entityFillColorChanged,
  entityFillStyleChanged,
  entityMoved,
  entityMovedTo,
  entityMovedBy,
  entityDuplicated,
  entityRasterized,
  entityBrushLineAdded,
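The move action is now split into `entityMovedTo` (absolute position) and `entityMovedBy` (relative offset), which lets the nudge path express "shift by N pixels" without first reading the current position. Hypothetical dispatches, assuming `store` is the app's redux store and `entityIdentifier` names an existing renderable entity:

import { entityMovedBy, entityMovedTo } from 'features/controlLayers/store/canvasSlice';

// Illustrative only:
store.dispatch(entityMovedTo({ entityIdentifier, position: { x: 128, y: 64 } }));
store.dispatch(entityMovedBy({ entityIdentifier, offset: { x: -1, y: 0 } })); // one ArrowLeft nudge at NUDGE_PX = 1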
@@ -83,7 +83,7 @@ const initialState: ParamsState = {
  canvasCoherenceMode: 'Gaussian Blur',
  canvasCoherenceMinDenoise: 0,
  canvasCoherenceEdgeSize: 16,
  infillMethod: 'patchmatch',
  infillMethod: 'lama',
  infillTileSize: 32,
  infillPatchmatchDownscaleSize: 1,
  infillColorValue: { r: 0, g: 0, b: 0, a: 1 },

@@ -273,24 +273,27 @@ export const paramsSlice = createSlice({
    setCanvasCoherenceMinDenoise: (state, action: PayloadAction<number>) => {
      state.canvasCoherenceMinDenoise = action.payload;
    },
    paramsReset: (state) => resetState(state),
  },
  extraReducers(builder) {
    builder.addMatcher(newSessionRequested, (state) => {
      // When a new session is requested, we need to keep the current model selections, plus dependent state
      // like VAE precision. Everything else gets reset to default.
      const newState = deepClone(initialState);
      newState.model = state.model;
      newState.vae = state.vae;
      newState.fluxVAE = state.fluxVAE;
      newState.vaePrecision = state.vaePrecision;
      newState.t5EncoderModel = state.t5EncoderModel;
      newState.clipEmbedModel = state.clipEmbedModel;
      newState.refinerModel = state.refinerModel;
      return newState;
    });
    builder.addMatcher(newSessionRequested, (state) => resetState(state));
  },
});

const resetState = (state: ParamsState): ParamsState => {
  // When a new session is requested, we need to keep the current model selections, plus dependent state
  // like VAE precision. Everything else gets reset to default.
  const newState = deepClone(initialState);
  newState.model = state.model;
  newState.vae = state.vae;
  newState.fluxVAE = state.fluxVAE;
  newState.vaePrecision = state.vaePrecision;
  newState.t5EncoderModel = state.t5EncoderModel;
  newState.clipEmbedModel = state.clipEmbedModel;
  newState.refinerModel = state.refinerModel;
  return newState;
};

export const {
  setInfillMethod,
  setInfillTileSize,

@@ -334,6 +337,7 @@ export const {
  setRefinerNegativeAestheticScore,
  setRefinerStart,
  modelChanged,
  paramsReset,
} = paramsSlice.actions;

/* eslint-disable-next-line @typescript-eslint/no-explicit-any */

@@ -439,7 +439,8 @@ export type EntityIdentifierPayload<
  entityIdentifier: CanvasEntityIdentifier<U>;
} & T;

export type EntityMovedPayload = EntityIdentifierPayload<{ position: Coordinate }>;
export type EntityMovedToPayload = EntityIdentifierPayload<{ position: Coordinate }>;
export type EntityMovedByPayload = EntityIdentifierPayload<{ offset: Coordinate }>;
export type EntityBrushLineAddedPayload = EntityIdentifierPayload<{
  brushLine: CanvasBrushLineState | CanvasBrushLineWithPressureState;
}>;

@@ -0,0 +1,133 @@
import type {
  CanvasControlLayerState,
  CanvasInpaintMaskState,
  CanvasRasterLayerState,
  CanvasReferenceImageState,
  CanvasRegionalGuidanceState,
} from 'features/controlLayers/store/types';
import type { ParameterModel } from 'features/parameters/types/parameterSchemas';

export const getRegionalGuidanceWarnings = (
  entity: CanvasRegionalGuidanceState,
  model: ParameterModel | null
): string[] => {
  const warnings: string[] = [];

  if (entity.objects.length === 0) {
    // Layer is in empty state - skip other checks
    warnings.push('parameters.invoke.layer.emptyLayer');
  } else {
    if (entity.positivePrompt === null && entity.negativePrompt === null && entity.referenceImages.length === 0) {
      // Must have at least 1 prompt or IP Adapter
      warnings.push('parameters.invoke.layer.rgNoPromptsOrIPAdapters');
    }

    if (model) {
      if (model.base === 'sd-3' || model.base === 'sd-2') {
        // Unsupported model architecture
        warnings.push('parameters.invoke.layer.unsupportedModel');
      } else if (model.base === 'flux') {
        // Some features are not supported for flux models
        if (entity.negativePrompt !== null) {
          warnings.push('parameters.invoke.layer.rgNegativePromptNotSupported');
        }
        if (entity.referenceImages.length > 0) {
          warnings.push('parameters.invoke.layer.rgReferenceImagesNotSupported');
        }
        if (entity.autoNegative) {
          warnings.push('parameters.invoke.layer.rgAutoNegativeNotSupported');
        }
      } else {
        entity.referenceImages.forEach(({ ipAdapter }) => {
          if (!ipAdapter.model) {
            // No model selected
            warnings.push('parameters.invoke.layer.ipAdapterNoModelSelected');
          } else if (ipAdapter.model.base !== model.base) {
            // Supported model architecture but doesn't match
            warnings.push('parameters.invoke.layer.ipAdapterIncompatibleBaseModel');
          }

          if (!ipAdapter.image) {
            // No image selected
            warnings.push('parameters.invoke.layer.ipAdapterNoImageSelected');
          }
        });
      }
    }
  }

  return warnings;
};

export const getGlobalReferenceImageWarnings = (
  entity: CanvasReferenceImageState,
  model: ParameterModel | null
): string[] => {
  const warnings: string[] = [];

  if (!entity.ipAdapter.model) {
    // No model selected
    warnings.push('parameters.invoke.layer.ipAdapterNoModelSelected');
  } else if (model) {
    if (model.base === 'sd-3' || model.base === 'sd-2') {
      // Unsupported model architecture
      warnings.push('parameters.invoke.layer.unsupportedModel');
    } else if (entity.ipAdapter.model.base !== model.base) {
      // Supported model architecture but doesn't match
      warnings.push('parameters.invoke.layer.ipAdapterIncompatibleBaseModel');
    }
  }

  if (!entity.ipAdapter.image) {
    // No image selected
    warnings.push('parameters.invoke.layer.ipAdapterNoImageSelected');
  }

  return warnings;
};

export const getControlLayerWarnings = (entity: CanvasControlLayerState, model: ParameterModel | null): string[] => {
  const warnings: string[] = [];

  if (entity.objects.length === 0) {
    // Layer is in empty state - skip other checks
    warnings.push('parameters.invoke.layer.emptyLayer');
  } else {
    if (!entity.controlAdapter.model) {
      // No model selected
      warnings.push('parameters.invoke.layer.controlAdapterNoModelSelected');
    } else if (model) {
      if (model.base === 'sd-3' || model.base === 'sd-2') {
        // Unsupported model architecture
        warnings.push('parameters.invoke.layer.unsupportedModel');
      } else if (entity.controlAdapter.model.base !== model.base) {
        // Supported model architecture but doesn't match
        warnings.push('parameters.invoke.layer.controlAdapterIncompatibleBaseModel');
      }
    }
  }

  return warnings;
};

export const getRasterLayerWarnings = (entity: CanvasRasterLayerState, _model: ParameterModel | null): string[] => {
  const warnings: string[] = [];

  if (entity.objects.length === 0) {
    // Layer is in empty state - skip other checks
    warnings.push('parameters.invoke.layer.emptyLayer');
  }

  return warnings;
};

export const getInpaintMaskWarnings = (entity: CanvasInpaintMaskState, _model: ParameterModel | null): string[] => {
  const warnings: string[] = [];

  if (entity.objects.length === 0) {
    // Layer is in empty state - skip other checks
    warnings.push('parameters.invoke.layer.emptyLayer');
  }

  return warnings;
};
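Note that these validators return i18n keys, not display strings; callers translate and format at the edge, as CanvasEntityHeaderWarnings does above with `t` and `upperFirst`. A hypothetical call site:

import { getControlLayerWarnings } from 'features/controlLayers/store/validators';
import { upperFirst } from 'lodash-es';

// Assuming `entity` is a CanvasControlLayerState, `model` is the selected main
// model (or null), and `t` is an i18next translate function:
const messages = getControlLayerWarnings(entity, model).map((key) => upperFirst(t(key)));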
@@ -7,7 +7,7 @@ const zSeedBehaviour = z.enum(['PER_ITERATION', 'PER_PROMPT']);
|
||||
type SeedBehaviour = z.infer<typeof zSeedBehaviour>;
|
||||
export const isSeedBehaviour = (v: unknown): v is SeedBehaviour => zSeedBehaviour.safeParse(v).success;
|
||||
|
||||
interface DynamicPromptsState {
|
||||
export interface DynamicPromptsState {
|
||||
_version: 1;
|
||||
maxPrompts: number;
|
||||
combinatorial: boolean;
|
||||
|
||||
@@ -0,0 +1,110 @@
|
||||
import { Menu, MenuButton, MenuItem, MenuList } from '@invoke-ai/ui-library';
|
||||
import { useAppStore } from 'app/store/nanostores/store';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import { SubMenuButtonContent, useSubMenu } from 'common/hooks/useSubMenu';
|
||||
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
|
||||
import { selectIsSD3 } from 'features/controlLayers/store/paramsSlice';
|
||||
import { useImageViewer } from 'features/gallery/components/ImageViewer/useImageViewer';
|
||||
import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext';
|
||||
import { newCanvasFromImage } from 'features/imageActions/actions';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { setActiveTab } from 'features/ui/store/uiSlice';
|
||||
import { memo, useCallback } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { PiFileBold, PiPlusBold } from 'react-icons/pi';
|
||||
|
||||
export const ImageMenuItemNewCanvasFromImageSubMenu = memo(() => {
|
||||
const { t } = useTranslation();
|
||||
const subMenu = useSubMenu();
|
||||
const store = useAppStore();
|
||||
const imageDTO = useImageDTOContext();
|
||||
const imageViewer = useImageViewer();
|
||||
const isBusy = useCanvasIsBusy();
|
||||
const isSD3 = useAppSelector(selectIsSD3);
|
||||
|
||||
const onClickNewCanvasWithRasterLayerFromImage = useCallback(() => {
|
||||
const { dispatch, getState } = store;
|
||||
newCanvasFromImage({ imageDTO, withResize: false, type: 'raster_layer', dispatch, getState });
|
||||
dispatch(setActiveTab('canvas'));
|
||||
imageViewer.close();
|
||||
toast({
|
||||
id: 'SENT_TO_CANVAS',
|
||||
title: t('toast.sentToCanvas'),
|
||||
status: 'success',
|
||||
});
|
||||
}, [imageDTO, imageViewer, store, t]);
|
||||
|
||||
const onClickNewCanvasWithControlLayerFromImage = useCallback(() => {
|
||||
const { dispatch, getState } = store;
|
||||
newCanvasFromImage({ imageDTO, withResize: false, type: 'control_layer', dispatch, getState });
|
||||
dispatch(setActiveTab('canvas'));
|
||||
imageViewer.close();
|
||||
toast({
|
||||
id: 'SENT_TO_CANVAS',
|
||||
title: t('toast.sentToCanvas'),
|
||||
status: 'success',
|
||||
});
|
||||
}, [imageDTO, imageViewer, store, t]);
|
||||
|
||||
const onClickNewCanvasWithRasterLayerFromImageWithResize = useCallback(() => {
|
||||
const { dispatch, getState } = store;
|
||||
newCanvasFromImage({ imageDTO, withResize: true, type: 'raster_layer', dispatch, getState });
|
||||
dispatch(setActiveTab('canvas'));
|
||||
imageViewer.close();
|
||||
toast({
|
||||
id: 'SENT_TO_CANVAS',
|
||||
title: t('toast.sentToCanvas'),
|
||||
status: 'success',
|
||||
});
|
||||
}, [imageDTO, imageViewer, store, t]);
|
||||
|
||||
const onClickNewCanvasWithControlLayerFromImageWithResize = useCallback(() => {
|
||||
const { dispatch, getState } = store;
|
||||
newCanvasFromImage({ imageDTO, withResize: true, type: 'control_layer', dispatch, getState });
|
||||
dispatch(setActiveTab('canvas'));
|
||||
imageViewer.close();
|
||||
toast({
|
||||
id: 'SENT_TO_CANVAS',
|
||||
title: t('toast.sentToCanvas'),
|
||||
status: 'success',
|
||||
});
|
||||
}, [imageDTO, imageViewer, store, t]);
|
||||
|
||||
return (
|
||||
<MenuItem {...subMenu.parentMenuItemProps} icon={<PiPlusBold />}>
|
||||
<Menu {...subMenu.menuProps}>
|
||||
<MenuButton {...subMenu.menuButtonProps}>
|
||||
<SubMenuButtonContent label={t('controlLayers.newCanvasFromImage')} />
|
||||
</MenuButton>
|
||||
<MenuList {...subMenu.menuListProps}>
|
||||
<MenuItem icon={<PiFileBold />} onClickCapture={onClickNewCanvasWithRasterLayerFromImage} isDisabled={isBusy}>
|
||||
{t('controlLayers.asRasterLayer')}
|
||||
</MenuItem>
|
||||
<MenuItem
|
||||
icon={<PiFileBold />}
|
||||
onClickCapture={onClickNewCanvasWithRasterLayerFromImageWithResize}
|
||||
isDisabled={isBusy}
|
||||
>
|
||||
{t('controlLayers.asRasterLayerResize')}
|
||||
</MenuItem>
|
||||
<MenuItem
|
||||
icon={<PiFileBold />}
|
||||
onClickCapture={onClickNewCanvasWithControlLayerFromImage}
|
||||
isDisabled={isBusy || isSD3}
|
||||
>
|
||||
{t('controlLayers.asControlLayer')}
|
||||
</MenuItem>
|
||||
<MenuItem
|
||||
icon={<PiFileBold />}
|
||||
onClickCapture={onClickNewCanvasWithControlLayerFromImageWithResize}
|
||||
isDisabled={isBusy || isSD3}
|
||||
>
|
||||
{t('controlLayers.asControlLayerResize')}
|
||||
</MenuItem>
|
||||
</MenuList>
|
||||
</Menu>
|
||||
</MenuItem>
|
||||
);
|
||||
});
|
||||
|
||||
ImageMenuItemNewCanvasFromImageSubMenu.displayName = 'ImageMenuItemNewCanvasFromImageSubMenu';
|
||||
@@ -8,14 +8,14 @@ import { selectIsFLUX, selectIsSD3 } from 'features/controlLayers/store/paramsSl
import { useImageViewer } from 'features/gallery/components/ImageViewer/useImageViewer';
import { useImageDTOContext } from 'features/gallery/contexts/ImageDTOContext';
import { sentImageToCanvas } from 'features/gallery/store/actions';
import { createNewCanvasEntityFromImage, newCanvasFromImage } from 'features/imageActions/actions';
import { createNewCanvasEntityFromImage } from 'features/imageActions/actions';
import { toast } from 'features/toast/toast';
import { setActiveTab } from 'features/ui/store/uiSlice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiFileBold, PiPlusBold } from 'react-icons/pi';
import { PiPlusBold } from 'react-icons/pi';

export const ImageMenuItemNewFromImageSubMenu = memo(() => {
export const ImageMenuItemNewLayerFromImageSubMenu = memo(() => {
  const { t } = useTranslation();
  const subMenu = useSubMenu();
  const store = useAppStore();
@@ -25,30 +25,6 @@ export const ImageMenuItemNewFromImageSubMenu = memo(() => {
  const isFLUX = useAppSelector(selectIsFLUX);
  const isSD3 = useAppSelector(selectIsSD3);

  const onClickNewCanvasWithRasterLayerFromImage = useCallback(() => {
    const { dispatch, getState } = store;
    newCanvasFromImage({ imageDTO, type: 'raster_layer', dispatch, getState });
    dispatch(setActiveTab('canvas'));
    imageViewer.close();
    toast({
      id: 'SENT_TO_CANVAS',
      title: t('toast.sentToCanvas'),
      status: 'success',
    });
  }, [imageDTO, imageViewer, store, t]);

  const onClickNewCanvasWithControlLayerFromImage = useCallback(() => {
    const { dispatch, getState } = store;
    newCanvasFromImage({ imageDTO, type: 'control_layer', dispatch, getState });
    dispatch(setActiveTab('canvas'));
    imageViewer.close();
    toast({
      id: 'SENT_TO_CANVAS',
      title: t('toast.sentToCanvas'),
      status: 'success',
    });
  }, [imageDTO, imageViewer, store, t]);

  const onClickNewRasterLayerFromImage = useCallback(() => {
    const { dispatch, getState } = store;
    createNewCanvasEntityFromImage({ imageDTO, type: 'raster_layer', dispatch, getState });
@@ -105,19 +81,9 @@ export const ImageMenuItemNewFromImageSubMenu = memo(() => {
    <MenuItem {...subMenu.parentMenuItemProps} icon={<PiPlusBold />}>
      <Menu {...subMenu.menuProps}>
        <MenuButton {...subMenu.menuButtonProps}>
          <SubMenuButtonContent label={t('controlLayers.newFromImage')} />
          <SubMenuButtonContent label={t('controlLayers.newLayerFromImage')} />
        </MenuButton>
        <MenuList {...subMenu.menuListProps}>
          <MenuItem icon={<PiFileBold />} onClickCapture={onClickNewCanvasWithRasterLayerFromImage} isDisabled={isBusy}>
            {t('controlLayers.canvasAsRasterLayer')}
          </MenuItem>
          <MenuItem
            icon={<PiFileBold />}
            onClickCapture={onClickNewCanvasWithControlLayerFromImage}
            isDisabled={isBusy || isSD3}
          >
            {t('controlLayers.canvasAsControlLayer')}
          </MenuItem>
          <MenuItem icon={<NewLayerIcon />} onClickCapture={onClickNewInpaintMaskFromImage} isDisabled={isBusy}>
            {t('controlLayers.inpaintMask')}
          </MenuItem>
@@ -144,4 +110,4 @@ export const ImageMenuItemNewFromImageSubMenu = memo(() => {
  );
});

ImageMenuItemNewFromImageSubMenu.displayName = 'ImageMenuItemNewFromImageSubMenu';
ImageMenuItemNewLayerFromImageSubMenu.displayName = 'ImageMenuItemNewLayerFromImageSubMenu';
@@ -7,7 +7,8 @@ import { ImageMenuItemDelete } from 'features/gallery/components/ImageContextMen
import { ImageMenuItemDownload } from 'features/gallery/components/ImageContextMenu/ImageMenuItemDownload';
import { ImageMenuItemLoadWorkflow } from 'features/gallery/components/ImageContextMenu/ImageMenuItemLoadWorkflow';
import { ImageMenuItemMetadataRecallActions } from 'features/gallery/components/ImageContextMenu/ImageMenuItemMetadataRecallActions';
import { ImageMenuItemNewFromImageSubMenu } from 'features/gallery/components/ImageContextMenu/ImageMenuItemNewFromImageSubMenu';
import { ImageMenuItemNewCanvasFromImageSubMenu } from 'features/gallery/components/ImageContextMenu/ImageMenuItemNewCanvasFromImageSubMenu';
import { ImageMenuItemNewLayerFromImageSubMenu } from 'features/gallery/components/ImageContextMenu/ImageMenuItemNewLayerFromImageSubMenu';
import { ImageMenuItemOpenInNewTab } from 'features/gallery/components/ImageContextMenu/ImageMenuItemOpenInNewTab';
import { ImageMenuItemOpenInViewer } from 'features/gallery/components/ImageContextMenu/ImageMenuItemOpenInViewer';
import { ImageMenuItemSelectForCompare } from 'features/gallery/components/ImageContextMenu/ImageMenuItemSelectForCompare';
@@ -38,7 +39,8 @@ const SingleSelectionMenuItems = ({ imageDTO }: SingleSelectionMenuItemsProps) =
      <MenuDivider />
      <ImageMenuItemSendToUpscale />
      <CanvasManagerProviderGate>
        <ImageMenuItemNewFromImageSubMenu />
        <ImageMenuItemNewCanvasFromImageSubMenu />
        <ImageMenuItemNewLayerFromImageSubMenu />
      </CanvasManagerProviderGate>
      <MenuDivider />
      <ImageMenuItemChangeBoard />
@@ -182,21 +182,24 @@ export const createNewCanvasEntityFromImage = (arg: {
};

/**
 * Creates a new canvas with the given image as the initial image, replicating the img2img flow:
 * Creates a new canvas with the given image as the only layer:
 * - Reset the canvas
 * - Resize the bbox to the image's aspect ratio at the optimal size for the selected model
 * - Add the image as a raster layer
 * - Resizes the layer to fit the bbox using the 'fill' strategy
 * - Add the image as a layer of the given type
 * - If `withResize`: Resizes the layer to fit the bbox using the 'fill' strategy
 *
 * This allows the user to immediately generate a new image from the given image without any additional steps.
 *
 * Using 'raster_layer' for the type and enabling `withResize` replicates the common img2img flow.
 */
export const newCanvasFromImage = (arg: {
  imageDTO: ImageDTO;
  type: CanvasEntityType | 'regional_guidance_with_reference_image';
  withResize: boolean;
  dispatch: AppDispatch;
  getState: () => RootState;
}) => {
  const { type, imageDTO, dispatch, getState } = arg;
  const { type, imageDTO, withResize, dispatch, getState } = arg;
  const state = getState();

  const base = selectBboxModelBase(state);
@@ -229,7 +232,9 @@ export const newCanvasFromImage = (arg: {
      objects: [imageObject],
      position: { x, y },
    } satisfies Partial<CanvasRasterLayerState>;
    addInitCallback(overrides.id);
    if (withResize) {
      addInitCallback(overrides.id);
    }
    dispatch(canvasReset());
    // The `bboxChangedFromCanvas` reducer does no validation! Careful!
    dispatch(bboxChangedFromCanvas({ x: 0, y: 0, width, height }));
@@ -243,7 +248,9 @@ export const newCanvasFromImage = (arg: {
      position: { x, y },
      controlAdapter: deepClone(initialControlNet),
    } satisfies Partial<CanvasControlLayerState>;
    addInitCallback(overrides.id);
    if (withResize) {
      addInitCallback(overrides.id);
    }
    dispatch(canvasReset());
    // The `bboxChangedFromCanvas` reducer does no validation! Careful!
    dispatch(bboxChangedFromCanvas({ x: 0, y: 0, width, height }));
@@ -256,7 +263,9 @@ export const newCanvasFromImage = (arg: {
      objects: [imageObject],
      position: { x, y },
    } satisfies Partial<CanvasInpaintMaskState>;
    addInitCallback(overrides.id);
    if (withResize) {
      addInitCallback(overrides.id);
    }
    dispatch(canvasReset());
    // The `bboxChangedFromCanvas` reducer does no validation! Careful!
    dispatch(bboxChangedFromCanvas({ x: 0, y: 0, width, height }));
@@ -269,7 +278,9 @@ export const newCanvasFromImage = (arg: {
      objects: [imageObject],
      position: { x, y },
    } satisfies Partial<CanvasRegionalGuidanceState>;
    addInitCallback(overrides.id);
    if (withResize) {
      addInitCallback(overrides.id);
    }
    dispatch(canvasReset());
    // The `bboxChangedFromCanvas` reducer does no validation! Careful!
    dispatch(bboxChangedFromCanvas({ x: 0, y: 0, width, height }));
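As the updated docstring says, passing `type: 'raster_layer'` with `withResize: true` reproduces the classic img2img setup. A minimal usage sketch — the surrounding component wiring is assumed, not shown in the diff:

```typescript
// Minimal sketch: start an img2img-style canvas session from a gallery image.
// `store` is the app's Redux store; `imageDTO` is the selected image.
const { dispatch, getState } = store;
newCanvasFromImage({
  imageDTO,             // the gallery image to start from
  type: 'raster_layer', // add it as a raster layer...
  withResize: true,     // ...and fit it to the bbox via the 'fill' strategy
  dispatch,
  getState,
});
```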
@@ -41,7 +41,11 @@ const ClassificationTooltipContent = memo(({ classification }: { classification:
  }

  if (classification === 'internal') {
    return t('nodes.prototypeDesc');
    return t('nodes.internalDesc');
  }

  if (classification === 'special') {
    return t('nodes.specialDesc');
  }

  return null;
@@ -4,7 +4,7 @@ import type { PersistConfig, RootState } from 'app/store/store';
import type { Selector } from 'react-redux';
import { SelectionMode } from 'reactflow';

type WorkflowSettingsState = {
export type WorkflowSettingsState = {
  _version: 1;
  shouldShowMinimapPanel: boolean;
  shouldValidateGraph: boolean;
@@ -1,35 +1,41 @@
import { logger } from 'app/logging/logger';
import { withResultAsync } from 'common/util/result';
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import type {
  CanvasControlLayerState,
  ControlNetConfig,
  Rect,
  T2IAdapterConfig,
} from 'features/controlLayers/store/types';
import type { CanvasControlLayerState, Rect } from 'features/controlLayers/store/types';
import { getControlLayerWarnings } from 'features/controlLayers/store/validators';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import type { ParameterModel } from 'features/parameters/types/parameterSchemas';
import { serializeError } from 'serialize-error';
import type { BaseModelType, ImageDTO, Invocation } from 'services/api/types';
import type { ImageDTO, Invocation } from 'services/api/types';
import { assert } from 'tsafe';

const log = logger('system');

type AddControlNetsArg = {
  manager: CanvasManager;
  entities: CanvasControlLayerState[];
  g: Graph;
  rect: Rect;
  collector: Invocation<'collect'>;
  model: ParameterModel;
};

type AddControlNetsResult = {
  addedControlNets: number;
};

export const addControlNets = async (
  manager: CanvasManager,
  layers: CanvasControlLayerState[],
  g: Graph,
  rect: Rect,
  collector: Invocation<'collect'>,
  base: BaseModelType
): Promise<AddControlNetsResult> => {
  const validControlLayers = layers
    .filter((layer) => layer.isEnabled)
    .filter((layer) => isValidControlAdapter(layer.controlAdapter, base))
    .filter((layer) => layer.controlAdapter.type === 'controlnet');
export const addControlNets = async ({
  manager,
  entities,
  g,
  rect,
  collector,
  model,
}: AddControlNetsArg): Promise<AddControlNetsResult> => {
  const validControlLayers = entities
    .filter((entity) => entity.isEnabled)
    .filter((entity) => entity.controlAdapter.type === 'controlnet')
    .filter((entity) => getControlLayerWarnings(entity, model).length === 0);

  const result: AddControlNetsResult = {
    addedControlNets: 0,
@@ -54,22 +60,31 @@ export const addControlNets = async (
  return result;
};

type AddT2IAdaptersArg = {
  manager: CanvasManager;
  entities: CanvasControlLayerState[];
  g: Graph;
  rect: Rect;
  collector: Invocation<'collect'>;
  model: ParameterModel;
};

type AddT2IAdaptersResult = {
  addedT2IAdapters: number;
};

export const addT2IAdapters = async (
  manager: CanvasManager,
  layers: CanvasControlLayerState[],
  g: Graph,
  rect: Rect,
  collector: Invocation<'collect'>,
  base: BaseModelType
): Promise<AddT2IAdaptersResult> => {
  const validControlLayers = layers
    .filter((layer) => layer.isEnabled)
    .filter((layer) => isValidControlAdapter(layer.controlAdapter, base))
    .filter((layer) => layer.controlAdapter.type === 't2i_adapter');
export const addT2IAdapters = async ({
  manager,
  entities,
  g,
  rect,
  collector,
  model,
}: AddT2IAdaptersArg): Promise<AddT2IAdaptersResult> => {
  const validControlLayers = entities
    .filter((entity) => entity.isEnabled)
    .filter((entity) => entity.controlAdapter.type === 't2i_adapter')
    .filter((entity) => getControlLayerWarnings(entity, model).length === 0);

  const result: AddT2IAdaptersResult = {
    addedT2IAdapters: 0,
@@ -145,11 +160,3 @@ const addT2IAdapterToGraph = (

  g.addEdge(t2iAdapter, 't2i_adapter', collector, 'item');
};

const isValidControlAdapter = (controlAdapter: ControlNetConfig | T2IAdapterConfig, base: BaseModelType): boolean => {
  // Must be have a model
  const hasModel = Boolean(controlAdapter.model);
  // Model must match the current base model
  const modelMatchesBase = controlAdapter.model?.base === base;
  return hasModel && modelMatchesBase;
};
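The refactor replaces six positional parameters with a single argument object, so every field is named at the call site. Before/after, as the graph builders later in this diff actually call it:

```typescript
// Before: positional arguments — easy to transpose rect/collector/base silently.
// await addControlNets(manager, canvas.controlLayers.entities, g, canvas.bbox.rect, collector, base);

// After: one argument object, and validity comes from the shared validator
// getControlLayerWarnings(entity, model) instead of a local isValid* predicate.
await addControlNets({
  manager,
  entities: canvas.controlLayers.entities,
  g,
  rect: canvas.bbox.rect,
  collector,
  model: modelConfig,
});
```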
@@ -1,19 +1,23 @@
import type { CanvasReferenceImageState } from 'features/controlLayers/store/types';
import { getGlobalReferenceImageWarnings } from 'features/controlLayers/store/validators';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import type { BaseModelType, Invocation } from 'services/api/types';
import type { ParameterModel } from 'features/parameters/types/parameterSchemas';
import type { Invocation } from 'services/api/types';
import { assert } from 'tsafe';

type AddIPAdaptersResult = {
  addedIPAdapters: number;
};

export const addIPAdapters = (
  ipAdapters: CanvasReferenceImageState[],
  g: Graph,
  collector: Invocation<'collect'>,
  base: BaseModelType
): AddIPAdaptersResult => {
  const validIPAdapters = ipAdapters.filter((entity) => isValidIPAdapter(entity, base));
type AddIPAdaptersArg = {
  entities: CanvasReferenceImageState[];
  g: Graph;
  collector: Invocation<'collect'>;
  model: ParameterModel;
};

export const addIPAdapters = ({ entities, g, collector, model }: AddIPAdaptersArg): AddIPAdaptersResult => {
  const validIPAdapters = entities.filter((entity) => getGlobalReferenceImageWarnings(entity, model).length === 0);

  const result: AddIPAdaptersResult = {
    addedIPAdapters: 0,
@@ -76,11 +80,3 @@ const addIPAdapter = (entity: CanvasReferenceImageState, g: Graph, collector: In

  g.addEdge(ipAdapterNode, 'ip_adapter', collector, 'item');
};

const isValidIPAdapter = ({ isEnabled, ipAdapter }: CanvasReferenceImageState, base: BaseModelType): boolean => {
  // Must be have a model that matches the current base and must have a control image
  const hasModel = Boolean(ipAdapter.model);
  const modelMatchesBase = ipAdapter.model?.base === base;
  const hasImage = Boolean(ipAdapter.image);
  return isEnabled && hasModel && modelMatchesBase && hasImage;
};
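Across these hunks the ad-hoc `isValid*` predicates are deleted in favor of the shared validators, so "valid" now means "produces zero warnings". Presumably the same validators drive the warning badges in the UI, so the graph builders and the UI can no longer disagree about what is usable. The pattern, condensed:

```typescript
// Validity as "no warnings" — one source of truth for UI and graph building.
// getGlobalReferenceImageWarnings is the validator imported above; the example
// warning string is illustrative, not taken from the diff.
const warnings = getGlobalReferenceImageWarnings(entity, model); // e.g. ['No model selected']
const isValid = warnings.length === 0;
```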
@@ -3,15 +3,12 @@ import { deepClone } from 'common/util/deepClone';
import { withResultAsync } from 'common/util/result';
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { getPrefixedId } from 'features/controlLayers/konva/util';
import type {
  CanvasRegionalGuidanceState,
  IPAdapterConfig,
  Rect,
  RegionalGuidanceReferenceImageState,
} from 'features/controlLayers/store/types';
import type { CanvasRegionalGuidanceState, Rect } from 'features/controlLayers/store/types';
import { getRegionalGuidanceWarnings } from 'features/controlLayers/store/validators';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import type { ParameterModel } from 'features/parameters/types/parameterSchemas';
import { serializeError } from 'serialize-error';
import type { BaseModelType, Invocation } from 'services/api/types';
import type { Invocation } from 'services/api/types';
import { assert } from 'tsafe';

const log = logger('system');
@@ -23,19 +20,26 @@ type AddedRegionResult = {
  addedIPAdapters: number;
};

const isValidRegion = (rg: CanvasRegionalGuidanceState, base: BaseModelType) => {
  const isEnabled = rg.isEnabled;
  const hasTextPrompt = Boolean(rg.positivePrompt || rg.negativePrompt);
  const hasIPAdapter = rg.referenceImages.filter(({ ipAdapter }) => isValidIPAdapter(ipAdapter, base)).length > 0;
  return isEnabled && (hasTextPrompt || hasIPAdapter);
type AddRegionsArg = {
  manager: CanvasManager;
  regions: CanvasRegionalGuidanceState[];
  g: Graph;
  bbox: Rect;
  model: ParameterModel;
  posCond: Invocation<'compel' | 'sdxl_compel_prompt' | 'flux_text_encoder'>;
  negCond: Invocation<'compel' | 'sdxl_compel_prompt' | 'flux_text_encoder'> | null;
  posCondCollect: Invocation<'collect'>;
  negCondCollect: Invocation<'collect'> | null;
  ipAdapterCollect: Invocation<'collect'>;
};

/**
 * Adds regional guidance to the graph
 * @param manager The canvas manager
 * @param regions Array of regions to add
 * @param g The graph to add the layers to
 * @param base The base model type
 * @param denoise The main denoise node
 * @param bbox The bounding box
 * @param model The main model
 * @param posCond The positive conditioning node
 * @param negCond The negative conditioning node
 * @param posCondCollect The positive conditioning collector
@@ -44,22 +48,28 @@ const isValidRegion = (rg: CanvasRegionalGuidanceState, base: BaseModelType) =>
 * @returns A promise that resolves to the regions that were successfully added to the graph
 */

export const addRegions = async (
  manager: CanvasManager,
  regions: CanvasRegionalGuidanceState[],
  g: Graph,
  bbox: Rect,
  base: BaseModelType,
  denoise: Invocation<'denoise_latents'>,
  posCond: Invocation<'compel'> | Invocation<'sdxl_compel_prompt'>,
  negCond: Invocation<'compel'> | Invocation<'sdxl_compel_prompt'>,
  posCondCollect: Invocation<'collect'>,
  negCondCollect: Invocation<'collect'>,
  ipAdapterCollect: Invocation<'collect'>
): Promise<AddedRegionResult[]> => {
  const isSDXL = base === 'sdxl';
export const addRegions = async ({
  manager,
  regions,
  g,
  bbox,
  model,
  posCond,
  negCond,
  posCondCollect,
  negCondCollect,
  ipAdapterCollect,
}: AddRegionsArg): Promise<AddedRegionResult[]> => {
  const isSDXL = model.base === 'sdxl';
  const isFLUX = model.base === 'flux';

  const validRegions = regions.filter((rg) => {
    if (!rg.isEnabled) {
      return false;
    }
    return getRegionalGuidanceWarnings(rg, model).length === 0;
  });

  const validRegions = regions.filter((rg) => isValidRegion(rg, base));
  const results: AddedRegionResult[] = [];

  for (const region of validRegions) {
@@ -94,20 +104,27 @@ export const addRegions = async (
    if (region.positivePrompt) {
      // The main positive conditioning node
      result.addedPositivePrompt = true;
      const regionalPosCond = g.addNode(
        isSDXL
          ? {
              type: 'sdxl_compel_prompt',
              id: getPrefixedId('prompt_region_positive_cond'),
              prompt: region.positivePrompt,
              style: region.positivePrompt, // TODO: Should we put the positive prompt in both fields?
            }
          : {
              type: 'compel',
              id: getPrefixedId('prompt_region_positive_cond'),
              prompt: region.positivePrompt,
            }
      );
      let regionalPosCond: Invocation<'compel' | 'sdxl_compel_prompt' | 'flux_text_encoder'>;
      if (isSDXL) {
        regionalPosCond = g.addNode({
          type: 'sdxl_compel_prompt',
          id: getPrefixedId('prompt_region_positive_cond'),
          prompt: region.positivePrompt,
          style: region.positivePrompt, // TODO: Should we put the positive prompt in both fields?
        });
      } else if (isFLUX) {
        regionalPosCond = g.addNode({
          type: 'flux_text_encoder',
          id: getPrefixedId('prompt_region_positive_cond'),
          prompt: region.positivePrompt,
        });
      } else {
        regionalPosCond = g.addNode({
          type: 'compel',
          id: getPrefixedId('prompt_region_positive_cond'),
          prompt: region.positivePrompt,
        });
      }
      // Connect the mask to the conditioning
      g.addEdge(maskToTensor, 'mask', regionalPosCond, 'mask');
      // Connect the conditioning to the collector
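The same three-way branch (SDXL / FLUX / SD1) recurs for the negative and inverted-positive nodes below. A hypothetical helper could collapse it — a sketch only, not part of the commit:

```typescript
// Hypothetical helper (not in the diff): build a regional conditioning node for
// the current architecture. `g`, `getPrefixedId`, and the node types are those
// used throughout this file.
const buildRegionalCond = (
  g: Graph,
  model: ParameterModel,
  idPrefix: string,
  prompt: string
): Invocation<'compel' | 'sdxl_compel_prompt' | 'flux_text_encoder'> => {
  const id = getPrefixedId(idPrefix);
  if (model.base === 'sdxl') {
    // SDXL takes the prompt in both the prompt and style fields here.
    return g.addNode({ type: 'sdxl_compel_prompt', id, prompt, style: prompt });
  }
  if (model.base === 'flux') {
    return g.addNode({ type: 'flux_text_encoder', id, prompt });
  }
  return g.addNode({ type: 'compel', id, prompt });
};
```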
@@ -115,38 +132,55 @@ export const addRegions = async (
      // Copy the connections to the "global" positive conditioning node to the regional cond
      if (posCond.type === 'compel') {
        for (const edge of g.getEdgesTo(posCond, ['clip', 'mask'])) {
          // Clone the edge, but change the destination node to the regional conditioning node
          const clone = deepClone(edge);
          clone.destination.node_id = regionalPosCond.id;
          g.addEdgeFromObj(clone);
        }
      } else if (posCond.type === 'sdxl_compel_prompt') {
        for (const edge of g.getEdgesTo(posCond, ['clip', 'clip2', 'mask'])) {
          const clone = deepClone(edge);
          clone.destination.node_id = regionalPosCond.id;
          g.addEdgeFromObj(clone);
        }
      } else if (posCond.type === 'flux_text_encoder') {
        for (const edge of g.getEdgesTo(posCond, ['clip', 't5_encoder', 't5_max_seq_len', 'mask'])) {
          const clone = deepClone(edge);
          clone.destination.node_id = regionalPosCond.id;
          g.addEdgeFromObj(clone);
        }
      } else {
        for (const edge of g.getEdgesTo(posCond, ['clip', 'clip2', 'mask'])) {
          // Clone the edge, but change the destination node to the regional conditioning node
          const clone = deepClone(edge);
          clone.destination.node_id = regionalPosCond.id;
          g.addEdgeFromObj(clone);
        }
        assert(false, 'Unsupported positive conditioning node type.');
      }
    }

    if (region.negativePrompt) {
      result.addedNegativePrompt = true;
      assert(negCond, 'Negative conditioning node is required if there is a negative prompt');
      assert(negCondCollect, 'Negative conditioning collector is required if there is a negative prompt');

      // The main negative conditioning node
      const regionalNegCond = g.addNode(
        isSDXL
          ? {
              type: 'sdxl_compel_prompt',
              id: getPrefixedId('prompt_region_negative_cond'),
              prompt: region.negativePrompt,
              style: region.negativePrompt,
            }
          : {
              type: 'compel',
              id: getPrefixedId('prompt_region_negative_cond'),
              prompt: region.negativePrompt,
            }
      );
      result.addedNegativePrompt = true;
      let regionalNegCond: Invocation<'compel' | 'sdxl_compel_prompt' | 'flux_text_encoder'>;
      if (isSDXL) {
        regionalNegCond = g.addNode({
          type: 'sdxl_compel_prompt',
          id: getPrefixedId('prompt_region_negative_cond'),
          prompt: region.negativePrompt,
          style: region.negativePrompt,
        });
      } else if (isFLUX) {
        regionalNegCond = g.addNode({
          type: 'flux_text_encoder',
          id: getPrefixedId('prompt_region_negative_cond'),
          prompt: region.negativePrompt,
        });
      } else {
        regionalNegCond = g.addNode({
          type: 'compel',
          id: getPrefixedId('prompt_region_negative_cond'),
          prompt: region.negativePrompt,
        });
      }

      // Connect the mask to the conditioning
      g.addEdge(maskToTensor, 'mask', regionalNegCond, 'mask');
      // Connect the conditioning to the collector
@@ -158,17 +192,27 @@ export const addRegions = async (
          clone.destination.node_id = regionalNegCond.id;
          g.addEdgeFromObj(clone);
        }
      } else {
      } else if (negCond.type === 'sdxl_compel_prompt') {
        for (const edge of g.getEdgesTo(negCond, ['clip', 'clip2', 'mask'])) {
          const clone = deepClone(edge);
          clone.destination.node_id = regionalNegCond.id;
          g.addEdgeFromObj(clone);
        }
      } else if (negCond.type === 'flux_text_encoder') {
        for (const edge of g.getEdgesTo(negCond, ['clip', 't5_encoder', 't5_max_seq_len', 'mask'])) {
          const clone = deepClone(edge);
          clone.destination.node_id = regionalNegCond.id;
          g.addEdgeFromObj(clone);
        }
      } else {
        assert(false, 'Unsupported negative conditioning node type.');
      }
    }

    // If we are using the "invert" auto-negative setting, we need to add an additional negative conditioning node
    if (region.autoNegative && region.positivePrompt) {
      assert(negCondCollect, 'Negative conditioning collector is required if there is an auto-negative setting');

      result.addedAutoNegativePositivePrompt = true;
      // We re-use the mask image, but invert it when converting to tensor
      const invertTensorMask = g.addNode({
@@ -178,20 +222,27 @@ export const addRegions = async (
      // Connect the OG mask image to the inverted mask-to-tensor node
      g.addEdge(maskToTensor, 'mask', invertTensorMask, 'mask');
      // Create the conditioning node. It's going to be connected to the negative cond collector, but it uses the positive prompt
      const regionalPosCondInverted = g.addNode(
        isSDXL
          ? {
              type: 'sdxl_compel_prompt',
              id: getPrefixedId('prompt_region_positive_cond_inverted'),
              prompt: region.positivePrompt,
              style: region.positivePrompt,
            }
          : {
              type: 'compel',
              id: getPrefixedId('prompt_region_positive_cond_inverted'),
              prompt: region.positivePrompt,
            }
      );
      let regionalPosCondInverted: Invocation<'compel' | 'sdxl_compel_prompt' | 'flux_text_encoder'>;
      if (isSDXL) {
        regionalPosCondInverted = g.addNode({
          type: 'sdxl_compel_prompt',
          id: getPrefixedId('prompt_region_positive_cond_inverted'),
          prompt: region.positivePrompt,
          style: region.positivePrompt,
        });
      } else if (isFLUX) {
        regionalPosCondInverted = g.addNode({
          type: 'flux_text_encoder',
          id: getPrefixedId('prompt_region_positive_cond_inverted'),
          prompt: region.positivePrompt,
        });
      } else {
        regionalPosCondInverted = g.addNode({
          type: 'compel',
          id: getPrefixedId('prompt_region_positive_cond_inverted'),
          prompt: region.positivePrompt,
        });
      }
      // Connect the inverted mask to the conditioning
      g.addEdge(invertTensorMask, 'mask', regionalPosCondInverted, 'mask');
      // Connect the conditioning to the negative collector
@@ -203,20 +254,26 @@ export const addRegions = async (
          clone.destination.node_id = regionalPosCondInverted.id;
          g.addEdgeFromObj(clone);
        }
      } else {
      } else if (posCond.type === 'sdxl_compel_prompt') {
        for (const edge of g.getEdgesTo(posCond, ['clip', 'clip2', 'mask'])) {
          const clone = deepClone(edge);
          clone.destination.node_id = regionalPosCondInverted.id;
          g.addEdgeFromObj(clone);
        }
      } else if (posCond.type === 'flux_text_encoder') {
        for (const edge of g.getEdgesTo(posCond, ['clip', 't5_encoder', 't5_max_seq_len', 'mask'])) {
          const clone = deepClone(edge);
          clone.destination.node_id = regionalPosCondInverted.id;
          g.addEdgeFromObj(clone);
        }
      } else {
        assert(false, 'Unsupported positive conditioning node type.');
      }
    }

    const validRGIPAdapters: RegionalGuidanceReferenceImageState[] = region.referenceImages.filter(({ ipAdapter }) =>
      isValidIPAdapter(ipAdapter, base)
    );
    for (const { id, ipAdapter } of region.referenceImages) {
      assert(!isFLUX, 'Regional IP adapters are not supported for FLUX.');

    for (const { id, ipAdapter } of validRGIPAdapters) {
      result.addedIPAdapters++;
      const { weight, model, clipVisionModel, method, beginEndStepPct, image } = ipAdapter;
      assert(model, 'IP Adapter model is required');
@@ -248,11 +305,3 @@ export const addRegions = async (

  return results;
};

const isValidIPAdapter = (ipAdapter: IPAdapterConfig, base: BaseModelType): boolean => {
  // Must be have a model that matches the current base and must have a control image
  const hasModel = Boolean(ipAdapter.model);
  const modelMatchesBase = ipAdapter.model?.base === base;
  const hasImage = Boolean(ipAdapter.image);
  return hasModel && modelMatchesBase && hasImage;
};
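One behavioral consequence of the hunk above: with a FLUX model, regional guidance may carry text prompts, but any regional reference image trips the assert. Presumably `getRegionalGuidanceWarnings` keeps such regions out of `validRegions` upstream, making the assert a fail-fast safety net rather than an expected path. Condensed control flow (names as in the diff):

```typescript
// Condensed: regional reference images after this change.
for (const { id, ipAdapter } of region.referenceImages) {
  // FLUX only reaches this loop if a "valid" region somehow has reference
  // images; regional IP adapters are unsupported there, so fail fast.
  assert(!isFLUX, 'Regional IP adapters are not supported for FLUX.');
  // ...build the regional IP adapter node for SD1/SDXL as before...
}
```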
@@ -11,6 +11,7 @@ import { addImageToImage } from 'features/nodes/util/graph/generation/addImageTo
import { addInpaint } from 'features/nodes/util/graph/generation/addInpaint';
import { addNSFWChecker } from 'features/nodes/util/graph/generation/addNSFWChecker';
import { addOutpaint } from 'features/nodes/util/graph/generation/addOutpaint';
import { addRegions } from 'features/nodes/util/graph/generation/addRegions';
import { addTextToImage } from 'features/nodes/util/graph/generation/addTextToImage';
import { addWatermarker } from 'features/nodes/util/graph/generation/addWatermarker';
import { Graph } from 'features/nodes/util/graph/generation/Graph';
@@ -79,7 +80,10 @@ export const buildFLUXGraph = async (
    id: getPrefixedId('flux_text_encoder'),
    prompt: positivePrompt,
  });

  const posCondCollect = g.addNode({
    type: 'collect',
    id: getPrefixedId('pos_cond_collect'),
  });
  const denoise = g.addNode({
    type: 'flux_denoise',
    id: getPrefixedId('flux_denoise'),
@@ -104,13 +108,12 @@ export const buildFLUXGraph = async (
  g.addEdge(modelLoader, 'clip', posCond, 'clip');
  g.addEdge(modelLoader, 't5_encoder', posCond, 't5_encoder');
  g.addEdge(modelLoader, 'max_seq_len', posCond, 't5_max_seq_len');
  g.addEdge(posCond, 'conditioning', posCondCollect, 'item');
  g.addEdge(posCondCollect, 'collection', denoise, 'positive_text_conditioning');
  g.addEdge(denoise, 'latents', l2i, 'latents');

  addFLUXLoRAs(state, g, denoise, modelLoader, posCond);

  g.addEdge(posCond, 'conditioning', denoise, 'positive_text_conditioning');

  g.addEdge(denoise, 'latents', l2i, 'latents');

  const modelConfig = await fetchModelConfigWithTypeGuard(model.key, isNonRefinerMainModelConfig);
  assert(modelConfig.base === 'flux');
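The key rewiring here: FLUX positive conditioning now flows through a `collect` node instead of feeding the denoise node directly, which is what lets `addRegions` append per-region conditioning later. Schematically (edges exactly as in the hunk above; the diagram is editorial):

```typescript
// Before:  posCond --conditioning--------------------------> denoise.positive_text_conditioning
// After:   posCond --conditioning--> posCondCollect --collection--> denoise.positive_text_conditioning
//          (regional conditioning nodes can now also feed posCondCollect's 'item' input)
g.addEdge(posCond, 'conditioning', posCondCollect, 'item');
g.addEdge(posCondCollect, 'collection', denoise, 'positive_text_conditioning');
```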
@@ -196,31 +199,50 @@ export const buildFLUXGraph = async (
    type: 'collect',
    id: getPrefixedId('control_net_collector'),
  });
  const controlNetResult = await addControlNets(
  const controlNetResult = await addControlNets({
    manager,
    canvas.controlLayers.entities,
    entities: canvas.controlLayers.entities,
    g,
    canvas.bbox.rect,
    controlNetCollector,
    modelConfig.base
  );
    rect: canvas.bbox.rect,
    collector: controlNetCollector,
    model: modelConfig,
  });
  if (controlNetResult.addedControlNets > 0) {
    g.addEdge(controlNetCollector, 'collection', denoise, 'control');
  } else {
    g.deleteNode(controlNetCollector.id);
  }

  const ipAdapterCollector = g.addNode({
  const ipAdapterCollect = g.addNode({
    type: 'collect',
    id: getPrefixedId('ip_adapter_collector'),
  });
  const ipAdapterResult = addIPAdapters(canvas.referenceImages.entities, g, ipAdapterCollector, modelConfig.base);
  const ipAdapterResult = addIPAdapters({
    entities: canvas.referenceImages.entities,
    g,
    collector: ipAdapterCollect,
    model: modelConfig,
  });

  const totalIPAdaptersAdded = ipAdapterResult.addedIPAdapters;
  const regionsResult = await addRegions({
    manager,
    regions: canvas.regionalGuidance.entities,
    g,
    bbox: canvas.bbox.rect,
    model: modelConfig,
    posCond,
    negCond: null,
    posCondCollect,
    negCondCollect: null,
    ipAdapterCollect,
  });

  const totalIPAdaptersAdded =
    ipAdapterResult.addedIPAdapters + regionsResult.reduce((acc, r) => acc + r.addedIPAdapters, 0);
  if (totalIPAdaptersAdded > 0) {
    g.addEdge(ipAdapterCollector, 'collection', denoise, 'ip_adapter');
    g.addEdge(ipAdapterCollect, 'collection', denoise, 'ip_adapter');
  } else {
    g.deleteNode(ipAdapterCollector.id);
    g.deleteNode(ipAdapterCollect.id);
  }

  if (state.system.shouldUseNSFWChecker) {
@@ -227,14 +227,14 @@ export const buildSD1Graph = async (
    type: 'collect',
    id: getPrefixedId('control_net_collector'),
  });
  const controlNetResult = await addControlNets(
  const controlNetResult = await addControlNets({
    manager,
    canvas.controlLayers.entities,
    entities: canvas.controlLayers.entities,
    g,
    canvas.bbox.rect,
    controlNetCollector,
    modelConfig.base
  );
    rect: canvas.bbox.rect,
    collector: controlNetCollector,
    model: modelConfig,
  });
  if (controlNetResult.addedControlNets > 0) {
    g.addEdge(controlNetCollector, 'collection', denoise, 'control');
  } else {
@@ -245,46 +245,50 @@ export const buildSD1Graph = async (
    type: 'collect',
    id: getPrefixedId('t2i_adapter_collector'),
  });
  const t2iAdapterResult = await addT2IAdapters(
  const t2iAdapterResult = await addT2IAdapters({
    manager,
    canvas.controlLayers.entities,
    entities: canvas.controlLayers.entities,
    g,
    canvas.bbox.rect,
    t2iAdapterCollector,
    modelConfig.base
  );
    rect: canvas.bbox.rect,
    collector: t2iAdapterCollector,
    model: modelConfig,
  });
  if (t2iAdapterResult.addedT2IAdapters > 0) {
    g.addEdge(t2iAdapterCollector, 'collection', denoise, 't2i_adapter');
  } else {
    g.deleteNode(t2iAdapterCollector.id);
  }

  const ipAdapterCollector = g.addNode({
  const ipAdapterCollect = g.addNode({
    type: 'collect',
    id: getPrefixedId('ip_adapter_collector'),
  });
  const ipAdapterResult = addIPAdapters(canvas.referenceImages.entities, g, ipAdapterCollector, modelConfig.base);

  const regionsResult = await addRegions(
    manager,
    canvas.regionalGuidance.entities,
  const ipAdapterResult = addIPAdapters({
    entities: canvas.referenceImages.entities,
    g,
    canvas.bbox.rect,
    modelConfig.base,
    denoise,
    collector: ipAdapterCollect,
    model: modelConfig,
  });

  const regionsResult = await addRegions({
    manager,
    regions: canvas.regionalGuidance.entities,
    g,
    bbox: canvas.bbox.rect,
    model: modelConfig,
    posCond,
    negCond,
    posCondCollect,
    negCondCollect,
    ipAdapterCollector
  );
    ipAdapterCollect,
  });

  const totalIPAdaptersAdded =
    ipAdapterResult.addedIPAdapters + regionsResult.reduce((acc, r) => acc + r.addedIPAdapters, 0);
  if (totalIPAdaptersAdded > 0) {
    g.addEdge(ipAdapterCollector, 'collection', denoise, 'ip_adapter');
    g.addEdge(ipAdapterCollect, 'collection', denoise, 'ip_adapter');
  } else {
    g.deleteNode(ipAdapterCollector.id);
    g.deleteNode(ipAdapterCollect.id);
  }

  if (state.system.shouldUseNSFWChecker) {
@@ -232,14 +232,14 @@ export const buildSDXLGraph = async (
    type: 'collect',
    id: getPrefixedId('control_net_collector'),
  });
  const controlNetResult = await addControlNets(
  const controlNetResult = await addControlNets({
    manager,
    canvas.controlLayers.entities,
    entities: canvas.controlLayers.entities,
    g,
    canvas.bbox.rect,
    controlNetCollector,
    modelConfig.base
  );
    rect: canvas.bbox.rect,
    collector: controlNetCollector,
    model: modelConfig,
  });
  if (controlNetResult.addedControlNets > 0) {
    g.addEdge(controlNetCollector, 'collection', denoise, 'control');
  } else {
@@ -250,46 +250,50 @@ export const buildSDXLGraph = async (
    type: 'collect',
    id: getPrefixedId('t2i_adapter_collector'),
  });
  const t2iAdapterResult = await addT2IAdapters(
  const t2iAdapterResult = await addT2IAdapters({
    manager,
    canvas.controlLayers.entities,
    entities: canvas.controlLayers.entities,
    g,
    canvas.bbox.rect,
    t2iAdapterCollector,
    modelConfig.base
  );
    rect: canvas.bbox.rect,
    collector: t2iAdapterCollector,
    model: modelConfig,
  });
  if (t2iAdapterResult.addedT2IAdapters > 0) {
    g.addEdge(t2iAdapterCollector, 'collection', denoise, 't2i_adapter');
  } else {
    g.deleteNode(t2iAdapterCollector.id);
  }

  const ipAdapterCollector = g.addNode({
  const ipAdapterCollect = g.addNode({
    type: 'collect',
    id: getPrefixedId('ip_adapter_collector'),
  });
  const ipAdapterResult = addIPAdapters(canvas.referenceImages.entities, g, ipAdapterCollector, modelConfig.base);

  const regionsResult = await addRegions(
    manager,
    canvas.regionalGuidance.entities,
  const ipAdapterResult = addIPAdapters({
    entities: canvas.referenceImages.entities,
    g,
    canvas.bbox.rect,
    modelConfig.base,
    denoise,
    collector: ipAdapterCollect,
    model: modelConfig,
  });

  const regionsResult = await addRegions({
    manager,
    regions: canvas.regionalGuidance.entities,
    g,
    bbox: canvas.bbox.rect,
    model: modelConfig,
    posCond,
    negCond,
    posCondCollect,
    negCondCollect,
    ipAdapterCollector
  );
    ipAdapterCollect,
  });

  const totalIPAdaptersAdded =
    ipAdapterResult.addedIPAdapters + regionsResult.reduce((acc, r) => acc + r.addedIPAdapters, 0);
  if (totalIPAdaptersAdded > 0) {
    g.addEdge(ipAdapterCollector, 'collection', denoise, 'ip_adapter');
    g.addEdge(ipAdapterCollect, 'collection', denoise, 'ip_adapter');
  } else {
    g.deleteNode(ipAdapterCollector.id);
    g.deleteNode(ipAdapterCollect.id);
  }

  if (state.system.shouldUseNSFWChecker) {
@@ -58,6 +58,9 @@ const isAllowedOutputField = (nodeType: string, fieldName: string) => {
  if (RESERVED_OUTPUT_FIELD_NAMES.includes(fieldName)) {
    return false;
  }
  if (nodeType === 'image_batch' && fieldName !== 'image') {
    return false;
  }
  return true;
};

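The added guard means an `image_batch` node exposes only its `image` output. A few illustrative evaluations (the field names are hypothetical and assumed not to be in `RESERVED_OUTPUT_FIELD_NAMES`):

```typescript
isAllowedOutputField('image_batch', 'image'); // true  — the one permitted output
isAllowedOutputField('image_batch', 'width'); // false — filtered by the new guard
isAllowedOutputField('resize', 'width');      // true  — other node types unaffected
```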
@@ -0,0 +1,310 @@
import type { TooltipProps } from '@invoke-ai/ui-library';
import { Divider, Flex, ListItem, Text, Tooltip, UnorderedList } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { $true } from 'app/store/nanostores/util';
import { useAppSelector } from 'app/store/storeHooks';
import { useCanvasManagerSafe } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { selectSendToCanvas } from 'features/controlLayers/store/canvasSettingsSlice';
import { selectIterations } from 'features/controlLayers/store/paramsSlice';
import { selectDynamicPromptsIsLoading } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
import { selectAutoAddBoardId } from 'features/gallery/store/gallerySelectors';
import { $templates } from 'features/nodes/store/nodesSlice';
import type { Reason } from 'features/queue/store/readiness';
import {
  buildSelectIsReadyToEnqueueCanvasTab,
  buildSelectIsReadyToEnqueueUpscaleTab,
  buildSelectIsReadyToEnqueueWorkflowsTab,
  buildSelectReasonsWhyCannotEnqueueCanvasTab,
  buildSelectReasonsWhyCannotEnqueueUpscaleTab,
  buildSelectReasonsWhyCannotEnqueueWorkflowsTab,
  selectPromptsCount,
  selectWorkflowsBatchSize,
} from 'features/queue/store/readiness';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import type { PropsWithChildren } from 'react';
import { memo, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { enqueueMutationFixedCacheKeyOptions, useEnqueueBatchMutation } from 'services/api/endpoints/queue';
import { useBoardName } from 'services/api/hooks/useBoardName';
import { $isConnected } from 'services/events/stores';

type Props = TooltipProps & {
  prepend?: boolean;
};

export const InvokeButtonTooltip = ({ prepend, children, ...rest }: PropsWithChildren<Props>) => {
  return (
    <Tooltip label={<TooltipContent prepend={prepend} />} maxW={512} {...rest}>
      {children}
    </Tooltip>
  );
};

const TooltipContent = memo(({ prepend = false }: { prepend?: boolean }) => {
  const activeTab = useAppSelector(selectActiveTab);

  if (activeTab === 'canvas') {
    return <CanvasTabTooltipContent prepend={prepend} />;
  }

  if (activeTab === 'workflows') {
    return <WorkflowsTabTooltipContent prepend={prepend} />;
  }

  if (activeTab === 'upscaling') {
    return <UpscaleTabTooltipContent prepend={prepend} />;
  }

  return null;
});
TooltipContent.displayName = 'TooltipContent';

const CanvasTabTooltipContent = memo(({ prepend = false }: { prepend?: boolean }) => {
  const isConnected = useStore($isConnected);
  const canvasManager = useCanvasManagerSafe();
  const canvasIsFiltering = useStore(canvasManager?.stateApi.$isFiltering ?? $true);
  const canvasIsTransforming = useStore(canvasManager?.stateApi.$isTransforming ?? $true);
  const canvasIsRasterizing = useStore(canvasManager?.stateApi.$isRasterizing ?? $true);
  const canvasIsSelectingObject = useStore(canvasManager?.stateApi.$isSegmenting ?? $true);
  const canvasIsCompositing = useStore(canvasManager?.compositor.$isBusy ?? $true);

  const selectIsReady = useMemo(
    () =>
      buildSelectIsReadyToEnqueueCanvasTab({
        isConnected,
        canvasIsFiltering,
        canvasIsTransforming,
        canvasIsRasterizing,
        canvasIsSelectingObject,
        canvasIsCompositing,
      }),
    [
      isConnected,
      canvasIsCompositing,
      canvasIsFiltering,
      canvasIsRasterizing,
      canvasIsSelectingObject,
      canvasIsTransforming,
    ]
  );

  const selectReasons = useMemo(
    () =>
      buildSelectReasonsWhyCannotEnqueueCanvasTab({
        isConnected,
        canvasIsFiltering,
        canvasIsTransforming,
        canvasIsRasterizing,
        canvasIsSelectingObject,
        canvasIsCompositing,
      }),
    [
      isConnected,
      canvasIsCompositing,
      canvasIsFiltering,
      canvasIsRasterizing,
      canvasIsSelectingObject,
      canvasIsTransforming,
    ]
  );

  const isReady = useAppSelector(selectIsReady);
  const reasons = useAppSelector(selectReasons);

  return (
    <Flex flexDir="column" gap={1}>
      <IsReadyText isReady={isReady} prepend={prepend} />
      <QueueCountPredictionCanvasOrUpscaleTab />
      {reasons.length > 0 && (
        <>
          <StyledDivider />
          <ReasonsList reasons={reasons} />
        </>
      )}
      <StyledDivider />
      <AddingToText />
    </Flex>
  );
});
CanvasTabTooltipContent.displayName = 'CanvasTabTooltipContent';

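Worth noting in the component above: each canvas-busy flag falls back to the constant `$true` store when no canvas manager is mounted. React hooks cannot be called conditionally, so `useStore` must always receive a store; the fallback also means the tab reads as busy (not ready) until a manager exists. The pattern in isolation:

```typescript
// The "?? $true" pattern: a constant atom stands in when the manager is absent,
// keeping the hook call unconditional and defaulting the flag to "busy".
const canvasIsBusy = useStore(canvasManager?.compositor.$isBusy ?? $true);
```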
const UpscaleTabTooltipContent = memo(({ prepend = false }: { prepend?: boolean }) => {
  const isConnected = useStore($isConnected);

  const selectIsReady = useMemo(() => buildSelectIsReadyToEnqueueUpscaleTab({ isConnected }), [isConnected]);
  const selectReasons = useMemo(() => buildSelectReasonsWhyCannotEnqueueUpscaleTab({ isConnected }), [isConnected]);

  const isReady = useAppSelector(selectIsReady);
  const reasons = useAppSelector(selectReasons);

  return (
    <Flex flexDir="column" gap={1}>
      <IsReadyText isReady={isReady} prepend={prepend} />
      <QueueCountPredictionCanvasOrUpscaleTab />
      {reasons.length > 0 && (
        <>
          <StyledDivider />
          <ReasonsList reasons={reasons} />
        </>
      )}
      <StyledDivider />
      <AddingToText />
    </Flex>
  );
});
UpscaleTabTooltipContent.displayName = 'UpscaleTabTooltipContent';

const WorkflowsTabTooltipContent = memo(({ prepend = false }: { prepend?: boolean }) => {
  const isConnected = useStore($isConnected);
  const templates = useStore($templates);

  const selectIsReady = useMemo(
    () => buildSelectIsReadyToEnqueueWorkflowsTab({ isConnected, templates }),
    [isConnected, templates]
  );
  const selectReasons = useMemo(
    () => buildSelectReasonsWhyCannotEnqueueWorkflowsTab({ isConnected, templates }),
    [isConnected, templates]
  );

  const isReady = useAppSelector(selectIsReady);
  const reasons = useAppSelector(selectReasons);

  return (
    <Flex flexDir="column" gap={1}>
      <IsReadyText isReady={isReady} prepend={prepend} />
      <QueueCountPredictionWorkflowsTab />
      {reasons.length > 0 && (
        <>
          <StyledDivider />
          <ReasonsList reasons={reasons} />
        </>
      )}
      <StyledDivider />
      <AddingToText />
    </Flex>
  );
});
WorkflowsTabTooltipContent.displayName = 'WorkflowsTabTooltipContent';

const QueueCountPredictionCanvasOrUpscaleTab = memo(() => {
  const { t } = useTranslation();
  const promptsCount = useAppSelector(selectPromptsCount);
  const iterationsCount = useAppSelector(selectIterations);

  const text = useMemo(() => {
    const generationCount = Math.min(promptsCount * iterationsCount, 10000);
    const prompts = t('queue.prompts', { count: promptsCount });
    const iterations = t('queue.iterations', { count: iterationsCount });
    const generations = t('queue.generations', { count: generationCount });
    return `${promptsCount} ${prompts} \u00d7 ${iterationsCount} ${iterations} -> ${generationCount} ${generations}`.toLowerCase();
  }, [iterationsCount, promptsCount, t]);

  return <Text>{text}</Text>;
});
QueueCountPredictionCanvasOrUpscaleTab.displayName = 'QueueCountPredictionCanvasOrUpscaleTab';

const QueueCountPredictionWorkflowsTab = memo(() => {
  const { t } = useTranslation();
  const batchSize = useAppSelector(selectWorkflowsBatchSize);
  const iterationsCount = useAppSelector(selectIterations);

  const text = useMemo(() => {
    const generationCount = Math.min(batchSize * iterationsCount, 10000);
    const iterations = t('queue.iterations', { count: iterationsCount });
    const generations = t('queue.generations', { count: generationCount });
    return `${batchSize} ${t('queue.batchSize')} \u00d7 ${iterationsCount} ${iterations} -> ${generationCount} ${generations}`.toLowerCase();
  }, [batchSize, iterationsCount, t]);

  return <Text>{text}</Text>;
});
QueueCountPredictionWorkflowsTab.displayName = 'QueueCountPredictionWorkflowsTab';

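The predicted generation count in both components is simply prompts (or batch size) times iterations, clamped at 10,000. With hypothetical numbers:

```typescript
// 3 dynamic prompts × 4 iterations -> 12 generations
Math.min(3 * 4, 10000);    // 12
// The clamp only bites for very large batches:
Math.min(500 * 30, 10000); // 10000 (15000 clamped)
```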
const IsReadyText = memo(({ isReady, prepend }: { isReady: boolean; prepend: boolean }) => {
  const { t } = useTranslation();
  const isLoadingDynamicPrompts = useAppSelector(selectDynamicPromptsIsLoading);
  const [_, enqueueMutation] = useEnqueueBatchMutation(enqueueMutationFixedCacheKeyOptions);

  const text = useMemo(() => {
    if (enqueueMutation.isLoading) {
      return t('queue.enqueueing');
    }
    if (isLoadingDynamicPrompts) {
      return t('dynamicPrompts.loading');
    }
    if (isReady) {
      if (prepend) {
        return t('queue.queueFront');
      }
      return t('queue.queueBack');
    }
    return t('queue.notReady');
  }, [enqueueMutation.isLoading, isLoadingDynamicPrompts, isReady, prepend, t]);

  return <Text fontWeight="semibold">{text}</Text>;
});
IsReadyText.displayName = 'IsReadyText';

const ReasonsList = memo(({ reasons }: { reasons: Reason[] }) => {
  return (
    <UnorderedList>
      {reasons.map((reason, i) => (
        <ReasonListItem key={`${reason.content}.${i}`} reason={reason} />
      ))}
    </UnorderedList>
  );
});
ReasonsList.displayName = 'ReasonsList';

const ReasonListItem = memo(({ reason }: { reason: Reason }) => {
  return (
    <ListItem>
      <span>
        {reason.prefix && (
          <Text as="span" fontWeight="semibold">
            {reason.prefix}:{' '}
          </Text>
        )}
        <Text as="span">{reason.content}</Text>
      </span>
    </ListItem>
  );
});
ReasonListItem.displayName = 'ReasonListItem';

const StyledDivider = memo(() => <Divider opacity={0.2} borderColor="base.900" />);
StyledDivider.displayName = 'StyledDivider';

const AddingToText = memo(() => {
  const { t } = useTranslation();
  const sendToCanvas = useAppSelector(selectSendToCanvas);
  const autoAddBoardId = useAppSelector(selectAutoAddBoardId);
  const autoAddBoardName = useBoardName(autoAddBoardId);

  const addingTo = useMemo(() => {
    if (sendToCanvas) {
      return t('controlLayers.stagingOnCanvas');
    }
    return t('parameters.invoke.addingImagesTo');
  }, [sendToCanvas, t]);

  const destination = useMemo(() => {
    if (sendToCanvas) {
      return t('queue.canvas');
    }
    if (autoAddBoardName) {
      return autoAddBoardName;
    }
    return t('boards.uncategorized');
  }, [autoAddBoardName, sendToCanvas, t]);

  return (
    <Text fontStyle="oblique 10deg">
      {addingTo}{' '}
      <Text as="span" fontWeight="semibold">
        {destination}
      </Text>
    </Text>
  );
});
AddingToText.displayName = 'AddingToText';
@@ -6,7 +6,7 @@ import { useInvoke } from 'features/queue/hooks/useInvoke';
import { memo } from 'react';
import { PiLightningFill, PiSparkleFill } from 'react-icons/pi';

import { QueueButtonTooltip } from './QueueButtonTooltip';
import { InvokeButtonTooltip } from './InvokeButtonTooltip/InvokeButtonTooltip';

const invoke = 'Invoke';

@@ -18,7 +18,7 @@ export const InvokeButton = memo(() => {
  return (
    <Flex pos="relative" w="200px">
      <QueueIterationsNumberInput />
      <QueueButtonTooltip prepend={shift}>
      <InvokeButtonTooltip prepend={shift}>
        <Button
          onClick={shift ? queue.queueFront : queue.queueBack}
          isLoading={queue.isLoading || isLoadingDynamicPrompts}
@@ -36,7 +36,7 @@ export const InvokeButton = memo(() => {
          <span>{invoke}</span>
          <Spacer />
        </Button>
      </QueueButtonTooltip>
      </InvokeButtonTooltip>
    </Flex>
  );
});
@@ -1,9 +1,6 @@
import { IconButton, Menu, MenuButton, MenuGroup, MenuItem, MenuList } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import {
  useNewCanvasSession,
  useNewGallerySession,
} from 'features/controlLayers/components/NewSessionConfirmationAlertDialog';
import { SessionMenuItems } from 'common/components/SessionMenuItems';
import { useClearQueue } from 'features/queue/components/ClearQueueConfirmationAlertDialog';
import { QueueCountBadge } from 'features/queue/components/QueueCountBadge';
import { usePauseProcessor } from 'features/queue/hooks/usePauseProcessor';
@@ -12,16 +9,7 @@ import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { setActiveTab } from 'features/ui/store/uiSlice';
import { memo, useCallback, useRef } from 'react';
import { useTranslation } from 'react-i18next';
import {
  PiImageBold,
  PiListBold,
  PiPaintBrushBold,
  PiPauseFill,
  PiPlayFill,
  PiQueueBold,
  PiTrashSimpleBold,
  PiXBold,
} from 'react-icons/pi';
import { PiListBold, PiPauseFill, PiPlayFill, PiQueueBold, PiTrashSimpleBold, PiXBold } from 'react-icons/pi';

export const QueueActionsMenuButton = memo(() => {
  const ref = useRef<HTMLDivElement>(null);
@@ -29,8 +17,6 @@ export const QueueActionsMenuButton = memo(() => {
  const { t } = useTranslation();
  const isPauseEnabled = useFeatureStatus('pauseQueue');
  const isResumeEnabled = useFeatureStatus('resumeQueue');
  const { newGallerySessionWithDialog } = useNewGallerySession();
  const { newCanvasSessionWithDialog } = useNewCanvasSession();
  const clearQueue = useClearQueue();
  const {
    resumeProcessor,
@@ -52,12 +38,7 @@ export const QueueActionsMenuButton = memo(() => {
      <MenuButton ref={ref} as={IconButton} size="lg" aria-label="Queue Actions Menu" icon={<PiListBold />} />
      <MenuList>
        <MenuGroup title={t('common.new')}>
          <MenuItem icon={<PiImageBold />} onClick={newGallerySessionWithDialog}>
            {t('controlLayers.newGallerySession')}
          </MenuItem>
          <MenuItem icon={<PiPaintBrushBold />} onClick={newCanvasSessionWithDialog}>
            {t('controlLayers.newCanvasSession')}
          </MenuItem>
          <SessionMenuItems />
        </MenuGroup>
        <MenuGroup title={t('queue.queue')}>
          <MenuItem
@@ -1,123 +0,0 @@
import type { TooltipProps } from '@invoke-ai/ui-library';
import { Divider, Flex, ListItem, Text, Tooltip, UnorderedList } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/store/storeHooks';
import { useIsReadyToEnqueue } from 'common/hooks/useIsReadyToEnqueue';
import { selectSendToCanvas } from 'features/controlLayers/store/canvasSettingsSlice';
import { selectIterations, selectParamsSlice } from 'features/controlLayers/store/paramsSlice';
import {
  selectDynamicPromptsIsLoading,
  selectDynamicPromptsSlice,
} from 'features/dynamicPrompts/store/dynamicPromptsSlice';
import { getShouldProcessPrompt } from 'features/dynamicPrompts/util/getShouldProcessPrompt';
import { selectAutoAddBoardId } from 'features/gallery/store/gallerySelectors';
import type { PropsWithChildren } from 'react';
import { memo, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { useEnqueueBatchMutation } from 'services/api/endpoints/queue';
import { useBoardName } from 'services/api/hooks/useBoardName';

const selectPromptsCount = createSelector(selectParamsSlice, selectDynamicPromptsSlice, (params, dynamicPrompts) =>
  getShouldProcessPrompt(params.positivePrompt) ? dynamicPrompts.prompts.length : 1
);

type Props = TooltipProps & {
  prepend?: boolean;
};

export const QueueButtonTooltip = ({ prepend, children, ...rest }: PropsWithChildren<Props>) => {
  return (
    <Tooltip label={<TooltipContent prepend={prepend} />} maxW={512} {...rest}>
      {children}
    </Tooltip>
  );
};

const TooltipContent = memo(({ prepend = false }: { prepend?: boolean }) => {
  const { t } = useTranslation();
  const { isReady, reasons } = useIsReadyToEnqueue();
  const sendToCanvas = useAppSelector(selectSendToCanvas);
  const isLoadingDynamicPrompts = useAppSelector(selectDynamicPromptsIsLoading);
  const promptsCount = useAppSelector(selectPromptsCount);
  const iterationsCount = useAppSelector(selectIterations);
  const autoAddBoardId = useAppSelector(selectAutoAddBoardId);
  const autoAddBoardName = useBoardName(autoAddBoardId);
  const [_, { isLoading }] = useEnqueueBatchMutation({
    fixedCacheKey: 'enqueueBatch',
  });
  const queueCountPredictionLabel = useMemo(() => {
    const generationCount = Math.min(promptsCount * iterationsCount, 10000);
    const prompts = t('queue.prompts', { count: promptsCount });
    const iterations = t('queue.iterations', { count: iterationsCount });
    const generations = t('queue.generations', { count: generationCount });
    return `${promptsCount} ${prompts} \u00d7 ${iterationsCount} ${iterations} -> ${generationCount} ${generations}`.toLowerCase();
  }, [iterationsCount, promptsCount, t]);

  const label = useMemo(() => {
    if (isLoading) {
      return t('queue.enqueueing');
    }
    if (isLoadingDynamicPrompts) {
      return t('dynamicPrompts.loading');
    }
    if (isReady) {
      if (prepend) {
        return t('queue.queueFront');
      }
      return t('queue.queueBack');
    }
    return t('queue.notReady');
  }, [isLoading, isLoadingDynamicPrompts, isReady, prepend, t]);

  const addingTo = useMemo(() => {
    if (sendToCanvas) {
      return t('controlLayers.stagingOnCanvas');
    }
    return t('parameters.invoke.addingImagesTo');
  }, [sendToCanvas, t]);

  const destination = useMemo(() => {
    if (sendToCanvas) {
      return t('queue.canvas');
    }
    if (autoAddBoardName) {
      return autoAddBoardName;
    }
    return t('boards.uncategorized');
  }, [autoAddBoardName, sendToCanvas, t]);

  return (
    <Flex flexDir="column" gap={1}>
      <Text fontWeight="semibold">{label}</Text>
      <Text>{queueCountPredictionLabel}</Text>
      {reasons.length > 0 && (
        <>
          <Divider opacity={0.2} borderColor="base.900" />
          <UnorderedList>
            {reasons.map((reason, i) => (
              <ListItem key={`${reason.content}.${i}`}>
                <span>
                  {reason.prefix && (
                    <Text as="span" fontWeight="semibold">
                      {reason.prefix}:{' '}
                    </Text>
                  )}
                  <Text as="span">{reason.content}</Text>
                </span>
              </ListItem>
            ))}
          </UnorderedList>
        </>
      )}
      <Divider opacity={0.2} borderColor="base.900" />
      <Text fontStyle="oblique 10deg">
        {addingTo}{' '}
        <Text as="span" fontWeight="semibold">
          {destination}
        </Text>
      </Text>
    </Flex>
  );
});

TooltipContent.displayName = 'QueueButtonTooltipContent';
@@ -1,17 +1,63 @@
import { useStore } from '@nanostores/react';
import { enqueueRequested } from 'app/store/actions';
import { $true } from 'app/store/nanostores/util';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { useIsReadyToEnqueue } from 'common/hooks/useIsReadyToEnqueue';
import { useCanvasManagerSafe } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { $templates } from 'features/nodes/store/nodesSlice';
import {
  buildSelectIsReadyToEnqueueCanvasTab,
  buildSelectIsReadyToEnqueueUpscaleTab,
  buildSelectIsReadyToEnqueueWorkflowsTab,
} from 'features/queue/store/readiness';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import { useCallback } from 'react';
import { useEnqueueBatchMutation } from 'services/api/endpoints/queue';
import { useCallback, useMemo } from 'react';
import { enqueueMutationFixedCacheKeyOptions, useEnqueueBatchMutation } from 'services/api/endpoints/queue';
import { $isConnected } from 'services/events/stores';

export const useInvoke = () => {
  const dispatch = useAppDispatch();
  const tabName = useAppSelector(selectActiveTab);
  const { isReady } = useIsReadyToEnqueue();
  const [_, { isLoading }] = useEnqueueBatchMutation({
    fixedCacheKey: 'enqueueBatch',
  });
  const isConnected = useStore($isConnected);
  const canvasManager = useCanvasManagerSafe();
  const canvasIsFiltering = useStore(canvasManager?.stateApi.$isFiltering ?? $true);
  const canvasIsTransforming = useStore(canvasManager?.stateApi.$isTransforming ?? $true);
  const canvasIsRasterizing = useStore(canvasManager?.stateApi.$isRasterizing ?? $true);
  const canvasIsSelectingObject = useStore(canvasManager?.stateApi.$isSegmenting ?? $true);
  const canvasIsCompositing = useStore(canvasManager?.compositor.$isBusy ?? $true);
  const templates = useStore($templates);

  const selectIsReady = useMemo(() => {
    if (tabName === 'canvas') {
      return buildSelectIsReadyToEnqueueCanvasTab({
        isConnected,
        canvasIsFiltering,
        canvasIsTransforming,
        canvasIsRasterizing,
        canvasIsSelectingObject,
        canvasIsCompositing,
      });
    }
    if (tabName === 'upscaling') {
      return buildSelectIsReadyToEnqueueUpscaleTab({ isConnected });
    }
    if (tabName === 'workflows') {
      return buildSelectIsReadyToEnqueueWorkflowsTab({ isConnected, templates });
    }
    return () => false;
  }, [
    tabName,
    isConnected,
    canvasIsFiltering,
    canvasIsTransforming,
    canvasIsRasterizing,
    canvasIsSelectingObject,
    canvasIsCompositing,
    templates,
  ]);

  const isReady = useAppSelector(selectIsReady);

  const [_, { isLoading }] = useEnqueueBatchMutation(enqueueMutationFixedCacheKeyOptions);
  const queueBack = useCallback(() => {
    if (!isReady) {
      return;
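Note on the useInvoke hunk above: readiness checking moves from the generic useIsReadyToEnqueue hook to a per-tab selector built inside useMemo, so the selector's identity only changes when its non-store inputs change. The () => false fallback means tabs other than canvas, upscaling, and workflows never report ready, which keeps the invoke button disabled there.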
@@ -1,4 +1,5 @@
import {
  enqueueMutationFixedCacheKeyOptions,
  useCancelQueueItemMutation,
  // useCancelByBatchIdsMutation,
  useClearQueueMutation,
@@ -9,9 +10,9 @@ import {
} from 'services/api/endpoints/queue';

export const useIsQueueMutationInProgress = () => {
  const [_triggerEnqueueBatch, { isLoading: isLoadingEnqueueBatch }] = useEnqueueBatchMutation({
    fixedCacheKey: 'enqueueBatch',
  });
  const [_triggerEnqueueBatch, { isLoading: isLoadingEnqueueBatch }] = useEnqueueBatchMutation(
    enqueueMutationFixedCacheKeyOptions
  );
  const [_triggerResumeProcessor, { isLoading: isLoadingResumeProcessor }] = useResumeProcessorMutation({
    fixedCacheKey: 'resumeProcessor',
  });
invokeai/frontend/web/src/features/queue/store/readiness.ts (new file, 498 lines)
@@ -0,0 +1,498 @@
import { createSelector } from '@reduxjs/toolkit';
import type { AppConfig } from 'app/types/invokeai';
import type { ParamsState } from 'features/controlLayers/store/paramsSlice';
import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import type { CanvasState } from 'features/controlLayers/store/types';
import {
  getControlLayerWarnings,
  getGlobalReferenceImageWarnings,
  getInpaintMaskWarnings,
  getRasterLayerWarnings,
  getRegionalGuidanceWarnings,
} from 'features/controlLayers/store/validators';
import type { DynamicPromptsState } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
import { selectDynamicPromptsSlice } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
import { getShouldProcessPrompt } from 'features/dynamicPrompts/util/getShouldProcessPrompt';
import { selectNodesSlice } from 'features/nodes/store/selectors';
import type { NodesState, Templates } from 'features/nodes/store/types';
import type { WorkflowSettingsState } from 'features/nodes/store/workflowSettingsSlice';
import { selectWorkflowSettingsSlice } from 'features/nodes/store/workflowSettingsSlice';
import { isImageFieldCollectionInputInstance, isImageFieldCollectionInputTemplate } from 'features/nodes/types/field';
import { isInvocationNode } from 'features/nodes/types/invocation';
import type { UpscaleState } from 'features/parameters/store/upscaleSlice';
import { selectUpscaleSlice } from 'features/parameters/store/upscaleSlice';
import { selectConfigSlice } from 'features/system/store/configSlice';
import i18n from 'i18next';
import { forEach, upperFirst } from 'lodash-es';
import { getConnectedEdges } from 'reactflow';

/**
 * This file contains selectors and utilities for determining whether the app is ready to enqueue generations. The
 * handling differs for each tab (canvas, upscaling, workflows).
 *
 * For example, the canvas tab needs to check the status of the canvas manager before enqueuing, while the workflows
 * tab needs to check the status of the nodes and their connections.
 */

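// A minimal consumption sketch (not part of this file; it mirrors the useInvoke hunk
// above): callers build a selector once per set of non-store inputs via useMemo,
// then subscribe through Redux so the check only re-runs when the slices change.
//
//   const selectIsReady = useMemo(
//     () => buildSelectIsReadyToEnqueueUpscaleTab({ isConnected }),
//     [isConnected]
//   );
//   const isReady = useAppSelector(selectIsReady);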
const LAYER_TYPE_TO_TKEY = {
  reference_image: 'controlLayers.referenceImage',
  inpaint_mask: 'controlLayers.inpaintMask',
  regional_guidance: 'controlLayers.regionalGuidance',
  raster_layer: 'controlLayers.rasterLayer',
  control_layer: 'controlLayers.controlLayer',
} as const;

export type Reason = { prefix?: string; content: string };

const disconnectedReason = (t: typeof i18n.t) => ({ content: t('parameters.invoke.systemDisconnected') });

const getReasonsWhyCannotEnqueueWorkflowsTab = (arg: {
  isConnected: boolean;
  nodes: NodesState;
  workflowSettings: WorkflowSettingsState;
  templates: Templates;
}): Reason[] => {
  const { isConnected, nodes, workflowSettings, templates } = arg;
  const reasons: Reason[] = [];

  if (!isConnected) {
    reasons.push(disconnectedReason(i18n.t));
  }

  if (workflowSettings.shouldValidateGraph) {
    if (!nodes.nodes.length) {
      reasons.push({ content: i18n.t('parameters.invoke.noNodesInGraph') });
    }

    nodes.nodes.forEach((node) => {
      if (!isInvocationNode(node)) {
        return;
      }

      const nodeTemplate = templates[node.data.type];

      if (!nodeTemplate) {
        // Node type not found
        reasons.push({ content: i18n.t('parameters.invoke.missingNodeTemplate') });
        return;
      }

      const connectedEdges = getConnectedEdges([node], nodes.edges);

      forEach(node.data.inputs, (field) => {
        const fieldTemplate = nodeTemplate.inputs[field.name];
        const hasConnection = connectedEdges.some(
          (edge) => edge.target === node.id && edge.targetHandle === field.name
        );

        if (!fieldTemplate) {
          reasons.push({ content: i18n.t('parameters.invoke.missingFieldTemplate') });
          return;
        }

        const baseTKeyOptions = {
          nodeLabel: node.data.label || nodeTemplate.title,
          fieldLabel: field.label || fieldTemplate.title,
        };

        if (fieldTemplate.required && field.value === undefined && !hasConnection) {
          reasons.push({ content: i18n.t('parameters.invoke.missingInputForField', baseTKeyOptions) });
          return;
        } else if (
          field.value &&
          isImageFieldCollectionInputInstance(field) &&
          isImageFieldCollectionInputTemplate(fieldTemplate)
        ) {
          // Image collections may have min or max items to validate
          // TODO(psyche): generalize this to other collection types
          if (fieldTemplate.minItems !== undefined && fieldTemplate.minItems > 0 && field.value.length === 0) {
            reasons.push({ content: i18n.t('parameters.invoke.collectionEmpty', baseTKeyOptions) });
            return;
          }
          if (fieldTemplate.minItems !== undefined && field.value.length < fieldTemplate.minItems) {
            reasons.push({
              content: i18n.t('parameters.invoke.collectionTooFewItems', {
                ...baseTKeyOptions,
                size: field.value.length,
                minItems: fieldTemplate.minItems,
              }),
            });
            return;
          }
          if (fieldTemplate.maxItems !== undefined && field.value.length > fieldTemplate.maxItems) {
            reasons.push({
              content: i18n.t('parameters.invoke.collectionTooManyItems', {
                ...baseTKeyOptions,
                size: field.value.length,
                maxItems: fieldTemplate.maxItems,
              }),
            });
            return;
          }
        }
      });
    });
  }

  return reasons;
};

const getReasonsWhyCannotEnqueueUpscaleTab = (arg: {
  isConnected: boolean;
  upscale: UpscaleState;
  config: AppConfig;
  params: ParamsState;
}) => {
  const { isConnected, upscale, config, params } = arg;
  const reasons: Reason[] = [];

  if (!isConnected) {
    reasons.push(disconnectedReason(i18n.t));
  }

  if (!upscale.upscaleInitialImage) {
    reasons.push({ content: i18n.t('upscaling.missingUpscaleInitialImage') });
  } else if (config.maxUpscaleDimension) {
    const { width, height } = upscale.upscaleInitialImage;
    const { scale } = upscale;

    const maxPixels = config.maxUpscaleDimension ** 2;
    const upscaledPixels = width * scale * height * scale;

    if (upscaledPixels > maxPixels) {
      reasons.push({ content: i18n.t('upscaling.exceedsMaxSize') });
    }
  }
  const model = params.model;
  if (model && !['sd-1', 'sdxl'].includes(model.base)) {
    // When we are using an unsupported model, do not add the other warnings
    reasons.push({ content: i18n.t('upscaling.incompatibleBaseModel') });
  } else {
    // Using a compatible model, add all warnings
    if (!model) {
      reasons.push({ content: i18n.t('parameters.invoke.noModelSelected') });
    }
    if (!upscale.upscaleModel) {
      reasons.push({ content: i18n.t('upscaling.missingUpscaleModel') });
    }
    if (!upscale.tileControlnetModel) {
      reasons.push({ content: i18n.t('upscaling.missingTileControlNetModel') });
    }
  }

  return reasons;
};

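// Worked example for the pixel-budget check above: with maxUpscaleDimension = 4096,
// maxPixels = 4096 ** 2 = 16,777,216. A 1024x1024 source at scale 4 yields exactly
// 4096 * 4096 pixels and is allowed; at scale 8 it yields 8192 * 8192 = 67,108,864
// and trips 'upscaling.exceedsMaxSize'. (Illustrative values; the cap comes from config.)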
const getReasonsWhyCannotEnqueueCanvasTab = (arg: {
  isConnected: boolean;
  canvas: CanvasState;
  params: ParamsState;
  dynamicPrompts: DynamicPromptsState;
  canvasIsFiltering: boolean;
  canvasIsTransforming: boolean;
  canvasIsRasterizing: boolean;
  canvasIsCompositing: boolean;
  canvasIsSelectingObject: boolean;
}) => {
  const {
    isConnected,
    canvas,
    params,
    dynamicPrompts,
    canvasIsFiltering,
    canvasIsTransforming,
    canvasIsRasterizing,
    canvasIsCompositing,
    canvasIsSelectingObject,
  } = arg;
  const { model, positivePrompt } = params;
  const reasons: Reason[] = [];

  if (!isConnected) {
    reasons.push(disconnectedReason(i18n.t));
  }

  if (canvasIsFiltering) {
    reasons.push({ content: i18n.t('parameters.invoke.canvasIsFiltering') });
  }
  if (canvasIsTransforming) {
    reasons.push({ content: i18n.t('parameters.invoke.canvasIsTransforming') });
  }
  if (canvasIsRasterizing) {
    reasons.push({ content: i18n.t('parameters.invoke.canvasIsRasterizing') });
  }
  if (canvasIsCompositing) {
    reasons.push({ content: i18n.t('parameters.invoke.canvasIsCompositing') });
  }
  if (canvasIsSelectingObject) {
    reasons.push({ content: i18n.t('parameters.invoke.canvasIsSelectingObject') });
  }

  if (dynamicPrompts.prompts.length === 0 && getShouldProcessPrompt(positivePrompt)) {
    reasons.push({ content: i18n.t('parameters.invoke.noPrompts') });
  }

  if (!model) {
    reasons.push({ content: i18n.t('parameters.invoke.noModelSelected') });
  }

  if (model?.base === 'flux') {
    const { bbox } = canvas;

    if (!params.t5EncoderModel) {
      reasons.push({ content: i18n.t('parameters.invoke.noT5EncoderModelSelected') });
    }
    if (!params.clipEmbedModel) {
      reasons.push({ content: i18n.t('parameters.invoke.noCLIPEmbedModelSelected') });
    }
    if (!params.fluxVAE) {
      reasons.push({ content: i18n.t('parameters.invoke.noFLUXVAEModelSelected') });
    }
    if (bbox.scaleMethod === 'none') {
      if (bbox.rect.width % 16 !== 0) {
        reasons.push({
          content: i18n.t('parameters.invoke.fluxModelIncompatibleBboxWidth', { width: bbox.rect.width }),
        });
      }
      if (bbox.rect.height % 16 !== 0) {
        reasons.push({
          content: i18n.t('parameters.invoke.fluxModelIncompatibleBboxHeight', { height: bbox.rect.height }),
        });
      }
    } else {
      if (bbox.scaledSize.width % 16 !== 0) {
        reasons.push({
          content: i18n.t('parameters.invoke.fluxModelIncompatibleScaledBboxWidth', {
            width: bbox.scaledSize.width,
          }),
        });
      }
      if (bbox.scaledSize.height % 16 !== 0) {
        reasons.push({
          content: i18n.t('parameters.invoke.fluxModelIncompatibleScaledBboxHeight', {
            height: bbox.scaledSize.height,
          }),
        });
      }
    }
  }

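  // Context for the %16 checks above (our gloss, not from the source): FLUX's VAE
  // downsamples by 8 and the transformer packs 2x2 latent patches, so generation
  // sizes are assumed to need 16-pixel alignment (a 1000px-wide bbox fails; 1008 passes).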
  canvas.controlLayers.entities
    .filter((controlLayer) => controlLayer.isEnabled)
    .forEach((controlLayer, i) => {
      const layerLiteral = i18n.t('controlLayers.layer_one');
      const layerNumber = i + 1;
      const layerType = i18n.t(LAYER_TYPE_TO_TKEY['control_layer']);
      const prefix = `${layerLiteral} #${layerNumber} (${layerType})`;
      const problems = getControlLayerWarnings(controlLayer, model);

      if (problems.length) {
        const content = upperFirst(problems.map((p) => i18n.t(p)).join(', '));
        reasons.push({ prefix, content });
      }
    });

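  // With the default English strings, prefix reads like 'Layer #1 (Control Layer)';
  // the invoke tooltip renders it in bold ahead of the comma-joined warning text.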
  canvas.referenceImages.entities
    .filter((entity) => entity.isEnabled)
    .forEach((entity, i) => {
      const layerLiteral = i18n.t('controlLayers.layer_one');
      const layerNumber = i + 1;
      const layerType = i18n.t(LAYER_TYPE_TO_TKEY[entity.type]);
      const prefix = `${layerLiteral} #${layerNumber} (${layerType})`;
      const problems = getGlobalReferenceImageWarnings(entity, model);

      if (problems.length) {
        const content = upperFirst(problems.map((p) => i18n.t(p)).join(', '));
        reasons.push({ prefix, content });
      }
    });

  canvas.regionalGuidance.entities
    .filter((entity) => entity.isEnabled)
    .forEach((entity, i) => {
      const layerLiteral = i18n.t('controlLayers.layer_one');
      const layerNumber = i + 1;
      const layerType = i18n.t(LAYER_TYPE_TO_TKEY[entity.type]);
      const prefix = `${layerLiteral} #${layerNumber} (${layerType})`;
      const problems = getRegionalGuidanceWarnings(entity, model);

      if (problems.length) {
        const content = upperFirst(problems.map((p) => i18n.t(p)).join(', '));
        reasons.push({ prefix, content });
      }
    });

  canvas.rasterLayers.entities
    .filter((entity) => entity.isEnabled)
    .forEach((entity, i) => {
      const layerLiteral = i18n.t('controlLayers.layer_one');
      const layerNumber = i + 1;
      const layerType = i18n.t(LAYER_TYPE_TO_TKEY[entity.type]);
      const prefix = `${layerLiteral} #${layerNumber} (${layerType})`;
      const problems = getRasterLayerWarnings(entity, model);

      if (problems.length) {
        const content = upperFirst(problems.map((p) => i18n.t(p)).join(', '));
        reasons.push({ prefix, content });
      }
    });

  canvas.inpaintMasks.entities
    .filter((entity) => entity.isEnabled)
    .forEach((entity, i) => {
      const layerLiteral = i18n.t('controlLayers.layer_one');
      const layerNumber = i + 1;
      const layerType = i18n.t(LAYER_TYPE_TO_TKEY[entity.type]);
      const prefix = `${layerLiteral} #${layerNumber} (${layerType})`;
      const problems = getInpaintMaskWarnings(entity, model);

      if (problems.length) {
        const content = upperFirst(problems.map((p) => i18n.t(p)).join(', '));
        reasons.push({ prefix, content });
      }
    });

  return reasons;
};

export const buildSelectReasonsWhyCannotEnqueueCanvasTab = (arg: {
  isConnected: boolean;
  canvasIsFiltering: boolean;
  canvasIsTransforming: boolean;
  canvasIsRasterizing: boolean;
  canvasIsCompositing: boolean;
  canvasIsSelectingObject: boolean;
}) => {
  const {
    isConnected,
    canvasIsFiltering,
    canvasIsTransforming,
    canvasIsRasterizing,
    canvasIsCompositing,
    canvasIsSelectingObject,
  } = arg;

  return createSelector(
    selectCanvasSlice,
    selectParamsSlice,
    selectDynamicPromptsSlice,
    (canvas, params, dynamicPrompts) =>
      getReasonsWhyCannotEnqueueCanvasTab({
        isConnected,
        canvas,
        params,
        dynamicPrompts,
        canvasIsFiltering,
        canvasIsTransforming,
        canvasIsRasterizing,
        canvasIsCompositing,
        canvasIsSelectingObject,
      })
  );
};

export const buildSelectIsReadyToEnqueueCanvasTab = (arg: {
  isConnected: boolean;
  canvasIsFiltering: boolean;
  canvasIsTransforming: boolean;
  canvasIsRasterizing: boolean;
  canvasIsCompositing: boolean;
  canvasIsSelectingObject: boolean;
}) => {
  const {
    isConnected,
    canvasIsFiltering,
    canvasIsTransforming,
    canvasIsRasterizing,
    canvasIsCompositing,
    canvasIsSelectingObject,
  } = arg;

  return createSelector(
    selectCanvasSlice,
    selectParamsSlice,
    selectDynamicPromptsSlice,
    (canvas, params, dynamicPrompts) =>
      getReasonsWhyCannotEnqueueCanvasTab({
        isConnected,
        canvas,
        params,
        dynamicPrompts,
        canvasIsFiltering,
        canvasIsTransforming,
        canvasIsRasterizing,
        canvasIsCompositing,
        canvasIsSelectingObject,
      }).length === 0
  );
};

export const buildSelectReasonsWhyCannotEnqueueUpscaleTab = (arg: { isConnected: boolean }) => {
  const { isConnected } = arg;
  return createSelector(selectUpscaleSlice, selectConfigSlice, selectParamsSlice, (upscale, config, params) =>
    getReasonsWhyCannotEnqueueUpscaleTab({ isConnected, upscale, config, params })
  );
};

export const buildSelectIsReadyToEnqueueUpscaleTab = (arg: { isConnected: boolean }) => {
  const { isConnected } = arg;

  return createSelector(
    selectUpscaleSlice,
    selectConfigSlice,
    selectParamsSlice,
    (upscale, config, params) =>
      getReasonsWhyCannotEnqueueUpscaleTab({ isConnected, upscale, config, params }).length === 0
  );
};

export const buildSelectReasonsWhyCannotEnqueueWorkflowsTab = (arg: { isConnected: boolean; templates: Templates }) => {
  const { isConnected, templates } = arg;

  return createSelector(selectNodesSlice, selectWorkflowSettingsSlice, (nodes, workflowSettings) =>
    getReasonsWhyCannotEnqueueWorkflowsTab({
      isConnected,
      nodes,
      workflowSettings,
      templates,
    })
  );
};

export const buildSelectIsReadyToEnqueueWorkflowsTab = (arg: { isConnected: boolean; templates: Templates }) => {
  const { isConnected, templates } = arg;

  return createSelector(
    selectNodesSlice,
    selectWorkflowSettingsSlice,
    (nodes, workflowSettings) =>
      getReasonsWhyCannotEnqueueWorkflowsTab({
        isConnected,
        nodes,
        workflowSettings,
        templates,
      }).length === 0
  );
};

export const selectPromptsCount = createSelector(
  selectParamsSlice,
  selectDynamicPromptsSlice,
  (params, dynamicPrompts) => (getShouldProcessPrompt(params.positivePrompt) ? dynamicPrompts.prompts.length : 1)
);

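// selectPromptsCount above is the same computation that was previously local to the
// now-deleted QueueButtonTooltip component, hoisted here for reuse.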
export const selectWorkflowsBatchSize = createSelector(selectNodesSlice, ({ nodes }) =>
  // The batch size is the product of all batch nodes' collection sizes
  nodes.filter(isInvocationNode).reduce((batchSize, node) => {
    if (!isImageFieldCollectionInputInstance(node.data.inputs.images)) {
      return batchSize;
    }
    // If the batch size is not set, default to 1
    batchSize = batchSize || 1;
    // Multiply the batch size by the number of images in the batch
    batchSize = batchSize * (node.data.inputs.images.value?.length ?? 0);

    return batchSize;
  }, 0)
);
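A quick trace of the reduce in selectWorkflowsBatchSize: with two image-batch nodes holding 3 and 4 images, batchSize goes 0 -> (0 || 1) * 3 = 3 -> 3 * 4 = 12; with no image-collection nodes it stays 0, signalling no batching.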
@@ -26,7 +26,6 @@ import { SettingsDeveloperLogLevel } from 'features/system/components/SettingsMo
import { SettingsDeveloperLogNamespaces } from 'features/system/components/SettingsModal/SettingsDeveloperLogNamespaces';
import { useClearIntermediates } from 'features/system/components/SettingsModal/useClearIntermediates';
import { StickyScrollable } from 'features/system/components/StickyScrollable';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import {
  selectSystemShouldAntialiasProgressImage,
  selectSystemShouldConfirmOnDelete,
@@ -59,6 +58,7 @@ type ConfigOptions = {
  shouldShowResetWebUiText?: boolean;
  shouldShowClearIntermediates?: boolean;
  shouldShowLocalizationToggle?: boolean;
  shouldShowInvocationProgressDetailSetting?: boolean;
};

const defaultConfig: ConfigOptions = {
@@ -66,6 +66,7 @@ const defaultConfig: ConfigOptions = {
  shouldShowResetWebUiText: true,
  shouldShowClearIntermediates: true,
  shouldShowLocalizationToggle: true,
  shouldShowInvocationProgressDetailSetting: true,
};

type SettingsModalProps = {
@@ -107,7 +108,6 @@ const SettingsModal = ({ config = defaultConfig, children }: SettingsModalProps)
  const shouldEnableModelDescriptions = useAppSelector(selectSystemShouldEnableModelDescriptions);
  const shouldConfirmOnNewSession = useAppSelector(selectSystemShouldConfirmOnNewSession);
  const shouldShowInvocationProgressDetail = useAppSelector(selectSystemShouldShowInvocationProgressDetail);
  const isInvocationProgressAlertEnabled = useFeatureStatus('invocationProgressAlert');
  const onToggleConfirmOnNewSession = useCallback(() => {
    dispatch(shouldConfirmOnNewSessionToggled());
  }, [dispatch]);
@@ -233,7 +233,7 @@ const SettingsModal = ({ config = defaultConfig, children }: SettingsModalProps)
            onChange={handleChangeShouldAntialiasProgressImage}
          />
        </FormControl>
        {isInvocationProgressAlertEnabled && (
        {Boolean(config?.shouldShowInvocationProgressDetailSetting) && (
          <FormControl>
            <FormLabel>{t('settings.showDetailedInvocationProgress')}</FormLabel>
            <Switch
@@ -0,0 +1,37 @@
import { ExternalLink, Flex, Spacer, Text } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import type { VideoData } from 'features/system/components/VideosModal/data';
import { videoModalLinkClicked } from 'features/system/store/actions';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

const formatTime = ({ minutes, seconds }: { minutes: number; seconds: number }) => {
  return `${minutes}:${seconds.toString().padStart(2, '0')}`;
};

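// e.g. formatTime({ minutes: 2, seconds: 5 }) returns '2:05'; padStart keeps
// single-digit seconds two characters wide.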
export const VideoCard = memo(({ video }: { video: VideoData }) => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const { tKey, link, length } = video;
  const handleLinkClick = useCallback(() => {
    dispatch(videoModalLinkClicked(t(`supportVideos.videos.${tKey}.title`)));
  }, [dispatch, t, tKey]);

  return (
    <Flex flexDir="column" gap={1}>
      <Flex alignItems="center" gap={2}>
        <Text fontSize="md" fontWeight="semibold">
          {t(`supportVideos.videos.${tKey}.title`)}
        </Text>
        <Spacer />
        <Text variant="subtext">{formatTime(length)}</Text>
        <ExternalLink fontSize="sm" href={link} label={t('supportVideos.watch')} onClick={handleLinkClick} />
      </Flex>
      <Text fontSize="md" variant="subtext">
        {t(`supportVideos.videos.${tKey}.description`)}
      </Text>
    </Flex>
  );
});

VideoCard.displayName = 'VideoCard';
@@ -0,0 +1,20 @@
import { Divider } from '@invoke-ai/ui-library';
import { StickyScrollable } from 'features/system/components/StickyScrollable';
import type { VideoData } from 'features/system/components/VideosModal/data';
import { VideoCard } from 'features/system/components/VideosModal/VideoCard';
import { Fragment, memo } from 'react';

export const VideoCardList = memo(({ category, videos }: { category: string; videos: VideoData[] }) => {
  return (
    <StickyScrollable title={category}>
      {videos.map((video, i) => (
        <Fragment key={`${video.tKey}-${i}`}>
          <VideoCard video={video} />
          {/* Hide the divider after the last card of this category's own list. */}
          {i < videos.length - 1 && <Divider />}
        </Fragment>
      ))}
    </StickyScrollable>
  );
});

VideoCardList.displayName = 'VideoCardList';
@@ -0,0 +1,100 @@
import {
  ExternalLink,
  Flex,
  Modal,
  ModalBody,
  ModalCloseButton,
  ModalContent,
  ModalFooter,
  ModalHeader,
  ModalOverlay,
  Text,
} from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import ScrollableContent from 'common/components/OverlayScrollbars/ScrollableContent';
import { buildUseDisclosure } from 'common/hooks/useBoolean';
import {
  controlCanvasVideos,
  gettingStartedVideos,
  studioSessionsPlaylistLink,
} from 'features/system/components/VideosModal/data';
import { VideoCardList } from 'features/system/components/VideosModal/VideoCardList';
import { videoModalLinkClicked } from 'features/system/store/actions';
import { discordLink } from 'features/system/store/constants';
import { memo, useCallback } from 'react';
import { Trans, useTranslation } from 'react-i18next';

export const [useVideosModal] = buildUseDisclosure(false);

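// Our reading (buildUseDisclosure lives in common/hooks/useBoolean and is not shown
// in this diff): it appears to create a disclosure hook backed by shared state, so
// the nav-bar button and this modal operate on the same open/closed flag.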
const StudioSessionsPlaylistLink = () => {
  const dispatch = useAppDispatch();
  const handleLinkClick = useCallback(() => {
    dispatch(videoModalLinkClicked('Studio session playlist'));
  }, [dispatch]);

  return (
    <ExternalLink
      fontWeight="semibold"
      href={studioSessionsPlaylistLink}
      display="inline-flex"
      label="Studio Sessions playlist"
      onClick={handleLinkClick}
    />
  );
};

const DiscordLink = () => {
  const dispatch = useAppDispatch();
  const handleLinkClick = useCallback(() => {
    dispatch(videoModalLinkClicked('Discord'));
  }, [dispatch]);

  return (
    <ExternalLink
      fontWeight="semibold"
      href={discordLink}
      display="inline-flex"
      label="Discord"
      onClick={handleLinkClick}
    />
  );
};

const components = {
  StudioSessionsPlaylistLink: <StudioSessionsPlaylistLink />,
  DiscordLink: <DiscordLink />,
};

export const VideosModal = memo(() => {
  const { t } = useTranslation();
  const videosModal = useVideosModal();

  return (
    <Modal isOpen={videosModal.isOpen} onClose={videosModal.close} size="2xl" isCentered useInert={false}>
      <ModalOverlay />
      <ModalContent maxH="80vh" h="80vh">
        <ModalHeader bg="none">{t('supportVideos.supportVideos')}</ModalHeader>
        <ModalCloseButton />
        <ModalBody>
          <ScrollableContent>
            <Flex flexDir="column" gap={4}>
              <Flex flexDir="column" gap={2} pb={2}>
                <Text fontSize="md">
                  <Trans i18nKey="supportVideos.studioSessionsDesc1" components={components} />
                </Text>
                <Text fontSize="md">
                  <Trans i18nKey="supportVideos.studioSessionsDesc2" components={components} />
                </Text>
              </Flex>
              <VideoCardList category={t('supportVideos.gettingStarted')} videos={gettingStartedVideos} />
              <VideoCardList category={t('supportVideos.controlCanvas')} videos={controlCanvasVideos} />
            </Flex>
          </ScrollableContent>
        </ModalBody>
        <ModalFooter />
      </ModalContent>
    </Modal>
  );
});

VideosModal.displayName = 'VideosModal';
@@ -0,0 +1,30 @@
import { IconButton } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { useVideosModal } from 'features/system/components/VideosModal/VideosModal';
import { videoModalOpened } from 'features/system/store/actions';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiYoutubeLogoFill } from 'react-icons/pi';

export const VideosModalButton = memo(() => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();

  const videosModal = useVideosModal();

  const onClickOpen = useCallback(() => {
    dispatch(videoModalOpened());
    videosModal.open();
  }, [videosModal, dispatch]);

  return (
    <IconButton
      aria-label={t('supportVideos.supportVideos')}
      variant="link"
      icon={<PiYoutubeLogoFill fontSize={20} />}
      boxSize={8}
      onClick={onClickOpen}
    />
  );
});

VideosModalButton.displayName = 'VideosModalButton';
@@ -0,0 +1,88 @@
/**
 * To add a support video, you'll need to add the video to the list below.
 *
 * The `tKey` is a sub-key in the translation file `invokeai/frontend/web/public/locales/en.json`.
 * Add the title and description under `supportVideos.videos`, following the existing format.
 */

export type VideoData = {
  tKey: string;
  link: string;
  length: {
    minutes: number;
    seconds: number;
  };
};

export const gettingStartedVideos: VideoData[] = [
  {
    tKey: 'creatingYourFirstImage',
    link: 'https://www.youtube.com/watch?v=jVi2XgSGrfY&list=PLvWK1Kc8iXGrQy8r9TYg6QdUuJ5MMx-ZO&index=1&t=29s&pp=iAQB',
    length: { minutes: 6, seconds: 0 },
  },
  {
    tKey: 'usingControlLayersAndReferenceGuides',
    link: 'https://www.youtube.com/watch?v=crgw6bEgyrw&list=PLvWK1Kc8iXGrQy8r9TYg6QdUuJ5MMx-ZO&index=2&t=70s&pp=iAQB',
    length: { minutes: 5, seconds: 30 },
  },
  {
    tKey: 'understandingImageToImageAndDenoising',
    link: 'https://www.youtube.com/watch?v=tvj8-0s6S2U&list=PLvWK1Kc8iXGrQy8r9TYg6QdUuJ5MMx-ZO&index=3&t=1s&pp=iAQB',
    length: { minutes: 2, seconds: 37 },
  },
  {
    tKey: 'exploringAIModelsAndConceptAdapters',
    link: 'https://www.youtube.com/watch?v=iwBmBQMZ0UA&list=PLvWK1Kc8iXGrQy8r9TYg6QdUuJ5MMx-ZO&index=4&pp=iAQB',
    length: { minutes: 8, seconds: 52 },
  },
  {
    tKey: 'creatingAndComposingOnInvokesControlCanvas',
    link: 'https://www.youtube.com/watch?v=O4LaFcYFxlA',
    length: { minutes: 2, seconds: 52 },
  },
  {
    tKey: 'upscaling',
    link: 'https://www.youtube.com/watch?v=OCb19_P0nro&list=PLvWK1Kc8iXGrQy8r9TYg6QdUuJ5MMx-ZO&index=6&t=2s&pp=iAQB',
    length: { minutes: 4, seconds: 0 },
  },
];

export const controlCanvasVideos: VideoData[] = [
  {
    tKey: 'howDoIGenerateAndSaveToTheGallery',
    link: 'https://youtu.be/Tl-69JvwJ2s?si=dbjmBc1iDAUpE1k5&t=26',
    length: { minutes: 0, seconds: 49 },
  },
  {
    tKey: 'howDoIEditOnTheCanvas',
    link: 'https://youtu.be/Tl-69JvwJ2s?si=U_bFl9HsvSuejbxp&t=76',
    length: { minutes: 0, seconds: 58 },
  },
  {
    tKey: 'howDoIDoImageToImageTransformation',
    link: 'https://youtu.be/Tl-69JvwJ2s?si=fjhTeY-yZ3qsEzEM&t=138',
    length: { minutes: 0, seconds: 51 },
  },
  {
    tKey: 'howDoIUseControlNetsAndControlLayers',
    link: 'https://youtu.be/Tl-69JvwJ2s?si=x5KcYvkHbvR9ifsX&t=192',
    length: { minutes: 1, seconds: 41 },
  },
  {
    tKey: 'howDoIUseGlobalIPAdaptersAndReferenceImages',
    link: 'https://youtu.be/Tl-69JvwJ2s?si=O940rNHiHGKXknK2&t=297',
    length: { minutes: 0, seconds: 43 },
  },
  {
    tKey: 'howDoIUseInpaintMasks',
    link: 'https://youtu.be/Tl-69JvwJ2s?si=3DZhmerkzUmvJJSn&t=345',
    length: { minutes: 1, seconds: 9 },
  },
  {
    tKey: 'howDoIOutpaint',
    link: 'https://youtu.be/Tl-69JvwJ2s?si=IIwkGZLq1PfLf80Q&t=420',
    length: { minutes: 0, seconds: 48 },
  },
];

export const studioSessionsPlaylistLink = 'https://www.youtube.com/playlist?list=PLvWK1Kc8iXGq_8tWZqnwDVaf9uhlDC09U';
@@ -0,0 +1,4 @@
import { createAction } from '@reduxjs/toolkit';

export const videoModalLinkClicked = createAction<string>('system/videoModalLinkClicked');
export const videoModalOpened = createAction('system/videoModalOpened');
@@ -4,7 +4,7 @@ import { ToolChooser } from 'features/controlLayers/components/Tool/ToolChooser'
import { CanvasManagerProviderGate } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { useImageViewer } from 'features/gallery/components/ImageViewer/useImageViewer';
import { useClearQueue } from 'features/queue/components/ClearQueueConfirmationAlertDialog';
import { QueueButtonTooltip } from 'features/queue/components/QueueButtonTooltip';
import { InvokeButtonTooltip } from 'features/queue/components/InvokeButtonTooltip/InvokeButtonTooltip';
import { useCancelCurrentQueueItem } from 'features/queue/hooks/useCancelCurrentQueueItem';
import { useInvoke } from 'features/queue/hooks/useInvoke';
import type { UsePanelReturn } from 'features/ui/hooks/usePanel';
@@ -62,7 +62,7 @@ const FloatingSidePanelButtons = (props: Props) => {
          flexGrow={1}
        />
      </Tooltip>
      <QueueButtonTooltip prepend={shift} placement="end">
      <InvokeButtonTooltip prepend={shift} placement="end">
        <IconButton
          aria-label={t('queue.queueBack')}
          onClick={shift ? queue.queueFront : queue.queueBack}
@@ -72,7 +72,7 @@ const FloatingSidePanelButtons = (props: Props) => {
          colorScheme="invokeYellow"
          flexGrow={1}
        />
      </QueueButtonTooltip>
      </InvokeButtonTooltip>
      <Tooltip label={t('queue.cancelTooltip')} placement="end">
        <IconButton
          isDisabled={cancelCurrent.isDisabled}
@@ -4,6 +4,7 @@ import { $customNavComponent } from 'app/store/nanostores/customNavComponent';
import InvokeAILogoComponent from 'features/system/components/InvokeAILogoComponent';
import SettingsMenu from 'features/system/components/SettingsModal/SettingsMenu';
import StatusIndicator from 'features/system/components/StatusIndicator';
import { VideosModalButton } from 'features/system/components/VideosModal/VideosModalButton';
import { TabMountGate } from 'features/ui/components/TabMountGate';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
@@ -39,6 +40,7 @@ export const VerticalNavBar = memo(() => {
      <Spacer />
      <StatusIndicator />
      <Notifications />
      <VideosModalButton />
      {customNavComponent ? customNavComponent : <SettingsMenu />}
    </Flex>
  );
@@ -425,3 +425,7 @@ const resetListQueryData = (
  // we have to manually kick off another query to get the first page and re-initialize the list
  dispatch(queueApi.endpoints.listQueueItems.initiate(undefined));
};

export const enqueueMutationFixedCacheKeyOptions = {
  fixedCacheKey: 'enqueueBatch',
} as const;

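Why this constant matters: RTK Query scopes a mutation's in-flight state to the hook instance that triggered it unless callers share a fixedCacheKey. With this shared options object, every useEnqueueBatchMutation caller observes the same isLoading flag, which is how the invoke button, its tooltip, and the queue menu stay in sync. A minimal sketch (the component name and Spinner import are illustrative):

const EnqueueSpinner = () => {
  // Reads the same shared isLoading flag as useInvoke and useIsQueueMutationInProgress.
  const [_, { isLoading }] = useEnqueueBatchMutation(enqueueMutationFixedCacheKeyOptions);
  return isLoading ? <Spinner /> : null;
};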
File diff suppressed because one or more lines are too long
@@ -1 +1 @@
__version__ = "5.4.2rc1"
__version__ = "5.4.3rc1"

@@ -56,7 +56,7 @@ dependencies = [
  "torchmetrics",
  "torchsde",
  "torchvision",
  "transformers==4.41.1",
  "transformers==4.46.3",

  # Core application dependencies, pinned for reproducible builds.
  "fastapi-events==0.11.1",
@@ -164,7 +164,7 @@ version = { attr = "invokeai.version.__version__" }
  "*.png",
]
"invokeai.assets.fonts" = ["**/*.ttf"]
"invokeai.backend" = ["**.png"]
"invokeai.backend" = ["**.png", "**/*.icc"]
"invokeai.configs" = ["*.example", "**/*.yaml", "*.txt"]
"invokeai.frontend.web.dist" = ["**"]
"invokeai.frontend.web.static" = ["**"]
