Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-20 05:18:17 -05:00)
Compare commits

200 commits: psyche/fea… ... ryan/flux-…

Commit SHA1s:

8d04ec3f95 4581a37a48 54b7f9a063 7d488a5352 4d7667f63d
08704ee8ec 5910892c33 46a09d9e90 df0c7d73f3 3905c97e32
0be796a808 7dd33b0f39 484aaf1595 c276b60af9 5d8dd6e26e
5bca68d873 64364e7911 6565cea039 3ebd8d6c07 e970185161
fa5653cdf7 9a7b000995 3a27242838 8cfb032051 06a9d4e2b2
ed46acee79 b54463d294 faee79dc95 965cd76e33 e5e8cbf34c
3412a52594 e01f66b026 53abdde242 94c088300f 3741a6f5e0
059336258f 2c23b8414c 271cc52c80 20356c0746 e44458609f
69d86a7696 56db1a9292 cf50e5eeee c9c07968d2 97d0757176
0f51b677a9 56ca94c3a9 28d169f859 92f71d99ee 0764c02b1d
081c7569fe 20f6532ee8 b9e8910478 ded8391e3c e9dd2c396a
0d86de0cb5 bad1149504 fda7aaa7ca 85c616fa34 549f4e9794
ef8ededd2f 1948ffe106 c70f4404c4 b157ae928c 7a0871992d
b38e2e14f4 7c0e70ec84 a89ae9d2bf ad1fcb3f07 87d74b910b
7ad1c297a4 fbc629faa6 7baa6b3c09 53d482bade 5aca04b51b
ea8787c8ff cead2c4445 f76ac1808c f01210861b f757f23ef0
872a6ef209 4267e5ffc4 a69c5ff9ef 3ebd8d7d1b 1fd80d54a4
991f63e455 6a1efd3527 0eadc0dd9e 481423d678 89ede0aef3
359bdee9c6 0e6fba3763 652502d7a6 91d981a49e 24f61d21b2
eb9a4177c5 3c43351a5b b1359b6dff bddccf6d2f 21ffaab2a2
1e969f938f 9c6c86ee4f 6b53a48b48 c813fa3fc0 a08e61184a
a0d62a5f41 616c0f11e1 e1626a4e49 6ab891a319 492de41316
c064efc866 1a0885bfb1 e8b202d0a5 c6fc82f756 9a77e951d2
8bd4207a27 0bb601aaf7 2da25a0043 51d0931898 357b68d1ba
d9ddb6c32e ad02a99a83 b707dafc7b 02906c8f5d 8538e508f1
8c333ffd14 72ace5fdff 9b7583fc84 989eee338e acc3d7b91b
49de868658 b1702c7d90 e49e19ea13 c9f91f391e 4cb6b2b701
7d132ea148 1088accd91 8d237d8f8b 0c86a3232d dbfb0359cb
b4c2aa596b 87e89b7995 9b089430e2 f2b0025958 4b390906bc
c5b8efe03b 4d08d00ad8 9b0130262b 878093f64e d5ff7ef250
f36583f866 829bc1bc7d 17c7b57145 6a12189542 96a31a5563
067747eca9 c7878fddc6 54c51e0a06 1640ea0298 0c32ae9775
fdb8ca5165 571faf6d7c bdbdb22b74 9bbb5644af e90ad19f22
0ba11e8f73 1cf7600f5b 4f9d12b872 68c3b0649b 8ef8bd4261
50897ba066 3510643870 ca9cb1c9ef b89caa02bd eaf4e08c44
9179619077 13cb5f0ba2 7e52fc1c17 7f60a4a282 f05efd3270
7732b5d478 a2a1934b66 dff6570078 04e4fb63af 83609d5008
2618ed0ae7 bb3cedddd5 0e6cb91863 a0fefcd43f a5f8c23dee
7bb4ea57c6 75dc961bcb a9a1f6ef21 aa40161f26 6efa812874
8a683f5a3c f4b0b6a93d 1337c33ad3 496b02a3bc 7b5efc2203
SECURITY.md — new file (14 lines)

@@ -0,0 +1,14 @@
+# Security Policy
+
+## Supported Versions
+
+Only the latest version of Invoke will receive security updates.
+We do not currently maintain multiple versions of the application with updates.
+
+## Reporting a Vulnerability
+
+To report a vulnerability, contact the Invoke team directly at security@invoke.ai
+
+At this time, we do not maintain a formal bug bounty program.
+
+You can also share identified security issues with our team on huntr.com
@@ -38,7 +38,7 @@ This project is a combined effort of dedicated people from across the world. [C
 
 ## Code of Conduct
 
-The InvokeAI community is a welcoming place, and we want your help in maintaining that. Please review our [Code of Conduct](https://github.com/invoke-ai/InvokeAI/blob/main/CODE_OF_CONDUCT.md) to learn more - it's essential to maintaining a respectful and inclusive environment.
+The InvokeAI community is a welcoming place, and we want your help in maintaining that. Please review our [Code of Conduct](https://github.com/invoke-ai/InvokeAI/blob/main/docs/CODE_OF_CONDUCT.md) to learn more - it's essential to maintaining a respectful and inclusive environment.
 
 By making a contribution to this project, you certify that:
@@ -99,7 +99,6 @@ their descriptions.
 | Scale Latents | Scales latents by a given factor. |
 | Segment Anything Processor | Applies segment anything processing to image |
 | Show Image | Displays a provided image, and passes it forward in the pipeline. |
-| Step Param Easing | Experimental per-step parameter easing for denoising steps |
 | String Primitive Collection | A collection of string primitive values |
 | String Primitive | A string primitive value |
 | Subtract Integers | Subtracts two numbers |
@@ -63,6 +63,7 @@ class Classification(str, Enum, metaclass=MetaEnum):
     - `Prototype`: The invocation is not yet stable and may be removed from the application at any time. Workflows built around this invocation may break, and we are *not* committed to supporting this invocation.
     - `Deprecated`: The invocation is deprecated and may be removed in a future version.
     - `Internal`: The invocation is not intended for use by end-users. It may be changed or removed at any time, but is exposed for users to play with.
+    - `Special`: The invocation is a special case and does not fit into any of the other classifications.
     """
 
     Stable = "stable"
@@ -70,6 +71,7 @@ class Classification(str, Enum, metaclass=MetaEnum):
     Prototype = "prototype"
     Deprecated = "deprecated"
     Internal = "internal"
+    Special = "special"
 
 
 class UIConfigBase(BaseModel):
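For context on the pattern this hunk extends: `Classification` is a `str`-based `Enum`, so members compare equal to their raw string values, which is what lets the classification serialize directly into invocation schemas. A minimal self-contained sketch (the `MetaEnum` stand-in below is illustrative, not necessarily InvokeAI's actual metaclass):

```python
from enum import Enum, EnumMeta


class MetaEnum(EnumMeta):
    # Illustrative stand-in: lets `in` checks accept raw strings.
    def __contains__(cls, item: object) -> bool:
        try:
            cls(item)
        except ValueError:
            return False
        return True


class Classification(str, Enum, metaclass=MetaEnum):
    Stable = "stable"
    Internal = "internal"
    Special = "special"  # the member added by this diff


# str-Enum members compare equal to plain strings.
assert Classification.Special == "special"
assert "special" in Classification
```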
@@ -1,98 +1,120 @@
-from typing import Any, Union
+from typing import Optional, Union
 
 import numpy as np
-import numpy.typing as npt
 import torch
+import torchvision.transforms as T
+from PIL import Image
+from torchvision.transforms.functional import resize as tv_resize
 
 from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
-from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, LatentsField
+from invokeai.app.invocations.fields import FieldDescriptions, ImageField, Input, InputField, LatentsField
 from invokeai.app.invocations.primitives import LatentsOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
+from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
 from invokeai.backend.util.devices import TorchDevice
 
 
+def slerp(
+    t: Union[float, np.ndarray],
+    v0: Union[torch.Tensor, np.ndarray],
+    v1: Union[torch.Tensor, np.ndarray],
+    device: torch.device,
+    DOT_THRESHOLD: float = 0.9995,
+):
+    """
+    Spherical linear interpolation
+    Args:
+        t (float/np.ndarray): Float value between 0.0 and 1.0
+        v0 (np.ndarray): Starting vector
+        v1 (np.ndarray): Final vector
+        DOT_THRESHOLD (float): Threshold for considering the two vectors as
+            colineal. Not recommended to alter this.
+    Returns:
+        v2 (np.ndarray): Interpolation vector between v0 and v1
+    """
+    inputs_are_torch = False
+    if not isinstance(v0, np.ndarray):
+        inputs_are_torch = True
+        v0 = v0.detach().cpu().numpy()
+    if not isinstance(v1, np.ndarray):
+        inputs_are_torch = True
+        v1 = v1.detach().cpu().numpy()
+
+    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
+    if np.abs(dot) > DOT_THRESHOLD:
+        v2 = (1 - t) * v0 + t * v1
+    else:
+        theta_0 = np.arccos(dot)
+        sin_theta_0 = np.sin(theta_0)
+        theta_t = theta_0 * t
+        sin_theta_t = np.sin(theta_t)
+        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
+        s1 = sin_theta_t / sin_theta_0
+        v2 = s0 * v0 + s1 * v1
+
+    if inputs_are_torch:
+        v2 = torch.from_numpy(v2).to(device)
+
+    return v2
+
+
 @invocation(
     "lblend",
     title="Blend Latents",
-    tags=["latents", "blend"],
+    tags=["latents", "blend", "mask"],
     category="latents",
-    version="1.0.3",
+    version="1.1.0",
 )
 class BlendLatentsInvocation(BaseInvocation):
-    """Blend two latents using a given alpha. Latents must have same size."""
+    """Blend two latents using a given alpha. If a mask is provided, the second latents will be masked before blending.
+    Latents must have same size. Masking functionality added by @dwringer."""
 
-    latents_a: LatentsField = InputField(
-        description=FieldDescriptions.latents,
-        input=Input.Connection,
-    )
-    latents_b: LatentsField = InputField(
-        description=FieldDescriptions.latents,
-        input=Input.Connection,
-    )
-    alpha: float = InputField(default=0.5, description=FieldDescriptions.blend_alpha)
+    latents_a: LatentsField = InputField(description=FieldDescriptions.latents, input=Input.Connection)
+    latents_b: LatentsField = InputField(description=FieldDescriptions.latents, input=Input.Connection)
+    mask: Optional[ImageField] = InputField(default=None, description="Mask for blending in latents B")
+    alpha: float = InputField(ge=0, default=0.5, description=FieldDescriptions.blend_alpha)
+
+    def prep_mask_tensor(self, mask_image: Image.Image) -> torch.Tensor:
+        if mask_image.mode != "L":
+            mask_image = mask_image.convert("L")
+        mask_tensor = image_resized_to_grid_as_tensor(mask_image, normalize=False)
+        if mask_tensor.dim() == 3:
+            mask_tensor = mask_tensor.unsqueeze(0)
+        return mask_tensor
+
+    def replace_tensor_from_masked_tensor(
+        self, tensor: torch.Tensor, other_tensor: torch.Tensor, mask_tensor: torch.Tensor
+    ):
+        output = tensor.clone()
+        mask_tensor = mask_tensor.expand(output.shape)
+        if output.dtype != torch.float16:
+            output = torch.add(output, mask_tensor * torch.sub(other_tensor, tensor))
+        else:
+            output = torch.add(output, mask_tensor.half() * torch.sub(other_tensor, tensor))
+        return output
 
     def invoke(self, context: InvocationContext) -> LatentsOutput:
         latents_a = context.tensors.load(self.latents_a.latents_name)
         latents_b = context.tensors.load(self.latents_b.latents_name)
+        if self.mask is None:
+            mask_tensor = torch.zeros(latents_a.shape[-2:])
+        else:
+            mask_tensor = self.prep_mask_tensor(context.images.get_pil(self.mask.image_name))
+        mask_tensor = tv_resize(mask_tensor, latents_a.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False)
+
+        latents_b = self.replace_tensor_from_masked_tensor(latents_b, latents_a, mask_tensor)
 
         if latents_a.shape != latents_b.shape:
-            raise Exception("Latents to blend must be the same size.")
+            raise ValueError("Latents to blend must be the same size.")
 
         device = TorchDevice.choose_torch_device()
 
-        def slerp(
-            t: Union[float, npt.NDArray[Any]],  # FIXME: maybe use np.float32 here?
-            v0: Union[torch.Tensor, npt.NDArray[Any]],
-            v1: Union[torch.Tensor, npt.NDArray[Any]],
-            DOT_THRESHOLD: float = 0.9995,
-        ) -> Union[torch.Tensor, npt.NDArray[Any]]:
-            """
-            Spherical linear interpolation
-            Args:
-                t (float/np.ndarray): Float value between 0.0 and 1.0
-                v0 (np.ndarray): Starting vector
-                v1 (np.ndarray): Final vector
-                DOT_THRESHOLD (float): Threshold for considering the two vectors as
-                    colineal. Not recommended to alter this.
-            Returns:
-                v2 (np.ndarray): Interpolation vector between v0 and v1
-            """
-            inputs_are_torch = False
-            if not isinstance(v0, np.ndarray):
-                inputs_are_torch = True
-                v0 = v0.detach().cpu().numpy()
-            if not isinstance(v1, np.ndarray):
-                inputs_are_torch = True
-                v1 = v1.detach().cpu().numpy()
-
-            dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
-            if np.abs(dot) > DOT_THRESHOLD:
-                v2 = (1 - t) * v0 + t * v1
-            else:
-                theta_0 = np.arccos(dot)
-                sin_theta_0 = np.sin(theta_0)
-                theta_t = theta_0 * t
-                sin_theta_t = np.sin(theta_t)
-                s0 = np.sin(theta_0 - theta_t) / sin_theta_0
-                s1 = sin_theta_t / sin_theta_0
-                v2 = s0 * v0 + s1 * v1
-
-            if inputs_are_torch:
-                v2_torch: torch.Tensor = torch.from_numpy(v2).to(device)
-                return v2_torch
-            else:
-                assert isinstance(v2, np.ndarray)
-                return v2
-
-        # blend
-        bl = slerp(self.alpha, latents_a, latents_b)
-        assert isinstance(bl, torch.Tensor)
-        blended_latents: torch.Tensor = bl  # for type checking convenience
+        blended_latents = slerp(self.alpha, latents_a, latents_b, device)
 
         # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
         blended_latents = blended_latents.to("cpu")
-        torch.cuda.empty_cache()
+
+        TorchDevice.empty_cache()
 
         name = context.tensors.save(tensor=blended_latents)
-        return LatentsOutput.build(latents_name=name, latents=blended_latents, seed=self.latents_a.seed)
+        return LatentsOutput.build(latents_name=name, latents=blended_latents)
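A minimal sketch (not part of the diff) of the masked-replacement arithmetic that `replace_tensor_from_masked_tensor` performs: `tensor + mask * (other - tensor)` keeps `tensor` where the mask is 0 and takes the other tensor where the mask is 1. The shapes and values below are arbitrary illustrations; the sketch assumes only that torch is installed.

```python
import torch

# Where mask == 1.0 the output comes from `other`; where mask == 0.0 it stays `tensor`.
tensor = torch.zeros(1, 1, 2, 2)
other = torch.ones(1, 1, 2, 2)
mask = torch.tensor([[1.0, 0.0], [0.0, 1.0]])  # broadcast up to the latent shape

out = tensor + mask.expand(tensor.shape) * (other - tensor)
print(out)
# tensor([[[[1., 0.],
#           [0., 1.]]]])
```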
@@ -95,6 +95,7 @@ class CompelInvocation(BaseInvocation):
             ti_manager,
         ),
     ):
+        context.util.signal_progress("Building conditioning")
         assert isinstance(text_encoder, CLIPTextModel)
         assert isinstance(tokenizer, CLIPTokenizer)
         compel = Compel(
@@ -191,6 +192,7 @@ class SDXLPromptInvocationBase:
             ti_manager,
         ),
     ):
+        context.util.signal_progress("Building conditioning")
         assert isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection))
         assert isinstance(tokenizer, CLIPTokenizer)
invokeai/app/invocations/composition-nodes.py — new file (1563 lines)

File diff suppressed because it is too large
@@ -65,6 +65,7 @@ class CreateDenoiseMaskInvocation(BaseInvocation):
             img_mask = tv_resize(mask, image_tensor.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False)
             masked_image = image_tensor * torch.where(img_mask < 0.5, 0.0, 1.0)
             # TODO:
+            context.util.signal_progress("Running VAE encoder")
             masked_latents = ImageToLatentsInvocation.vae_encode(vae_info, self.fp32, self.tiled, masked_image.clone())
 
             masked_latents_name = context.tensors.save(tensor=masked_latents)
@@ -131,6 +131,7 @@ class CreateGradientMaskInvocation(BaseInvocation):
                 image_tensor = image_tensor.unsqueeze(0)
                 img_mask = tv_resize(mask, image_tensor.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False)
                 masked_image = image_tensor * torch.where(img_mask < 0.5, 0.0, 1.0)
+                context.util.signal_progress("Running VAE encoder")
                 masked_latents = ImageToLatentsInvocation.vae_encode(
                     vae_info, self.fp32, self.tiled, masked_image.clone()
                 )
@@ -250,6 +250,11 @@ class FluxConditioningField(BaseModel):
     """A conditioning tensor primitive value"""
 
     conditioning_name: str = Field(description="The name of conditioning tensor")
+    mask: Optional[TensorField] = Field(
+        default=None,
+        description="The mask associated with this conditioning tensor. Excluded regions should be set to False, "
+        "included regions should be set to True.",
+    )
 
 
 class SD3ConditioningField(BaseModel):
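A small sketch (illustrative, not from the diff) of the mask convention the new field documents: `True` marks regions the conditioning applies to, `False` marks excluded regions. The grid size here is arbitrary.

```python
import torch

# A boolean region mask for a 64x64 (packed) latent grid: this conditioning
# should only influence the left half of the image.
mask = torch.zeros(1, 64, 64, dtype=torch.bool)
mask[:, :, :32] = True  # included region = True, excluded region = False
```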
@@ -30,6 +30,7 @@ from invokeai.backend.flux.controlnet.xlabs_controlnet_flux import XLabsControlN
 from invokeai.backend.flux.denoise import denoise
 from invokeai.backend.flux.extensions.inpaint_extension import InpaintExtension
 from invokeai.backend.flux.extensions.instantx_controlnet_extension import InstantXControlNetExtension
+from invokeai.backend.flux.extensions.regional_prompting_extension import RegionalPromptingExtension
 from invokeai.backend.flux.extensions.xlabs_controlnet_extension import XLabsControlNetExtension
 from invokeai.backend.flux.extensions.xlabs_ip_adapter_extension import XLabsIPAdapterExtension
 from invokeai.backend.flux.ip_adapter.xlabs_ip_adapter_flux import XlabsIpAdapterFlux
@@ -42,6 +43,7 @@ from invokeai.backend.flux.sampling_utils import (
     pack,
     unpack,
 )
+from invokeai.backend.flux.text_conditioning import FluxTextConditioning
 from invokeai.backend.lora.conversions.flux_lora_constants import FLUX_LORA_TRANSFORMER_PREFIX
 from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
 from invokeai.backend.lora.lora_patcher import LoRAPatcher
@@ -87,10 +89,10 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
         input=Input.Connection,
         title="Transformer",
     )
-    positive_text_conditioning: FluxConditioningField = InputField(
+    positive_text_conditioning: FluxConditioningField | list[FluxConditioningField] = InputField(
         description=FieldDescriptions.positive_cond, input=Input.Connection
     )
-    negative_text_conditioning: FluxConditioningField | None = InputField(
+    negative_text_conditioning: FluxConditioningField | list[FluxConditioningField] | None = InputField(
         default=None,
         description="Negative conditioning tensor. Can be None if cfg_scale is 1.0.",
         input=Input.Connection,
@@ -139,36 +141,12 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
         name = context.tensors.save(tensor=latents)
         return LatentsOutput.build(latents_name=name, latents=latents, seed=None)
 
-    def _load_text_conditioning(
-        self, context: InvocationContext, conditioning_name: str, dtype: torch.dtype
-    ) -> Tuple[torch.Tensor, torch.Tensor]:
-        # Load the conditioning data.
-        cond_data = context.conditioning.load(conditioning_name)
-        assert len(cond_data.conditionings) == 1
-        flux_conditioning = cond_data.conditionings[0]
-        assert isinstance(flux_conditioning, FLUXConditioningInfo)
-        flux_conditioning = flux_conditioning.to(dtype=dtype)
-        t5_embeddings = flux_conditioning.t5_embeds
-        clip_embeddings = flux_conditioning.clip_embeds
-        return t5_embeddings, clip_embeddings
-
     def _run_diffusion(
         self,
         context: InvocationContext,
     ):
         inference_dtype = torch.bfloat16
 
-        # Load the conditioning data.
-        pos_t5_embeddings, pos_clip_embeddings = self._load_text_conditioning(
-            context, self.positive_text_conditioning.conditioning_name, inference_dtype
-        )
-        neg_t5_embeddings: torch.Tensor | None = None
-        neg_clip_embeddings: torch.Tensor | None = None
-        if self.negative_text_conditioning is not None:
-            neg_t5_embeddings, neg_clip_embeddings = self._load_text_conditioning(
-                context, self.negative_text_conditioning.conditioning_name, inference_dtype
-            )
-
         # Load the input latents, if provided.
         init_latents = context.tensors.load(self.latents.latents_name) if self.latents else None
         if init_latents is not None:
@@ -183,15 +161,45 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
             dtype=inference_dtype,
             seed=self.seed,
         )
+        b, _c, latent_h, latent_w = noise.shape
+        packed_h = latent_h // 2
+        packed_w = latent_w // 2
+
+        # Load the conditioning data.
+        pos_text_conditionings = self._load_text_conditioning(
+            context=context,
+            cond_field=self.positive_text_conditioning,
+            packed_height=packed_h,
+            packed_width=packed_w,
+            dtype=inference_dtype,
+            device=TorchDevice.choose_torch_device(),
+        )
+        neg_text_conditionings: list[FluxTextConditioning] | None = None
+        if self.negative_text_conditioning is not None:
+            neg_text_conditionings = self._load_text_conditioning(
+                context=context,
+                cond_field=self.negative_text_conditioning,
+                packed_height=packed_h,
+                packed_width=packed_w,
+                dtype=inference_dtype,
+                device=TorchDevice.choose_torch_device(),
+            )
+        pos_regional_prompting_extension = RegionalPromptingExtension.from_text_conditioning(
+            pos_text_conditionings, img_seq_len=packed_h * packed_w
+        )
+        neg_regional_prompting_extension = (
+            RegionalPromptingExtension.from_text_conditioning(neg_text_conditionings, img_seq_len=packed_h * packed_w)
+            if neg_text_conditionings
+            else None
+        )
 
         transformer_info = context.models.load(self.transformer.transformer)
         is_schnell = "schnell" in transformer_info.config.config_path
 
         # Calculate the timestep schedule.
-        image_seq_len = noise.shape[-1] * noise.shape[-2] // 4
         timesteps = get_schedule(
             num_steps=self.num_steps,
-            image_seq_len=image_seq_len,
+            image_seq_len=packed_h * packed_w,
             shift=not is_schnell,
         )
 
@@ -228,28 +236,17 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
 
         inpaint_mask = self._prep_inpaint_mask(context, x)
 
-        b, _c, latent_h, latent_w = x.shape
         img_ids = generate_img_ids(h=latent_h, w=latent_w, batch_size=b, device=x.device, dtype=x.dtype)
 
-        pos_bs, pos_t5_seq_len, _ = pos_t5_embeddings.shape
-        pos_txt_ids = torch.zeros(
-            pos_bs, pos_t5_seq_len, 3, dtype=inference_dtype, device=TorchDevice.choose_torch_device()
-        )
-        neg_txt_ids: torch.Tensor | None = None
-        if neg_t5_embeddings is not None:
-            neg_bs, neg_t5_seq_len, _ = neg_t5_embeddings.shape
-            neg_txt_ids = torch.zeros(
-                neg_bs, neg_t5_seq_len, 3, dtype=inference_dtype, device=TorchDevice.choose_torch_device()
-            )
-
         # Pack all latent tensors.
         init_latents = pack(init_latents) if init_latents is not None else None
         inpaint_mask = pack(inpaint_mask) if inpaint_mask is not None else None
         noise = pack(noise)
         x = pack(x)
 
-        # Now that we have 'packed' the latent tensors, verify that we calculated the image_seq_len correctly.
-        assert image_seq_len == x.shape[1]
+        # Now that we have 'packed' the latent tensors, verify that we calculated the image_seq_len, packed_h, and
+        # packed_w correctly.
+        assert packed_h * packed_w == x.shape[1]
 
         # Prepare inpaint extension.
         inpaint_extension: InpaintExtension | None = None
@@ -338,12 +335,8 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
             model=transformer,
             img=x,
             img_ids=img_ids,
-            txt=pos_t5_embeddings,
-            txt_ids=pos_txt_ids,
-            vec=pos_clip_embeddings,
-            neg_txt=neg_t5_embeddings,
-            neg_txt_ids=neg_txt_ids,
-            neg_vec=neg_clip_embeddings,
+            pos_regional_prompting_extension=pos_regional_prompting_extension,
+            neg_regional_prompting_extension=neg_regional_prompting_extension,
            timesteps=timesteps,
             step_callback=self._build_step_callback(context),
             guidance=self.guidance,
@@ -357,6 +350,43 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
         x = unpack(x.float(), self.height, self.width)
         return x
 
+    def _load_text_conditioning(
+        self,
+        context: InvocationContext,
+        cond_field: FluxConditioningField | list[FluxConditioningField],
+        packed_height: int,
+        packed_width: int,
+        dtype: torch.dtype,
+        device: torch.device,
+    ) -> list[FluxTextConditioning]:
+        """Load text conditioning data from a FluxConditioningField or a list of FluxConditioningFields."""
+        # Normalize to a list of FluxConditioningFields.
+        cond_list = [cond_field] if isinstance(cond_field, FluxConditioningField) else cond_field
+
+        text_conditionings: list[FluxTextConditioning] = []
+        for cond_field in cond_list:
+            # Load the text embeddings.
+            cond_data = context.conditioning.load(cond_field.conditioning_name)
+            assert len(cond_data.conditionings) == 1
+            flux_conditioning = cond_data.conditionings[0]
+            assert isinstance(flux_conditioning, FLUXConditioningInfo)
+            flux_conditioning = flux_conditioning.to(dtype=dtype, device=device)
+            t5_embeddings = flux_conditioning.t5_embeds
+            clip_embeddings = flux_conditioning.clip_embeds
+
+            # Load the mask, if provided.
+            mask: Optional[torch.Tensor] = None
+            if cond_field.mask is not None:
+                mask = context.tensors.load(cond_field.mask.tensor_name)
+                mask = mask.to(device=device)
+            mask = RegionalPromptingExtension.preprocess_regional_prompt_mask(
+                mask, packed_height, packed_width, dtype, device
+            )
+
+            text_conditionings.append(FluxTextConditioning(t5_embeddings, clip_embeddings, mask))
+
+        return text_conditionings
+
     @classmethod
     def prep_cfg_scale(
         cls, cfg_scale: float | list[float], timesteps: list[float], cfg_scale_start_step: int, cfg_scale_end_step: int
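A worked sketch of the sequence-length bookkeeping this diff introduces. FLUX packs the latent image into 2x2 patches, so `packed_h * packed_w` equals the old `noise.shape[-1] * noise.shape[-2] // 4` formula; the latent dimensions below are illustrative (a 1024x1024 image with an 8x VAE downscale).

```python
latent_h, latent_w = 128, 128          # illustrative latent dims
packed_h, packed_w = latent_h // 2, latent_w // 2

# One token per 2x2 latent patch after pack():
image_seq_len = packed_h * packed_w
assert image_seq_len == latent_h * latent_w // 4  # matches the removed formula
print(packed_h, packed_w, image_seq_len)  # 64 64 4096
```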
@@ -1,11 +1,18 @@
 from contextlib import ExitStack
-from typing import Iterator, Literal, Tuple
+from typing import Iterator, Literal, Optional, Tuple
 
 import torch
 from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5Tokenizer
 
 from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
-from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField
+from invokeai.app.invocations.fields import (
+    FieldDescriptions,
+    FluxConditioningField,
+    Input,
+    InputField,
+    TensorField,
+    UIComponent,
+)
 from invokeai.app.invocations.model import CLIPField, T5EncoderField
 from invokeai.app.invocations.primitives import FluxConditioningOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
@@ -22,7 +29,7 @@ from invokeai.backend.stable_diffusion.diffusion.conditioning_data import Condit
     title="FLUX Text Encoding",
     tags=["prompt", "conditioning", "flux"],
     category="conditioning",
-    version="1.1.0",
+    version="1.2.0",
     classification=Classification.Prototype,
 )
 class FluxTextEncoderInvocation(BaseInvocation):
@@ -41,7 +48,15 @@ class FluxTextEncoderInvocation(BaseInvocation):
     t5_max_seq_len: Literal[256, 512] = InputField(
         description="Max sequence length for the T5 encoder. Expected to be 256 for FLUX schnell models and 512 for FLUX dev models."
     )
-    prompt: str = InputField(description="Text prompt to encode.")
+    use_short_t5_seq_len: bool = InputField(
+        description="Use a shorter sequence length for the T5 encoder if a short prompt is used. This can improve "
+        + "performance and reduce peak memory, but may result in slightly different image outputs.",
+        default=True,
+    )
+    prompt: str = InputField(description="Text prompt to encode.", ui_component=UIComponent.Textarea)
+    mask: Optional[TensorField] = InputField(
+        default=None, description="A mask defining the region that this conditioning prompt applies to."
+    )
 
     @torch.no_grad()
     def invoke(self, context: InvocationContext) -> FluxConditioningOutput:
@@ -54,7 +69,9 @@ class FluxTextEncoderInvocation(BaseInvocation):
         )
 
         conditioning_name = context.conditioning.save(conditioning_data)
-        return FluxConditioningOutput.build(conditioning_name)
+        return FluxConditioningOutput(
+            conditioning=FluxConditioningField(conditioning_name=conditioning_name, mask=self.mask)
+        )
 
     def _t5_encode(self, context: InvocationContext) -> torch.Tensor:
         t5_tokenizer_info = context.models.load(self.t5_encoder.tokenizer)
@@ -62,6 +79,12 @@ class FluxTextEncoderInvocation(BaseInvocation):
 
         prompt = [self.prompt]
 
+        valid_seq_lens = [self.t5_max_seq_len]
+        if self.use_short_t5_seq_len:
+            # We allow a minimum sequence length of 128. Going too short results in more significant image changes.
+            valid_seq_lens = list(range(128, self.t5_max_seq_len, 128))
+            valid_seq_lens.append(self.t5_max_seq_len)
+
         with (
             t5_text_encoder_info as t5_text_encoder,
             t5_tokenizer_info as t5_tokenizer,
@@ -69,9 +92,10 @@ class FluxTextEncoderInvocation(BaseInvocation):
             assert isinstance(t5_text_encoder, T5EncoderModel)
             assert isinstance(t5_tokenizer, T5Tokenizer)
 
-            t5_encoder = HFEncoder(t5_text_encoder, t5_tokenizer, False, self.t5_max_seq_len)
+            t5_encoder = HFEncoder(t5_text_encoder, t5_tokenizer, False)
 
-            prompt_embeds = t5_encoder(prompt)
+            context.util.signal_progress("Running T5 encoder")
+            prompt_embeds = t5_encoder(prompt, valid_seq_lens)
 
         assert isinstance(prompt_embeds, torch.Tensor)
         return prompt_embeds
@@ -109,9 +133,10 @@ class FluxTextEncoderInvocation(BaseInvocation):
             # There are currently no supported CLIP quantized models. Add support here if needed.
             raise ValueError(f"Unsupported model format: {clip_text_encoder_config.format}")
 
-        clip_encoder = HFEncoder(clip_text_encoder, clip_tokenizer, True, 77)
+        clip_encoder = HFEncoder(clip_text_encoder, clip_tokenizer, True)
 
-        pooled_prompt_embeds = clip_encoder(prompt)
+        context.util.signal_progress("Running CLIP encoder")
+        pooled_prompt_embeds = clip_encoder(prompt, [77])
 
         assert isinstance(pooled_prompt_embeds, torch.Tensor)
         return pooled_prompt_embeds
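A sketch of the `valid_seq_lens` values the new `use_short_t5_seq_len` option produces, assuming nothing beyond the code shown above: multiples of 128 up to, and always including, `t5_max_seq_len`.

```python
t5_max_seq_len = 512  # FLUX dev; schnell uses 256

valid_seq_lens = list(range(128, t5_max_seq_len, 128))
valid_seq_lens.append(t5_max_seq_len)
print(valid_seq_lens)  # [128, 256, 384, 512]

# With the option disabled, the encoder always pads to the maximum:
print([t5_max_seq_len])  # [512]
```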
@@ -41,7 +41,8 @@ class FluxVaeDecodeInvocation(BaseInvocation, WithMetadata, WithBoard):
     def _vae_decode(self, vae_info: LoadedModel, latents: torch.Tensor) -> Image.Image:
         with vae_info as vae:
             assert isinstance(vae, AutoEncoder)
-            latents = latents.to(device=TorchDevice.choose_torch_device(), dtype=TorchDevice.choose_torch_dtype())
+            vae_dtype = next(iter(vae.parameters())).dtype
+            latents = latents.to(device=TorchDevice.choose_torch_device(), dtype=vae_dtype)
             img = vae.decode(latents)
 
         img = img.clamp(-1, 1)
@@ -53,6 +54,7 @@ class FluxVaeDecodeInvocation(BaseInvocation, WithMetadata, WithBoard):
     def invoke(self, context: InvocationContext) -> ImageOutput:
         latents = context.tensors.load(self.latents.latents_name)
         vae_info = context.models.load(self.vae.vae)
+        context.util.signal_progress("Running VAE")
         image = self._vae_decode(vae_info=vae_info, latents=latents)
 
         TorchDevice.empty_cache()
@@ -44,9 +44,8 @@ class FluxVaeEncodeInvocation(BaseInvocation):
         generator = torch.Generator(device=TorchDevice.choose_torch_device()).manual_seed(0)
         with vae_info as vae:
             assert isinstance(vae, AutoEncoder)
-            image_tensor = image_tensor.to(
-                device=TorchDevice.choose_torch_device(), dtype=TorchDevice.choose_torch_dtype()
-            )
+            vae_dtype = next(iter(vae.parameters())).dtype
+            image_tensor = image_tensor.to(device=TorchDevice.choose_torch_device(), dtype=vae_dtype)
             latents = vae.encode(image_tensor, sample=True, generator=generator)
         return latents
 
@@ -60,6 +59,7 @@ class FluxVaeEncodeInvocation(BaseInvocation):
         if image_tensor.dim() == 3:
             image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")
 
+        context.util.signal_progress("Running VAE")
         latents = self.vae_encode(vae_info=vae_info, image_tensor=image_tensor)
 
         latents = latents.to("cpu")
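A minimal sketch of the dtype-matching pattern both VAE hunks adopt: instead of a globally chosen dtype, inputs are cast to whatever dtype the loaded model's parameters actually use, which matters for quantized or bf16 checkpoints. `torch.nn.Conv2d` stands in for the VAE here; everything else is illustrative.

```python
import torch

vae = torch.nn.Conv2d(4, 4, kernel_size=1).to(torch.bfloat16)  # stand-in module
latents = torch.randn(1, 4, 64, 64)                            # float32 by default

# Cast the input to the model's own parameter dtype before calling it.
vae_dtype = next(iter(vae.parameters())).dtype
latents = latents.to(dtype=vae_dtype)
assert latents.dtype == torch.bfloat16
```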
invokeai/app/invocations/image_panels.py — new file (59 lines)

@@ -0,0 +1,59 @@
+from pydantic import ValidationInfo, field_validator
+
+from invokeai.app.invocations.baseinvocation import (
+    BaseInvocation,
+    BaseInvocationOutput,
+    Classification,
+    invocation,
+    invocation_output,
+)
+from invokeai.app.invocations.fields import InputField, OutputField
+from invokeai.app.services.shared.invocation_context import InvocationContext
+
+
+@invocation_output("image_panel_coordinate_output")
+class ImagePanelCoordinateOutput(BaseInvocationOutput):
+    x_left: int = OutputField(description="The left x-coordinate of the panel.")
+    y_top: int = OutputField(description="The top y-coordinate of the panel.")
+    width: int = OutputField(description="The width of the panel.")
+    height: int = OutputField(description="The height of the panel.")
+
+
+@invocation(
+    "image_panel_layout",
+    title="Image Panel Layout",
+    tags=["image", "panel", "layout"],
+    category="image",
+    version="1.0.0",
+    classification=Classification.Prototype,
+)
+class ImagePanelLayoutInvocation(BaseInvocation):
+    """Get the coordinates of a single panel in a grid. (If the full image shape cannot be divided evenly into panels,
+    then the grid may not cover the entire image.)
+    """
+
+    width: int = InputField(description="The width of the entire grid.")
+    height: int = InputField(description="The height of the entire grid.")
+    num_cols: int = InputField(ge=1, default=1, description="The number of columns in the grid.")
+    num_rows: int = InputField(ge=1, default=1, description="The number of rows in the grid.")
+    panel_col_idx: int = InputField(ge=0, default=0, description="The column index of the panel to be processed.")
+    panel_row_idx: int = InputField(ge=0, default=0, description="The row index of the panel to be processed.")
+
+    @field_validator("panel_col_idx")
+    def validate_panel_col_idx(cls, v: int, info: ValidationInfo) -> int:
+        if v < 0 or v >= info.data["num_cols"]:
+            raise ValueError(f"panel_col_idx must be between 0 and {info.data['num_cols'] - 1}")
+        return v
+
+    @field_validator("panel_row_idx")
+    def validate_panel_row_idx(cls, v: int, info: ValidationInfo) -> int:
+        if v < 0 or v >= info.data["num_rows"]:
+            raise ValueError(f"panel_row_idx must be between 0 and {info.data['num_rows'] - 1}")
+        return v
+
+    def invoke(self, context: InvocationContext) -> ImagePanelCoordinateOutput:
+        x_left = self.panel_col_idx * (self.width // self.num_cols)
+        y_top = self.panel_row_idx * (self.height // self.num_rows)
+        width = self.width // self.num_cols
+        height = self.height // self.num_rows
+        return ImagePanelCoordinateOutput(x_left=x_left, y_top=y_top, width=width, height=height)
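A worked example of the panel arithmetic in `ImagePanelLayoutInvocation.invoke`, with illustrative numbers. It also shows the caveat from the docstring: a 1000-wide grid split into 3 columns yields 333-pixel panels, so one pixel on the right is never covered.

```python
width, height = 1000, 800
num_cols, num_rows = 3, 2
panel_col_idx, panel_row_idx = 2, 1  # bottom-right panel

panel_w = width // num_cols           # 333
panel_h = height // num_rows          # 400
x_left = panel_col_idx * panel_w      # 666
y_top = panel_row_idx * panel_h       # 400
print(x_left, y_top, panel_w, panel_h)  # 666 400 333 400
# 666 + 333 = 999 < 1000: the grid does not cover the full image width.
```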
@@ -117,6 +117,7 @@ class ImageToLatentsInvocation(BaseInvocation):
         if image_tensor.dim() == 3:
             image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")
 
+        context.util.signal_progress("Running VAE encoder")
         latents = self.vae_encode(
             vae_info=vae_info, upcast=self.fp32, tiled=self.tiled, image_tensor=image_tensor, tile_size=self.tile_size
         )
@@ -60,6 +60,7 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
         vae_info = context.models.load(self.vae.vae)
         assert isinstance(vae_info.model, (AutoencoderKL, AutoencoderTiny))
         with SeamlessExt.static_patch_model(vae_info.model, self.vae.seamless_axes), vae_info as vae:
+            context.util.signal_progress("Running VAE decoder")
             assert isinstance(vae, (AutoencoderKL, AutoencoderTiny))
             latents = latents.to(vae.device)
             if self.fp32:
@@ -147,6 +147,10 @@ GENERATION_MODES = Literal[
     "flux_img2img",
     "flux_inpaint",
     "flux_outpaint",
+    "sd3_txt2img",
+    "sd3_img2img",
+    "sd3_inpaint",
+    "sd3_outpaint",
 ]
@@ -1,43 +1,4 @@
-import io
-from typing import Literal, Optional
-
-import matplotlib.pyplot as plt
 import numpy as np
-import PIL.Image
-from easing_functions import (
-    BackEaseIn,
-    BackEaseInOut,
-    BackEaseOut,
-    BounceEaseIn,
-    BounceEaseInOut,
-    BounceEaseOut,
-    CircularEaseIn,
-    CircularEaseInOut,
-    CircularEaseOut,
-    CubicEaseIn,
-    CubicEaseInOut,
-    CubicEaseOut,
-    ElasticEaseIn,
-    ElasticEaseInOut,
-    ElasticEaseOut,
-    ExponentialEaseIn,
-    ExponentialEaseInOut,
-    ExponentialEaseOut,
-    LinearInOut,
-    QuadEaseIn,
-    QuadEaseInOut,
-    QuadEaseOut,
-    QuarticEaseIn,
-    QuarticEaseInOut,
-    QuarticEaseOut,
-    QuinticEaseIn,
-    QuinticEaseInOut,
-    QuinticEaseOut,
-    SineEaseIn,
-    SineEaseInOut,
-    SineEaseOut,
-)
-from matplotlib.ticker import MaxNLocator
 
 from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
 from invokeai.app.invocations.fields import InputField
@@ -65,191 +26,3 @@ class FloatLinearRangeInvocation(BaseInvocation):
     def invoke(self, context: InvocationContext) -> FloatCollectionOutput:
         param_list = list(np.linspace(self.start, self.stop, self.steps))
         return FloatCollectionOutput(collection=param_list)
-
-
-EASING_FUNCTIONS_MAP = {
-    "Linear": LinearInOut,
-    "QuadIn": QuadEaseIn,
-    "QuadOut": QuadEaseOut,
-    "QuadInOut": QuadEaseInOut,
-    "CubicIn": CubicEaseIn,
-    "CubicOut": CubicEaseOut,
-    "CubicInOut": CubicEaseInOut,
-    "QuarticIn": QuarticEaseIn,
-    "QuarticOut": QuarticEaseOut,
-    "QuarticInOut": QuarticEaseInOut,
-    "QuinticIn": QuinticEaseIn,
-    "QuinticOut": QuinticEaseOut,
-    "QuinticInOut": QuinticEaseInOut,
-    "SineIn": SineEaseIn,
-    "SineOut": SineEaseOut,
-    "SineInOut": SineEaseInOut,
-    "CircularIn": CircularEaseIn,
-    "CircularOut": CircularEaseOut,
-    "CircularInOut": CircularEaseInOut,
-    "ExponentialIn": ExponentialEaseIn,
-    "ExponentialOut": ExponentialEaseOut,
-    "ExponentialInOut": ExponentialEaseInOut,
-    "ElasticIn": ElasticEaseIn,
-    "ElasticOut": ElasticEaseOut,
-    "ElasticInOut": ElasticEaseInOut,
-    "BackIn": BackEaseIn,
-    "BackOut": BackEaseOut,
-    "BackInOut": BackEaseInOut,
-    "BounceIn": BounceEaseIn,
-    "BounceOut": BounceEaseOut,
-    "BounceInOut": BounceEaseInOut,
-}
-
-EASING_FUNCTION_KEYS = Literal[tuple(EASING_FUNCTIONS_MAP.keys())]
-
-
-# actually I think for now could just use CollectionOutput (which is list[Any]
-@invocation(
-    "step_param_easing",
-    title="Step Param Easing",
-    tags=["step", "easing"],
-    category="step",
-    version="1.0.2",
-)
-class StepParamEasingInvocation(BaseInvocation):
-    """Experimental per-step parameter easing for denoising steps"""
-
-    easing: EASING_FUNCTION_KEYS = InputField(default="Linear", description="The easing function to use")
-    num_steps: int = InputField(default=20, description="number of denoising steps")
-    start_value: float = InputField(default=0.0, description="easing starting value")
-    end_value: float = InputField(default=1.0, description="easing ending value")
-    start_step_percent: float = InputField(default=0.0, description="fraction of steps at which to start easing")
-    end_step_percent: float = InputField(default=1.0, description="fraction of steps after which to end easing")
-    # if None, then start_value is used prior to easing start
-    pre_start_value: Optional[float] = InputField(default=None, description="value before easing start")
-    # if None, then end value is used prior to easing end
-    post_end_value: Optional[float] = InputField(default=None, description="value after easing end")
-    mirror: bool = InputField(default=False, description="include mirror of easing function")
-    # FIXME: add alt_mirror option (alternative to default or mirror), or remove entirely
-    # alt_mirror: bool = InputField(default=False, description="alternative mirroring by dual easing")
-    show_easing_plot: bool = InputField(default=False, description="show easing plot")
-
-    def invoke(self, context: InvocationContext) -> FloatCollectionOutput:
-        log_diagnostics = False
-        # convert from start_step_percent to nearest step <= (steps * start_step_percent)
-        # start_step = int(np.floor(self.num_steps * self.start_step_percent))
-        start_step = int(np.round(self.num_steps * self.start_step_percent))
-        # convert from end_step_percent to nearest step >= (steps * end_step_percent)
-        # end_step = int(np.ceil((self.num_steps - 1) * self.end_step_percent))
-        end_step = int(np.round((self.num_steps - 1) * self.end_step_percent))
-
-        # end_step = int(np.ceil(self.num_steps * self.end_step_percent))
-        num_easing_steps = end_step - start_step + 1
-
-        # num_presteps = max(start_step - 1, 0)
-        num_presteps = start_step
-        num_poststeps = self.num_steps - (num_presteps + num_easing_steps)
-        prelist = list(num_presteps * [self.pre_start_value])
-        postlist = list(num_poststeps * [self.post_end_value])
-
-        if log_diagnostics:
-            context.logger.debug("start_step: " + str(start_step))
-            context.logger.debug("end_step: " + str(end_step))
-            context.logger.debug("num_easing_steps: " + str(num_easing_steps))
-            context.logger.debug("num_presteps: " + str(num_presteps))
-            context.logger.debug("num_poststeps: " + str(num_poststeps))
-            context.logger.debug("prelist size: " + str(len(prelist)))
-            context.logger.debug("postlist size: " + str(len(postlist)))
-            context.logger.debug("prelist: " + str(prelist))
-            context.logger.debug("postlist: " + str(postlist))
-
-        easing_class = EASING_FUNCTIONS_MAP[self.easing]
-        if log_diagnostics:
-            context.logger.debug("easing class: " + str(easing_class))
-        easing_list = []
-        if self.mirror:  # "expected" mirroring
-            # if number of steps is even, squeeze duration down to (number_of_steps)/2
-            # and create reverse copy of list to append
-            # if number of steps is odd, squeeze duration down to ceil(number_of_steps/2)
-            # and create reverse copy of list[1:end-1]
-            # but if even then number_of_steps/2 === ceil(number_of_steps/2), so can just use ceil always
-
-            base_easing_duration = int(np.ceil(num_easing_steps / 2.0))
-            if log_diagnostics:
-                context.logger.debug("base easing duration: " + str(base_easing_duration))
-            even_num_steps = num_easing_steps % 2 == 0  # even number of steps
-            easing_function = easing_class(
-                start=self.start_value,
-                end=self.end_value,
-                duration=base_easing_duration - 1,
-            )
-            base_easing_vals = []
-            for step_index in range(base_easing_duration):
-                easing_val = easing_function.ease(step_index)
-                base_easing_vals.append(easing_val)
-                if log_diagnostics:
-                    context.logger.debug("step_index: " + str(step_index) + ", easing_val: " + str(easing_val))
-            if even_num_steps:
-                mirror_easing_vals = list(reversed(base_easing_vals))
-            else:
-                mirror_easing_vals = list(reversed(base_easing_vals[0:-1]))
-            if log_diagnostics:
-                context.logger.debug("base easing vals: " + str(base_easing_vals))
-                context.logger.debug("mirror easing vals: " + str(mirror_easing_vals))
-            easing_list = base_easing_vals + mirror_easing_vals
-
-        # FIXME: add alt_mirror option (alternative to default or mirror), or remove entirely
-        # elif self.alt_mirror:  # function mirroring (unintuitive behavior (at least to me))
-        #     # half_ease_duration = round(num_easing_steps - 1 / 2)
-        #     half_ease_duration = round((num_easing_steps - 1) / 2)
-        #     easing_function = easing_class(start=self.start_value,
-        #                                    end=self.end_value,
-        #                                    duration=half_ease_duration,
-        #                                    )
-        #
-        #     mirror_function = easing_class(start=self.end_value,
-        #                                    end=self.start_value,
-        #                                    duration=half_ease_duration,
-        #                                    )
-        #     for step_index in range(num_easing_steps):
-        #         if step_index <= half_ease_duration:
-        #             step_val = easing_function.ease(step_index)
-        #         else:
-        #             step_val = mirror_function.ease(step_index - half_ease_duration)
-        #         easing_list.append(step_val)
-        #         if log_diagnostics: logger.debug(step_index, step_val)
-        #
-
-        else:  # no mirroring (default)
-            easing_function = easing_class(
-                start=self.start_value,
-                end=self.end_value,
-                duration=num_easing_steps - 1,
-            )
-            for step_index in range(num_easing_steps):
-                step_val = easing_function.ease(step_index)
-                easing_list.append(step_val)
-                if log_diagnostics:
-                    context.logger.debug("step_index: " + str(step_index) + ", easing_val: " + str(step_val))
-
-        if log_diagnostics:
-            context.logger.debug("prelist size: " + str(len(prelist)))
-            context.logger.debug("easing_list size: " + str(len(easing_list)))
-            context.logger.debug("postlist size: " + str(len(postlist)))
-
-        param_list = prelist + easing_list + postlist
-
-        if self.show_easing_plot:
-            plt.figure()
-            plt.xlabel("Step")
-            plt.ylabel("Param Value")
-            plt.title("Per-Step Values Based On Easing: " + self.easing)
-            plt.bar(range(len(param_list)), param_list)
-            # plt.plot(param_list)
-            ax = plt.gca()
-            ax.xaxis.set_major_locator(MaxNLocator(integer=True))
-            buf = io.BytesIO()
-            plt.savefig(buf, format="png")
-            buf.seek(0)
-            im = PIL.Image.open(buf)
-            im.show()
-            buf.close()
-
-        # output array of size steps, each entry list[i] is param value for step i
-        return FloatCollectionOutput(collection=param_list)
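For reference, the mirroring rule in the removed `StepParamEasingInvocation`, as a standalone sketch: ease over `ceil(n/2)` steps, then append a reversed copy, dropping the peak value once when `n` is odd so the total length stays `n`. Linear easing stands in here for the `easing_functions` classes; everything else follows the removed code above.

```python
import numpy as np


def mirrored_easing(n: int, start: float = 0.0, end: float = 1.0) -> list[float]:
    half = int(np.ceil(n / 2.0))
    base = list(np.linspace(start, end, half))  # linear stand-in for ease()
    if n % 2 == 0:
        return base + list(reversed(base))
    return base + list(reversed(base[:-1]))


print(mirrored_easing(5))  # [0.0, 0.5, 1.0, 0.5, 0.0]
print(mirrored_easing(4))  # [0.0, 1.0, 1.0, 0.0]
```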
@@ -4,7 +4,13 @@ from typing import Optional
 
 import torch
 
-from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
+from invokeai.app.invocations.baseinvocation import (
+    BaseInvocation,
+    BaseInvocationOutput,
+    Classification,
+    invocation,
+    invocation_output,
+)
 from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
 from invokeai.app.invocations.fields import (
     BoundingBoxField,
@@ -533,3 +539,23 @@ class BoundingBoxInvocation(BaseInvocation):
 
 
 # endregion
+
+
+@invocation(
+    "image_batch",
+    title="Image Batch",
+    tags=["primitives", "image", "batch", "internal"],
+    category="primitives",
+    version="1.0.0",
+    classification=Classification.Special,
+)
+class ImageBatchInvocation(BaseInvocation):
+    """Create a batched generation, where the workflow is executed once for each image in the batch."""
+
+    images: list[ImageField] = InputField(min_length=1, description="The images to batch over", input=Input.Direct)
+
+    def __init__(self):
+        raise NotImplementedError("This class should never be executed or instantiated directly.")
+
+    def invoke(self, context: InvocationContext) -> ImageOutput:
+        raise NotImplementedError("This class should never be executed or instantiated directly.")
@@ -1,16 +1,19 @@
|
||||
from typing import Callable, Tuple
|
||||
from typing import Callable, Optional, Tuple
|
||||
|
||||
import torch
|
||||
import torchvision.transforms as tv_transforms
|
||||
from diffusers.models.transformers.transformer_sd3 import SD3Transformer2DModel
|
||||
from diffusers.schedulers.scheduling_flow_match_euler_discrete import FlowMatchEulerDiscreteScheduler
|
||||
from torchvision.transforms.functional import resize as tv_resize
|
||||
from tqdm import tqdm
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
|
||||
from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
|
||||
from invokeai.app.invocations.fields import (
|
||||
DenoiseMaskField,
|
||||
FieldDescriptions,
|
||||
Input,
|
||||
InputField,
|
||||
LatentsField,
|
||||
SD3ConditioningField,
|
||||
WithBoard,
|
||||
WithMetadata,
|
||||
@@ -19,7 +22,9 @@ from invokeai.app.invocations.model import TransformerField
|
||||
from invokeai.app.invocations.primitives import LatentsOutput
|
||||
from invokeai.app.invocations.sd3_text_encoder import SD3_T5_MAX_SEQ_LEN
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.backend.flux.sampling_utils import clip_timestep_schedule_fractional
|
||||
from invokeai.backend.model_manager.config import BaseModelType
|
||||
from invokeai.backend.sd3.extensions.inpaint_extension import InpaintExtension
|
||||
from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState
|
||||
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import SD3ConditioningInfo
|
||||
from invokeai.backend.util.devices import TorchDevice
|
||||
@@ -30,16 +35,24 @@ from invokeai.backend.util.devices import TorchDevice
|
||||
title="SD3 Denoise",
|
||||
tags=["image", "sd3"],
|
||||
category="image",
|
||||
version="1.0.0",
|
||||
version="1.1.0",
|
||||
classification=Classification.Prototype,
|
||||
)
|
||||
class SD3DenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Run denoising process with a SD3 model."""
|
||||
|
||||
# If latents is provided, this means we are doing image-to-image.
|
||||
latents: Optional[LatentsField] = InputField(
|
||||
default=None, description=FieldDescriptions.latents, input=Input.Connection
|
||||
)
|
||||
# denoise_mask is used for image-to-image inpainting. Only the masked region is modified.
|
||||
denoise_mask: Optional[DenoiseMaskField] = InputField(
|
||||
default=None, description=FieldDescriptions.denoise_mask, input=Input.Connection
|
||||
)
|
||||
denoising_start: float = InputField(default=0.0, ge=0, le=1, description=FieldDescriptions.denoising_start)
|
||||
denoising_end: float = InputField(default=1.0, ge=0, le=1, description=FieldDescriptions.denoising_end)
|
||||
transformer: TransformerField = InputField(
|
||||
description=FieldDescriptions.sd3_model,
|
||||
input=Input.Connection,
|
||||
title="Transformer",
|
||||
description=FieldDescriptions.sd3_model, input=Input.Connection, title="Transformer"
|
||||
)
|
||||
positive_conditioning: SD3ConditioningField = InputField(
|
||||
description=FieldDescriptions.positive_cond, input=Input.Connection
|
||||
@@ -61,6 +74,41 @@ class SD3DenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
name = context.tensors.save(tensor=latents)
|
||||
return LatentsOutput.build(latents_name=name, latents=latents, seed=None)
|
||||
|
||||
def _prep_inpaint_mask(self, context: InvocationContext, latents: torch.Tensor) -> torch.Tensor | None:
|
||||
"""Prepare the inpaint mask.
|
||||
- Loads the mask
|
||||
- Resizes if necessary
|
||||
- Casts to same device/dtype as latents
|
||||
|
||||
Args:
|
||||
context (InvocationContext): The invocation context, for loading the inpaint mask.
|
||||
latents (torch.Tensor): A latent image tensor. Used to determine the target shape, device, and dtype for the
|
||||
inpaint mask.
|
||||
|
||||
Returns:
|
||||
torch.Tensor | None: Inpaint mask. Values of 0.0 represent the regions to be fully denoised, and 1.0
|
||||
represent the regions to be preserved.
|
||||
"""
|
||||
if self.denoise_mask is None:
|
||||
return None
|
||||
mask = context.tensors.load(self.denoise_mask.mask_name)
|
||||
|
||||
# The input denoise_mask contains values in [0, 1], where 0.0 represents the regions to be fully denoised, and
|
||||
# 1.0 represents the regions to be preserved.
|
||||
# We invert the mask so that the regions to be preserved are 0.0 and the regions to be denoised are 1.0.
|
||||
mask = 1.0 - mask
|
||||
|
||||
_, _, latent_height, latent_width = latents.shape
|
||||
mask = tv_resize(
|
||||
img=mask,
|
||||
size=[latent_height, latent_width],
|
||||
interpolation=tv_transforms.InterpolationMode.BILINEAR,
|
||||
antialias=False,
|
||||
)
|
||||
|
||||
mask = mask.to(device=latents.device, dtype=latents.dtype)
|
||||
return mask
|
||||
|
||||
def _load_text_conditioning(
|
||||
self,
|
||||
context: InvocationContext,
|
||||
@@ -170,14 +218,20 @@ class SD3DenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
prompt_embeds = torch.cat([neg_prompt_embeds, pos_prompt_embeds], dim=0)
|
||||
pooled_prompt_embeds = torch.cat([neg_pooled_prompt_embeds, pos_pooled_prompt_embeds], dim=0)
|
||||
|
||||
# Prepare the scheduler.
|
||||
scheduler = FlowMatchEulerDiscreteScheduler()
|
||||
scheduler.set_timesteps(num_inference_steps=self.steps, device=device)
|
||||
timesteps = scheduler.timesteps
|
||||
assert isinstance(timesteps, torch.Tensor)
|
||||
# Prepare the timestep schedule.
|
||||
# We add an extra step to the end to account for the final timestep of 0.0.
|
||||
timesteps: list[float] = torch.linspace(1, 0, self.steps + 1).tolist()
|
||||
# Clip the timesteps schedule based on denoising_start and denoising_end.
|
||||
timesteps = clip_timestep_schedule_fractional(timesteps, self.denoising_start, self.denoising_end)
|
||||
total_steps = len(timesteps) - 1
|
||||
|
||||
# Prepare the CFG scale list.
|
||||
cfg_scale = self._prepare_cfg_scale(len(timesteps))
|
||||
cfg_scale = self._prepare_cfg_scale(total_steps)
|
||||
|
||||
# Load the input latents, if provided.
|
||||
init_latents = context.tensors.load(self.latents.latents_name) if self.latents else None
|
||||
if init_latents is not None:
|
||||
init_latents = init_latents.to(device=device, dtype=inference_dtype)
|
||||
|
||||
# Generate initial latent noise.
|
||||
num_channels_latents = transformer_info.model.config.in_channels
|
||||
@@ -191,9 +245,34 @@ class SD3DenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
device=device,
|
||||
seed=self.seed,
|
||||
)
|
||||
latents: torch.Tensor = noise
|
||||
|
||||
total_steps = len(timesteps)
|
||||
# Prepare input latent image.
|
||||
if init_latents is not None:
|
||||
# Noise the init_latents by the appropriate amount for the first timestep.
|
||||
t_0 = timesteps[0]
|
||||
latents = t_0 * noise + (1.0 - t_0) * init_latents
|
||||
else:
|
||||
# init_latents are not provided, so we are not doing image-to-image (i.e. we are starting from pure noise).
|
||||
if self.denoising_start > 1e-5:
|
||||
raise ValueError("denoising_start should be 0 when initial latents are not provided.")
|
||||
latents = noise
|
||||
|
||||
# If len(timesteps) == 1, then short-circuit. We are just noising the input latents, but not taking any
|
||||
# denoising steps.
|
||||
if len(timesteps) <= 1:
|
||||
return latents
|
||||
|
||||
        # Prepare inpaint extension.
        inpaint_mask = self._prep_inpaint_mask(context, latents)
        inpaint_extension: InpaintExtension | None = None
        if inpaint_mask is not None:
            assert init_latents is not None
            inpaint_extension = InpaintExtension(
                init_latents=init_latents,
                inpaint_mask=inpaint_mask,
                noise=noise,
            )

        step_callback = self._build_step_callback(context)

        step_callback(

@@ -210,11 +289,12 @@ class SD3DenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
        assert isinstance(transformer, SD3Transformer2DModel)

        # 6. Denoising loop
        for step_idx, t in tqdm(list(enumerate(timesteps))):
        for step_idx, (t_curr, t_prev) in tqdm(list(enumerate(zip(timesteps[:-1], timesteps[1:], strict=True)))):
            # Expand the latents if we are doing CFG.
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            # Expand the timestep to match the latent model input.
            timestep = t.expand(latent_model_input.shape[0])
            # Multiply by 1000 to match the default FlowMatchEulerDiscreteScheduler num_train_timesteps.
            timestep = torch.tensor([t_curr * 1000], device=device).expand(latent_model_input.shape[0])

            noise_pred = transformer(
                hidden_states=latent_model_input,

@@ -232,21 +312,19 @@ class SD3DenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):

            # Compute the previous noisy sample x_t -> x_t-1.
            latents_dtype = latents.dtype
            latents = scheduler.step(model_output=noise_pred, timestep=t, sample=latents, return_dict=False)[0]
            latents = latents.to(dtype=torch.float32)
            latents = latents + (t_prev - t_curr) * noise_pred
            latents = latents.to(dtype=latents_dtype)

            # TODO(ryand): This MPS dtype handling was copied from diffusers. I haven't tested to see if it's
            # needed.
            if latents.dtype != latents_dtype:
                if torch.backends.mps.is_available():
                    # Some platforms (e.g. Apple MPS) misbehave due to a PyTorch bug: https://github.com/pytorch/pytorch/pull/99272
                    latents = latents.to(latents_dtype)
            if inpaint_extension is not None:
                latents = inpaint_extension.merge_intermediate_latents_with_init_latents(latents, t_prev)

            step_callback(
                PipelineIntermediateState(
                    step=step_idx + 1,
                    order=1,
                    total_steps=total_steps,
                    timestep=int(t),
                    timestep=int(t_curr),
                    latents=latents,
                ),
            )

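The manual update above replaces diffusers' scheduler.step with a one-line explicit Euler step for flow matching: the model output is treated as a velocity, and the sample moves along it for the interval t_prev - t_curr. A sketch under that assumption:

import torch

def euler_flow_step(latents: torch.Tensor, noise_pred: torch.Tensor, t_curr: float, t_prev: float) -> torch.Tensor:
    # x_{t_prev} = x_{t_curr} + (t_prev - t_curr) * v_theta(x, t).
    # Accumulate in float32 and cast back, as the loop above does.
    orig_dtype = latents.dtype
    latents = latents.to(torch.float32) + (t_prev - t_curr) * noise_pred.to(torch.float32)
    return latents.to(orig_dtype)

x = torch.randn(1, 16, 8, 8, dtype=torch.float16)
v = torch.randn_like(x)
x = euler_flow_step(x, v, t_curr=1.0, t_prev=0.75)  # one step of a 4-step schedule
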
invokeai/app/invocations/sd3_image_to_latents.py (new file, 65 lines)
@@ -0,0 +1,65 @@
import einops
import torch
from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL

from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
from invokeai.app.invocations.fields import (
    FieldDescriptions,
    ImageField,
    Input,
    InputField,
    WithBoard,
    WithMetadata,
)
from invokeai.app.invocations.model import VAEField
from invokeai.app.invocations.primitives import LatentsOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.load.load_base import LoadedModel
from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor


@invocation(
    "sd3_i2l",
    title="SD3 Image to Latents",
    tags=["image", "latents", "vae", "i2l", "sd3"],
    category="image",
    version="1.0.0",
    classification=Classification.Prototype,
)
class SD3ImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):
    """Generates latents from an image."""

    image: ImageField = InputField(description="The image to encode")
    vae: VAEField = InputField(description=FieldDescriptions.vae, input=Input.Connection)

    @staticmethod
    def vae_encode(vae_info: LoadedModel, image_tensor: torch.Tensor) -> torch.Tensor:
        with vae_info as vae:
            assert isinstance(vae, AutoencoderKL)

            vae.disable_tiling()

            image_tensor = image_tensor.to(device=vae.device, dtype=vae.dtype)
            with torch.inference_mode():
                image_tensor_dist = vae.encode(image_tensor).latent_dist
                # TODO: Use seed to make sampling reproducible.
                latents: torch.Tensor = image_tensor_dist.sample().to(dtype=vae.dtype)

            latents = vae.config.scaling_factor * latents

        return latents

    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> LatentsOutput:
        image = context.images.get_pil(self.image.image_name)

        image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
        if image_tensor.dim() == 3:
            image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")

        vae_info = context.models.load(self.vae.vae)
        latents = self.vae_encode(vae_info=vae_info, image_tensor=image_tensor)

        latents = latents.to("cpu")
        name = context.tensors.save(tensor=latents)
        return LatentsOutput.build(latents_name=name, latents=latents, seed=None)

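Encoding multiplies the sampled latents by vae.config.scaling_factor, so a matching decode path must divide the factor back out before calling vae.decode. A toy round-trip of just the scaling (1.5305 is SD3's published VAE scaling factor, but treat it as an assumption here and read it from vae.config in practice):

scaling_factor = 1.5305        # assumed value; use vae.config.scaling_factor
raw = 0.8                      # a raw latent value sampled from the VAE posterior
stored = scaling_factor * raw  # what the invocation above saves
recovered = stored / scaling_factor
assert abs(recovered - raw) < 1e-9
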
@@ -47,6 +47,7 @@ class SD3LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
        vae_info = context.models.load(self.vae.vae)
        assert isinstance(vae_info.model, (AutoencoderKL))
        with SeamlessExt.static_patch_model(vae_info.model, self.vae.seamless_axes), vae_info as vae:
            context.util.signal_progress("Running VAE")
            assert isinstance(vae, (AutoencoderKL))
            latents = latents.to(vae.device)


@@ -95,6 +95,7 @@ class Sd3TextEncoderInvocation(BaseInvocation):
            t5_text_encoder_info as t5_text_encoder,
            t5_tokenizer_info as t5_tokenizer,
        ):
            context.util.signal_progress("Running T5 encoder")
            assert isinstance(t5_text_encoder, T5EncoderModel)
            assert isinstance(t5_tokenizer, (T5Tokenizer, T5TokenizerFast))

@@ -137,6 +138,7 @@ class Sd3TextEncoderInvocation(BaseInvocation):
            clip_tokenizer_info as clip_tokenizer,
            ExitStack() as exit_stack,
        ):
            context.util.signal_progress("Running CLIP encoder")
            assert isinstance(clip_text_encoder, (CLIPTextModel, CLIPTextModelWithProjection))
            assert isinstance(clip_tokenizer, CLIPTokenizer)


@@ -86,7 +86,7 @@ class ModelLoadService(ModelLoadServiceBase):

    def torch_load_file(checkpoint: Path) -> AnyModel:
        scan_result = scan_file_path(checkpoint)
        if scan_result.infected_files != 0:
        if scan_result.infected_files != 0 or scan_result.scan_err:
            raise Exception(f"The model at {checkpoint} is potentially infected by malware. Aborting load.")
        result = torch_load(checkpoint, map_location="cpu")
        return result

@@ -160,6 +160,10 @@ class LoggerInterface(InvocationContextInterface):


class ImagesInterface(InvocationContextInterface):
    def __init__(self, services: InvocationServices, data: InvocationContextData, util: "UtilInterface") -> None:
        super().__init__(services, data)
        self._util = util

    def save(
        self,
        image: Image,

@@ -186,6 +190,8 @@ class ImagesInterface(InvocationContextInterface):
            The saved image DTO.
        """

        self._util.signal_progress("Saving image")

        # If `metadata` is provided directly, use that. Else, use the metadata provided by `WithMetadata`, falling back to None.
        metadata_ = None
        if metadata:

@@ -336,6 +342,10 @@ class ConditioningInterface(InvocationContextInterface):
class ModelsInterface(InvocationContextInterface):
    """Common API for loading, downloading and managing models."""

    def __init__(self, services: InvocationServices, data: InvocationContextData, util: "UtilInterface") -> None:
        super().__init__(services, data)
        self._util = util

    def exists(self, identifier: Union[str, "ModelIdentifierField"]) -> bool:
        """Check if a model exists.

@@ -368,11 +378,15 @@ class ModelsInterface(InvocationContextInterface):

        if isinstance(identifier, str):
            model = self._services.model_manager.store.get_model(identifier)
            return self._services.model_manager.load.load_model(model, submodel_type)
        else:
            _submodel_type = submodel_type or identifier.submodel_type
            submodel_type = submodel_type or identifier.submodel_type
            model = self._services.model_manager.store.get_model(identifier.key)
            return self._services.model_manager.load.load_model(model, _submodel_type)

        message = f"Loading model {model.name}"
        if submodel_type:
            message += f" ({submodel_type.value})"
        self._util.signal_progress(message)
        return self._services.model_manager.load.load_model(model, submodel_type)

    def load_by_attrs(
        self, name: str, base: BaseModelType, type: ModelType, submodel_type: Optional[SubModelType] = None

@@ -397,6 +411,10 @@ class ModelsInterface(InvocationContextInterface):
        if len(configs) > 1:
            raise ValueError(f"More than one model found with name {name}, base {base}, and type {type}")

        message = f"Loading model {name}"
        if submodel_type:
            message += f" ({submodel_type.value})"
        self._util.signal_progress(message)
        return self._services.model_manager.load.load_model(configs[0], submodel_type)

    def get_config(self, identifier: Union[str, "ModelIdentifierField"]) -> AnyModelConfig:

@@ -467,6 +485,7 @@ class ModelsInterface(InvocationContextInterface):
        Returns:
            Path to the downloaded model
        """
        self._util.signal_progress(f"Downloading model {source}")
        return self._services.model_manager.install.download_and_cache_model(source=source)

    def load_local_model(

@@ -489,6 +508,8 @@ class ModelsInterface(InvocationContextInterface):
        Returns:
            A LoadedModelWithoutConfig object.
        """

        self._util.signal_progress(f"Loading model {model_path.name}")
        return self._services.model_manager.load.load_model_from_path(model_path=model_path, loader=loader)

    def load_remote_model(

@@ -514,6 +535,8 @@ class ModelsInterface(InvocationContextInterface):
            A LoadedModelWithoutConfig object.
        """
        model_path = self._services.model_manager.install.download_and_cache_model(source=str(source))

        self._util.signal_progress(f"Loading model {source}")
        return self._services.model_manager.load.load_model_from_path(model_path=model_path, loader=loader)


@@ -707,12 +730,12 @@ def build_invocation_context(
    """

    logger = LoggerInterface(services=services, data=data)
    images = ImagesInterface(services=services, data=data)
    tensors = TensorsInterface(services=services, data=data)
    models = ModelsInterface(services=services, data=data)
    config = ConfigInterface(services=services, data=data)
    util = UtilInterface(services=services, data=data, is_canceled=is_canceled)
    conditioning = ConditioningInterface(services=services, data=data)
    models = ModelsInterface(services=services, data=data, util=util)
    images = ImagesInterface(services=services, data=data, util=util)
    boards = BoardsInterface(services=services, data=data)

    ctx = InvocationContext(

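The reordering in build_invocation_context matters: UtilInterface must be constructed before ModelsInterface and ImagesInterface, since both now receive it as a constructor argument for progress signaling. A stripped-down sketch of the injection pattern (the class names below are illustrative, not the real interfaces):

class Util:
    def signal_progress(self, message: str) -> None:
        print(f"[progress] {message}")

class Models:
    def __init__(self, util: Util) -> None:
        self._util = util  # injected so every load can report progress

    def load(self, name: str) -> None:
        self._util.signal_progress(f"Loading model {name}")

util = Util()
models = Models(util=util)  # must be constructed after util
models.load("sd3_medium")   # prints "[progress] Loading model sd3_medium"
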
@@ -1,9 +1,10 @@
import einops
import torch

from invokeai.backend.flux.extensions.regional_prompting_extension import RegionalPromptingExtension
from invokeai.backend.flux.extensions.xlabs_ip_adapter_extension import XLabsIPAdapterExtension
from invokeai.backend.flux.math import attention
from invokeai.backend.flux.modules.layers import DoubleStreamBlock
from invokeai.backend.flux.modules.layers import DoubleStreamBlock, SingleStreamBlock


class CustomDoubleStreamBlockProcessor:

@@ -13,7 +14,12 @@ class CustomDoubleStreamBlockProcessor:

    @staticmethod
    def _double_stream_block_forward(
        block: DoubleStreamBlock, img: torch.Tensor, txt: torch.Tensor, vec: torch.Tensor, pe: torch.Tensor
        block: DoubleStreamBlock,
        img: torch.Tensor,
        txt: torch.Tensor,
        vec: torch.Tensor,
        pe: torch.Tensor,
        attn_mask: torch.Tensor | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """This function is a direct copy of DoubleStreamBlock.forward(), but it returns some of the intermediate
        values.

@@ -40,7 +46,7 @@ class CustomDoubleStreamBlockProcessor:
        k = torch.cat((txt_k, img_k), dim=2)
        v = torch.cat((txt_v, img_v), dim=2)

        attn = attention(q, k, v, pe=pe)
        attn = attention(q, k, v, pe=pe, attn_mask=attn_mask)
        txt_attn, img_attn = attn[:, : txt.shape[1]], attn[:, txt.shape[1] :]

        # calculate the img blocks

@@ -63,11 +69,15 @@ class CustomDoubleStreamBlockProcessor:
        vec: torch.Tensor,
        pe: torch.Tensor,
        ip_adapter_extensions: list[XLabsIPAdapterExtension],
        regional_prompting_extension: RegionalPromptingExtension,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """A custom implementation of DoubleStreamBlock.forward() with additional features:
        - IP-Adapter support
        """
        img, txt, img_q = CustomDoubleStreamBlockProcessor._double_stream_block_forward(block, img, txt, vec, pe)
        attn_mask = regional_prompting_extension.get_double_stream_attn_mask(block_index)
        img, txt, img_q = CustomDoubleStreamBlockProcessor._double_stream_block_forward(
            block, img, txt, vec, pe, attn_mask=attn_mask
        )

        # Apply IP-Adapter conditioning.
        for ip_adapter_extension in ip_adapter_extensions:

@@ -81,3 +91,48 @@ class CustomDoubleStreamBlockProcessor:
        )

        return img, txt


class CustomSingleStreamBlockProcessor:
    """A class containing a custom implementation of SingleStreamBlock.forward() with additional features (masking,
    etc.)
    """

    @staticmethod
    def _single_stream_block_forward(
        block: SingleStreamBlock,
        x: torch.Tensor,
        vec: torch.Tensor,
        pe: torch.Tensor,
        attn_mask: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """This function is a direct copy of SingleStreamBlock.forward()."""
        mod, _ = block.modulation(vec)
        x_mod = (1 + mod.scale) * block.pre_norm(x) + mod.shift
        qkv, mlp = torch.split(block.linear1(x_mod), [3 * block.hidden_size, block.mlp_hidden_dim], dim=-1)

        q, k, v = einops.rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=block.num_heads)
        q, k = block.norm(q, k, v)

        # compute attention
        attn = attention(q, k, v, pe=pe, attn_mask=attn_mask)
        # compute activation in mlp stream, cat again and run second linear layer
        output = block.linear2(torch.cat((attn, block.mlp_act(mlp)), 2))
        return x + mod.gate * output

    @staticmethod
    def custom_single_block_forward(
        timestep_index: int,
        total_num_timesteps: int,
        block_index: int,
        block: SingleStreamBlock,
        img: torch.Tensor,
        vec: torch.Tensor,
        pe: torch.Tensor,
        regional_prompting_extension: RegionalPromptingExtension,
    ) -> torch.Tensor:
        """A custom implementation of SingleStreamBlock.forward() with additional features:
        - Masking
        """
        attn_mask = regional_prompting_extension.get_single_stream_attn_mask(block_index)
        return CustomSingleStreamBlockProcessor._single_stream_block_forward(block, img, vec, pe, attn_mask=attn_mask)

@@ -7,6 +7,7 @@ from tqdm import tqdm
from invokeai.backend.flux.controlnet.controlnet_flux_output import ControlNetFluxOutput, sum_controlnet_flux_outputs
from invokeai.backend.flux.extensions.inpaint_extension import InpaintExtension
from invokeai.backend.flux.extensions.instantx_controlnet_extension import InstantXControlNetExtension
from invokeai.backend.flux.extensions.regional_prompting_extension import RegionalPromptingExtension
from invokeai.backend.flux.extensions.xlabs_controlnet_extension import XLabsControlNetExtension
from invokeai.backend.flux.extensions.xlabs_ip_adapter_extension import XLabsIPAdapterExtension
from invokeai.backend.flux.model import Flux

@@ -18,14 +19,8 @@ def denoise(
    # model input
    img: torch.Tensor,
    img_ids: torch.Tensor,
    # positive text conditioning
    txt: torch.Tensor,
    txt_ids: torch.Tensor,
    vec: torch.Tensor,
    # negative text conditioning
    neg_txt: torch.Tensor | None,
    neg_txt_ids: torch.Tensor | None,
    neg_vec: torch.Tensor | None,
    pos_regional_prompting_extension: RegionalPromptingExtension,
    neg_regional_prompting_extension: RegionalPromptingExtension | None,
    # sampling parameters
    timesteps: list[float],
    step_callback: Callable[[PipelineIntermediateState], None],

@@ -61,9 +56,9 @@ def denoise(
            total_num_timesteps=total_steps,
            img=img,
            img_ids=img_ids,
            txt=txt,
            txt_ids=txt_ids,
            y=vec,
            txt=pos_regional_prompting_extension.regional_text_conditioning.t5_embeddings,
            txt_ids=pos_regional_prompting_extension.regional_text_conditioning.t5_txt_ids,
            y=pos_regional_prompting_extension.regional_text_conditioning.clip_embeddings,
            timesteps=t_vec,
            guidance=guidance_vec,
        )

@@ -78,9 +73,9 @@ def denoise(
        pred = model(
            img=img,
            img_ids=img_ids,
            txt=txt,
            txt_ids=txt_ids,
            y=vec,
            txt=pos_regional_prompting_extension.regional_text_conditioning.t5_embeddings,
            txt_ids=pos_regional_prompting_extension.regional_text_conditioning.t5_txt_ids,
            y=pos_regional_prompting_extension.regional_text_conditioning.clip_embeddings,
            timesteps=t_vec,
            guidance=guidance_vec,
            timestep_index=step_index,

@@ -88,6 +83,7 @@ def denoise(
            controlnet_double_block_residuals=merged_controlnet_residuals.double_block_residuals,
            controlnet_single_block_residuals=merged_controlnet_residuals.single_block_residuals,
            ip_adapter_extensions=pos_ip_adapter_extensions,
            regional_prompting_extension=pos_regional_prompting_extension,
        )

        step_cfg_scale = cfg_scale[step_index]

@@ -97,15 +93,15 @@ def denoise(
        # TODO(ryand): Add option to run positive and negative predictions in a single batch for better performance
        # on systems with sufficient VRAM.

        if neg_txt is None or neg_txt_ids is None or neg_vec is None:
        if neg_regional_prompting_extension is None:
            raise ValueError("Negative text conditioning is required when cfg_scale is not 1.0.")

        neg_pred = model(
            img=img,
            img_ids=img_ids,
            txt=neg_txt,
            txt_ids=neg_txt_ids,
            y=neg_vec,
            txt=neg_regional_prompting_extension.regional_text_conditioning.t5_embeddings,
            txt_ids=neg_regional_prompting_extension.regional_text_conditioning.t5_txt_ids,
            y=neg_regional_prompting_extension.regional_text_conditioning.clip_embeddings,
            timesteps=t_vec,
            guidance=guidance_vec,
            timestep_index=step_index,

@@ -113,6 +109,7 @@ def denoise(
            controlnet_double_block_residuals=None,
            controlnet_single_block_residuals=None,
            ip_adapter_extensions=neg_ip_adapter_extensions,
            regional_prompting_extension=neg_regional_prompting_extension,
        )
        pred = neg_pred + step_cfg_scale * (pred - neg_pred)

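The final line is standard classifier-free guidance: extrapolate from the unconditional (negative) prediction toward the conditional one. A scalar sanity check with made-up numbers:

neg_pred, pred, scale = 0.2, 0.5, 3.0
guided = neg_pred + scale * (pred - neg_pred)
print(guided)  # 1.1: scale=1.0 would return pred unchanged; scale>1 amplifies the conditioning
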
invokeai/backend/flux/extensions/regional_prompting_extension.py (new file, 276 lines)
@@ -0,0 +1,276 @@
from typing import Optional

import torch
import torchvision

from invokeai.backend.flux.text_conditioning import FluxRegionalTextConditioning, FluxTextConditioning
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import Range
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.mask import to_standard_float_mask


class RegionalPromptingExtension:
    """A class for managing regional prompting with FLUX.

    This implementation is inspired by https://arxiv.org/pdf/2411.02395 (though there are significant differences).
    """

    def __init__(
        self,
        regional_text_conditioning: FluxRegionalTextConditioning,
        restricted_attn_mask: torch.Tensor | None = None,
    ):
        self.regional_text_conditioning = regional_text_conditioning
        self.restricted_attn_mask = restricted_attn_mask

    def get_double_stream_attn_mask(self, block_index: int) -> torch.Tensor | None:
        order = [self.restricted_attn_mask, None]
        return order[block_index % len(order)]

    def get_single_stream_attn_mask(self, block_index: int) -> torch.Tensor | None:
        order = [self.restricted_attn_mask, None]
        return order[block_index % len(order)]

    @classmethod
    def from_text_conditioning(cls, text_conditioning: list[FluxTextConditioning], img_seq_len: int):
        """Create a RegionalPromptingExtension from a list of text conditionings.

        Args:
            text_conditioning (list[FluxTextConditioning]): The text conditionings to use for regional prompting.
            img_seq_len (int): The image sequence length (i.e. packed_height * packed_width).
        """
        regional_text_conditioning = cls._concat_regional_text_conditioning(text_conditioning)
        attn_mask_with_restricted_img_self_attn = cls._prepare_restricted_attn_mask(
            regional_text_conditioning, img_seq_len
        )
        return cls(
            regional_text_conditioning=regional_text_conditioning,
            restricted_attn_mask=attn_mask_with_restricted_img_self_attn,
        )

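get_double_stream_attn_mask and get_single_stream_attn_mask alternate between the restricted mask and unmasked attention on successive blocks, so regional constraints are enforced on even-indexed blocks while odd-indexed blocks mix information globally. A quick check of the indexing:

restricted = "RESTRICTED_MASK"  # stand-in for the real mask tensor
order = [restricted, None]
for block_index in range(4):
    print(block_index, order[block_index % len(order)])
# 0 RESTRICTED_MASK
# 1 None
# 2 RESTRICTED_MASK
# 3 None
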
    # Keeping _prepare_unrestricted_attn_mask for reference as an alternative masking strategy:
    #
    # @classmethod
    # def _prepare_unrestricted_attn_mask(
    #     cls,
    #     regional_text_conditioning: FluxRegionalTextConditioning,
    #     img_seq_len: int,
    # ) -> torch.Tensor:
    #     """Prepare an 'unrestricted' attention mask. In this context, 'unrestricted' means that:
    #     - img self-attention is not masked.
    #     - img regions attend to both txt within their own region and to global prompts.
    #     """
    #     device = TorchDevice.choose_torch_device()

    #     # Infer txt_seq_len from the t5_embeddings tensor.
    #     txt_seq_len = regional_text_conditioning.t5_embeddings.shape[1]

    #     # In the attention blocks, the txt seq and img seq are concatenated and then attention is applied.
    #     # Concatenation happens in the following order: [txt_seq, img_seq].
    #     # There are 4 portions of the attention mask to consider as we prepare it:
    #     # 1. txt attends to itself
    #     # 2. txt attends to corresponding regional img
    #     # 3. regional img attends to corresponding txt
    #     # 4. regional img attends to itself

    #     # Initialize empty attention mask.
    #     regional_attention_mask = torch.zeros(
    #         (txt_seq_len + img_seq_len, txt_seq_len + img_seq_len), device=device, dtype=torch.float16
    #     )

    #     for image_mask, t5_embedding_range in zip(
    #         regional_text_conditioning.image_masks, regional_text_conditioning.t5_embedding_ranges, strict=True
    #     ):
    #         # 1. txt attends to itself
    #         regional_attention_mask[
    #             t5_embedding_range.start : t5_embedding_range.end, t5_embedding_range.start : t5_embedding_range.end
    #         ] = 1.0

    #         # 2. txt attends to corresponding regional img
    #         # Note that we reshape to (1, img_seq_len) to ensure broadcasting works as desired.
    #         fill_value = image_mask.view(1, img_seq_len) if image_mask is not None else 1.0
    #         regional_attention_mask[t5_embedding_range.start : t5_embedding_range.end, txt_seq_len:] = fill_value

    #         # 3. regional img attends to corresponding txt
    #         # Note that we reshape to (img_seq_len, 1) to ensure broadcasting works as desired.
    #         fill_value = image_mask.view(img_seq_len, 1) if image_mask is not None else 1.0
    #         regional_attention_mask[txt_seq_len:, t5_embedding_range.start : t5_embedding_range.end] = fill_value

    #     # 4. regional img attends to itself
    #     # Allow unrestricted img self attention.
    #     regional_attention_mask[txt_seq_len:, txt_seq_len:] = 1.0

    #     # Convert attention mask to boolean.
    #     regional_attention_mask = regional_attention_mask > 0.5

    #     return regional_attention_mask

    @classmethod
    def _prepare_restricted_attn_mask(
        cls,
        regional_text_conditioning: FluxRegionalTextConditioning,
        img_seq_len: int,
    ) -> torch.Tensor | None:
        """Prepare a 'restricted' attention mask. In this context, 'restricted' means that:
        - img self-attention is only allowed within regions.
        - img regions only attend to txt within their own region, not to global prompts.
        """
        # Identify the background region, i.e. the region that is not covered by any region masks.
        background_region_mask: None | torch.Tensor = None
        for image_mask in regional_text_conditioning.image_masks:
            if image_mask is not None:
                if background_region_mask is None:
                    background_region_mask = torch.ones_like(image_mask)
                background_region_mask *= 1 - image_mask

        if background_region_mask is None:
            # There are no region masks, so short-circuit and return None.
            # TODO(ryand): We could restrict txt-txt attention across multiple global prompts, but this is a rare
            # use case and would make the logic here significantly more complicated.
            return None

        device = TorchDevice.choose_torch_device()

        # Infer txt_seq_len from the t5_embeddings tensor.
        txt_seq_len = regional_text_conditioning.t5_embeddings.shape[1]

        # In the attention blocks, the txt seq and img seq are concatenated and then attention is applied.
        # Concatenation happens in the following order: [txt_seq, img_seq].
        # There are 4 portions of the attention mask to consider as we prepare it:
        # 1. txt attends to itself
        # 2. txt attends to corresponding regional img
        # 3. regional img attends to corresponding txt
        # 4. regional img attends to itself

        # Initialize empty attention mask.
        regional_attention_mask = torch.zeros(
            (txt_seq_len + img_seq_len, txt_seq_len + img_seq_len), device=device, dtype=torch.float16
        )

        for image_mask, t5_embedding_range in zip(
            regional_text_conditioning.image_masks, regional_text_conditioning.t5_embedding_ranges, strict=True
        ):
            # 1. txt attends to itself
            regional_attention_mask[
                t5_embedding_range.start : t5_embedding_range.end, t5_embedding_range.start : t5_embedding_range.end
            ] = 1.0

            if image_mask is not None:
                # 2. txt attends to corresponding regional img
                # Note that we reshape to (1, img_seq_len) to ensure broadcasting works as desired.
                regional_attention_mask[t5_embedding_range.start : t5_embedding_range.end, txt_seq_len:] = (
                    image_mask.view(1, img_seq_len)
                )

                # 3. regional img attends to corresponding txt
                # Note that we reshape to (img_seq_len, 1) to ensure broadcasting works as desired.
                regional_attention_mask[txt_seq_len:, t5_embedding_range.start : t5_embedding_range.end] = (
                    image_mask.view(img_seq_len, 1)
                )

                # 4. regional img attends to itself
                image_mask = image_mask.view(img_seq_len, 1)
                regional_attention_mask[txt_seq_len:, txt_seq_len:] += image_mask @ image_mask.T
            else:
                # We don't allow attention between non-background image regions and global prompts. This helps to ensure
                # that regions focus on their local prompts. We do, however, allow attention between background regions
                # and global prompts. If we didn't do this, then the background regions would not attend to any txt
                # embeddings, which we found experimentally to cause artifacts.

                # 2. global txt attends to background region
                # Note that we reshape to (1, img_seq_len) to ensure broadcasting works as desired.
                regional_attention_mask[t5_embedding_range.start : t5_embedding_range.end, txt_seq_len:] = (
                    background_region_mask.view(1, img_seq_len)
                )

                # 3. background region attends to global txt
                # Note that we reshape to (img_seq_len, 1) to ensure broadcasting works as desired.
                regional_attention_mask[txt_seq_len:, t5_embedding_range.start : t5_embedding_range.end] = (
                    background_region_mask.view(img_seq_len, 1)
                )

                # Allow background regions to attend to themselves.
                regional_attention_mask[txt_seq_len:, txt_seq_len:] += background_region_mask.view(img_seq_len, 1)
                regional_attention_mask[txt_seq_len:, txt_seq_len:] += background_region_mask.view(1, img_seq_len)

        # Convert attention mask to boolean.
        regional_attention_mask = regional_attention_mask > 0.5

        return regional_attention_mask

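A toy instantiation of the four-part scheme above, with one regional prompt spanning 2 txt tokens and an img sequence of 3 tokens, the first two covered by the region. This shows only the mask geometry, using plain tensors rather than the real Range/conditioning types:

import torch

txt_seq_len, img_seq_len = 2, 3
region = torch.tensor([1.0, 1.0, 0.0])  # img tokens 0-1 belong to the region

n = txt_seq_len + img_seq_len
mask = torch.zeros(n, n)
mask[:txt_seq_len, :txt_seq_len] = 1.0                 # 1. txt attends to itself
mask[:txt_seq_len, txt_seq_len:] = region.view(1, -1)  # 2. txt -> regional img
mask[txt_seq_len:, :txt_seq_len] = region.view(-1, 1)  # 3. regional img -> txt
m = region.view(-1, 1)
mask[txt_seq_len:, txt_seq_len:] += m @ m.T            # 4. img self-attn within the region
print(mask > 0.5)
# img token 2 (outside the region) attends to nothing here; in the real code it is
# the "background" and gets wired to the global prompt instead.
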
    @classmethod
    def _concat_regional_text_conditioning(
        cls,
        text_conditionings: list[FluxTextConditioning],
    ) -> FluxRegionalTextConditioning:
        """Concatenate regional text conditioning data into a single conditioning tensor (with associated masks)."""
        concat_t5_embeddings: list[torch.Tensor] = []
        concat_t5_embedding_ranges: list[Range] = []
        image_masks: list[torch.Tensor | None] = []

        # Choose global CLIP embedding.
        # Use the first global prompt's CLIP embedding as the global CLIP embedding. If there is no global prompt, use
        # the first prompt's CLIP embedding.
        global_clip_embedding: torch.Tensor = text_conditionings[0].clip_embeddings
        for text_conditioning in text_conditionings:
            if text_conditioning.mask is None:
                global_clip_embedding = text_conditioning.clip_embeddings
                break

        cur_t5_embedding_len = 0
        for text_conditioning in text_conditionings:
            concat_t5_embeddings.append(text_conditioning.t5_embeddings)

            concat_t5_embedding_ranges.append(
                Range(start=cur_t5_embedding_len, end=cur_t5_embedding_len + text_conditioning.t5_embeddings.shape[1])
            )

            image_masks.append(text_conditioning.mask)

            cur_t5_embedding_len += text_conditioning.t5_embeddings.shape[1]

        t5_embeddings = torch.cat(concat_t5_embeddings, dim=1)

        # Initialize the txt_ids tensor.
        pos_bs, pos_t5_seq_len, _ = t5_embeddings.shape
        t5_txt_ids = torch.zeros(
            pos_bs, pos_t5_seq_len, 3, dtype=t5_embeddings.dtype, device=TorchDevice.choose_torch_device()
        )

        return FluxRegionalTextConditioning(
            t5_embeddings=t5_embeddings,
            clip_embeddings=global_clip_embedding,
            t5_txt_ids=t5_txt_ids,
            image_masks=image_masks,
            t5_embedding_ranges=concat_t5_embedding_ranges,
        )

    @staticmethod
    def preprocess_regional_prompt_mask(
        mask: Optional[torch.Tensor], packed_height: int, packed_width: int, dtype: torch.dtype, device: torch.device
    ) -> torch.Tensor:
        """Preprocess a regional prompt mask to match the target height and width.
        If mask is None, returns a mask of all ones with the target height and width.
        If mask is not None, resizes the mask to the target height and width using 'nearest' interpolation.

        packed_height and packed_width are the target height and width of the mask in the 'packed' latent space.

        Returns:
            torch.Tensor: The processed mask. shape: (1, 1, packed_height * packed_width).
        """

        if mask is None:
            return torch.ones((1, 1, packed_height * packed_width), dtype=dtype, device=device)

        mask = to_standard_float_mask(mask, out_dtype=dtype)

        tf = torchvision.transforms.Resize(
            (packed_height, packed_width), interpolation=torchvision.transforms.InterpolationMode.NEAREST
        )

        # Add a batch dimension to the mask, because torchvision expects shape (batch, channels, h, w).
        mask = mask.unsqueeze(0)  # Shape: (1, h, w) -> (1, 1, h, w)
        resized_mask = tf(mask)

        # Flatten the height and width dimensions into a single image_seq_len dimension.
        return resized_mask.flatten(start_dim=2)
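A quick shape check of the preprocessing above (standalone, using a plain float mask rather than the to_standard_float_mask helper):

import torch
import torchvision

mask = torch.zeros(1, 64, 64)  # (1, h, w) mask at full mask resolution
mask[:, :32, :] = 1.0          # top half is the region

tf = torchvision.transforms.Resize((8, 8), interpolation=torchvision.transforms.InterpolationMode.NEAREST)
resized = tf(mask.unsqueeze(0))      # (1, 1, 8, 8): the packed latent grid
flat = resized.flatten(start_dim=2)  # (1, 1, 64) == (1, 1, packed_height * packed_width)
print(flat.shape)
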
@@ -41,10 +41,12 @@ def infer_xlabs_ip_adapter_params_from_state_dict(state_dict: dict[str, torch.Te
    hidden_dim = state_dict["double_blocks.0.processor.ip_adapter_double_stream_k_proj.weight"].shape[0]
    context_dim = state_dict["double_blocks.0.processor.ip_adapter_double_stream_k_proj.weight"].shape[1]
    clip_embeddings_dim = state_dict["ip_adapter_proj_model.proj.weight"].shape[1]
    clip_extra_context_tokens = state_dict["ip_adapter_proj_model.proj.weight"].shape[0] // context_dim

    return XlabsIpAdapterParams(
        num_double_blocks=num_double_blocks,
        context_dim=context_dim,
        hidden_dim=hidden_dim,
        clip_embeddings_dim=clip_embeddings_dim,
        clip_extra_context_tokens=clip_extra_context_tokens,
    )

@@ -31,13 +31,16 @@ class XlabsIpAdapterParams:
    hidden_dim: int

    clip_embeddings_dim: int
    clip_extra_context_tokens: int


class XlabsIpAdapterFlux(torch.nn.Module):
    def __init__(self, params: XlabsIpAdapterParams):
        super().__init__()
        self.image_proj = ImageProjModel(
            cross_attention_dim=params.context_dim, clip_embeddings_dim=params.clip_embeddings_dim
            cross_attention_dim=params.context_dim,
            clip_embeddings_dim=params.clip_embeddings_dim,
            clip_extra_context_tokens=params.clip_extra_context_tokens,
        )
        self.ip_adapter_double_blocks = IPAdapterDoubleBlocks(
            num_double_blocks=params.num_double_blocks, context_dim=params.context_dim, hidden_dim=params.hidden_dim

@@ -5,10 +5,10 @@ from einops import rearrange
from torch import Tensor


def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor) -> Tensor:
def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor, attn_mask: Tensor | None = None) -> Tensor:
    q, k = apply_rope(q, k, pe)

    x = torch.nn.functional.scaled_dot_product_attention(q, k, v)
    x = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=attn_mask)
    x = rearrange(x, "B H L D -> B L (H D)")

    return x

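One detail worth noting for the masks built above: when attn_mask is a boolean tensor, torch.nn.functional.scaled_dot_product_attention treats True as "may attend" and False as masked out, which matches the `> 0.5` conversion in the regional prompting extension. A tiny standalone check:

import torch
import torch.nn.functional as F

q = torch.randn(1, 1, 2, 4)  # (batch, heads, seq, head_dim)
k = torch.randn(1, 1, 2, 4)
v = torch.randn(1, 1, 2, 4)

# Query 0 may only attend to key 0; query 1 may attend to both keys.
mask = torch.tensor([[True, False], [True, True]])
out = F.scaled_dot_product_attention(q, k, v, attn_mask=mask)
# With the first query restricted to a single key, its output is exactly v[..., 0, :].
assert torch.allclose(out[..., 0, :], v[..., 0, :], atol=1e-6)
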
@@ -5,7 +5,11 @@ from dataclasses import dataclass
import torch
from torch import Tensor, nn

from invokeai.backend.flux.custom_block_processor import CustomDoubleStreamBlockProcessor
from invokeai.backend.flux.custom_block_processor import (
    CustomDoubleStreamBlockProcessor,
    CustomSingleStreamBlockProcessor,
)
from invokeai.backend.flux.extensions.regional_prompting_extension import RegionalPromptingExtension
from invokeai.backend.flux.extensions.xlabs_ip_adapter_extension import XLabsIPAdapterExtension
from invokeai.backend.flux.modules.layers import (
    DoubleStreamBlock,

@@ -95,6 +99,7 @@ class Flux(nn.Module):
        controlnet_double_block_residuals: list[Tensor] | None,
        controlnet_single_block_residuals: list[Tensor] | None,
        ip_adapter_extensions: list[XLabsIPAdapterExtension],
        regional_prompting_extension: RegionalPromptingExtension,
    ) -> Tensor:
        if img.ndim != 3 or txt.ndim != 3:
            raise ValueError("Input img and txt tensors must have 3 dimensions.")

@@ -117,7 +122,6 @@ class Flux(nn.Module):
            assert len(controlnet_double_block_residuals) == len(self.double_blocks)
        for block_index, block in enumerate(self.double_blocks):
            assert isinstance(block, DoubleStreamBlock)

            img, txt = CustomDoubleStreamBlockProcessor.custom_double_block_forward(
                timestep_index=timestep_index,
                total_num_timesteps=total_num_timesteps,

@@ -128,6 +132,7 @@ class Flux(nn.Module):
                vec=vec,
                pe=pe,
                ip_adapter_extensions=ip_adapter_extensions,
                regional_prompting_extension=regional_prompting_extension,
            )

            if controlnet_double_block_residuals is not None:

@@ -140,7 +145,17 @@ class Flux(nn.Module):
            assert len(controlnet_single_block_residuals) == len(self.single_blocks)

        for block_index, block in enumerate(self.single_blocks):
            img = block(img, vec=vec, pe=pe)
            assert isinstance(block, SingleStreamBlock)
            img = CustomSingleStreamBlockProcessor.custom_single_block_forward(
                timestep_index=timestep_index,
                total_num_timesteps=total_num_timesteps,
                block_index=block_index,
                block=block,
                img=img,
                vec=vec,
                pe=pe,
                regional_prompting_extension=regional_prompting_extension,
            )

            if controlnet_single_block_residuals is not None:
                img[:, txt.shape[1] :, ...] += controlnet_single_block_residuals[block_index]

@@ -1,32 +1,53 @@
# Initially pulled from https://github.com/black-forest-labs/flux


from torch import Tensor, nn
from transformers import PreTrainedModel, PreTrainedTokenizer


class HFEncoder(nn.Module):
    def __init__(self, encoder: PreTrainedModel, tokenizer: PreTrainedTokenizer, is_clip: bool, max_length: int):
    def __init__(self, encoder: PreTrainedModel, tokenizer: PreTrainedTokenizer, is_clip: bool):
        super().__init__()
        self.max_length = max_length
        self.is_clip = is_clip
        self.output_key = "pooler_output" if self.is_clip else "last_hidden_state"
        self.tokenizer = tokenizer
        self.hf_module = encoder
        self.hf_module = self.hf_module.eval().requires_grad_(False)

    def forward(self, text: list[str]) -> Tensor:
    def forward(self, text: list[str], valid_seq_lens: list[int]) -> Tensor:
        """Encode text into a tensor.

        Args:
            text: A list of text prompts to encode.
            valid_seq_lens: A list of valid sequence lengths. The shortest valid sequence length that can contain the
                text will be used. If the largest valid sequence length cannot contain the text, the encoding will be
                truncated.
        """
        valid_seq_lens = sorted(valid_seq_lens)

        # Perform initial encoding with the maximum valid sequence length.
        batch_encoding = self.tokenizer(
            text,
            truncation=True,
            max_length=self.max_length,
            return_length=False,
            max_length=max(valid_seq_lens),
            return_length=True,
            return_overflowing_tokens=False,
            padding="max_length",
            return_tensors="pt",
        )

        # Find selected_seq_len, the minimum valid sequence length that can contain all of the input tokens.
        seq_len: int = batch_encoding["length"][0].item()
        selected_seq_len = valid_seq_lens[-1]
        for valid_len in valid_seq_lens:  # renamed from `len` to avoid shadowing the built-in
            if valid_len >= seq_len:
                selected_seq_len = valid_len
                break

        input_ids = batch_encoding["input_ids"][..., :selected_seq_len]

        outputs = self.hf_module(
            input_ids=batch_encoding["input_ids"].to(self.hf_module.device),
            input_ids=input_ids.to(self.hf_module.device),
            attention_mask=None,
            output_hidden_states=False,
        )

invokeai/backend/flux/text_conditioning.py (new file, 36 lines)
@@ -0,0 +1,36 @@
from dataclasses import dataclass

import torch

from invokeai.backend.stable_diffusion.diffusion.conditioning_data import Range


@dataclass
class FluxTextConditioning:
    t5_embeddings: torch.Tensor
    clip_embeddings: torch.Tensor
    # If mask is None, the prompt is a global prompt.
    mask: torch.Tensor | None


@dataclass
class FluxRegionalTextConditioning:
    # Concatenated text embeddings.
    # Shape: (1, concatenated_txt_seq_len, 4096)
    t5_embeddings: torch.Tensor
    # Shape: (1, concatenated_txt_seq_len, 3)
    t5_txt_ids: torch.Tensor

    # Global CLIP embeddings.
    # Shape: (1, 768)
    clip_embeddings: torch.Tensor

    # A binary mask indicating the regions of the image that the prompt should be applied to. If None, the prompt is a
    # global prompt.
    # image_masks[i] is the mask for the ith prompt.
    # image_masks[i] has shape (1, image_seq_len) and dtype torch.bool.
    image_masks: list[torch.Tensor | None]

    # List of ranges that represent the embedding ranges for each mask.
    # t5_embedding_ranges[i] contains the range of the t5 embeddings that correspond to image_masks[i].
    t5_embedding_ranges: list[Range]
invokeai/backend/image_util/assets/CIELab_to_UPLab.icc (new binary file; not shown)
invokeai/backend/image_util/composition.py (new file, 1020 lines; diff suppressed because it is too large)
@@ -45,8 +45,9 @@ def lora_model_from_flux_diffusers_state_dict(state_dict: Dict[str, torch.Tensor
    # Constants for FLUX.1
    num_double_layers = 19
    num_single_layers = 38
    # inner_dim = 3072
    # mlp_ratio = 4.0
    hidden_size = 3072
    mlp_ratio = 4.0
    mlp_hidden_dim = int(hidden_size * mlp_ratio)

    layers: dict[str, AnyLoRALayer] = {}

@@ -62,30 +63,43 @@ def lora_model_from_flux_diffusers_state_dict(state_dict: Dict[str, torch.Tensor
            layers[dst_key] = LoRALayer.from_state_dict_values(values=value)
            assert len(src_layer_dict) == 0

    def add_qkv_lora_layer_if_present(src_keys: list[str], dst_qkv_key: str) -> None:
    def add_qkv_lora_layer_if_present(
        src_keys: list[str],
        src_weight_shapes: list[tuple[int, int]],
        dst_qkv_key: str,
        allow_missing_keys: bool = False,
    ) -> None:
        """Handle the Q, K, V matrices for a transformer block. We need special handling because the diffusers format
        stores them in separate matrices, whereas the BFL format used internally by InvokeAI concatenates them.
        """
        # We expect that either all src keys are present or none of them are. Verify this.
        keys_present = [key in grouped_state_dict for key in src_keys]
        assert all(keys_present) or not any(keys_present)

        # If none of the keys are present, return early.
        keys_present = [key in grouped_state_dict for key in src_keys]
        if not any(keys_present):
            return

        src_layer_dicts = [grouped_state_dict.pop(key) for key in src_keys]
        sub_layers: list[LoRALayer] = []
        for src_layer_dict in src_layer_dicts:
            values = {
                "lora_down.weight": src_layer_dict.pop("lora_A.weight"),
                "lora_up.weight": src_layer_dict.pop("lora_B.weight"),
            }
            if alpha is not None:
                values["alpha"] = torch.tensor(alpha)
            sub_layers.append(LoRALayer.from_state_dict_values(values=values))
            assert len(src_layer_dict) == 0
        layers[dst_qkv_key] = ConcatenatedLoRALayer(lora_layers=sub_layers, concat_axis=0)
        for src_key, src_weight_shape in zip(src_keys, src_weight_shapes, strict=True):
            src_layer_dict = grouped_state_dict.pop(src_key, None)
            if src_layer_dict is not None:
                values = {
                    "lora_down.weight": src_layer_dict.pop("lora_A.weight"),
                    "lora_up.weight": src_layer_dict.pop("lora_B.weight"),
                }
                if alpha is not None:
                    values["alpha"] = torch.tensor(alpha)
                assert values["lora_down.weight"].shape[1] == src_weight_shape[1]
                assert values["lora_up.weight"].shape[0] == src_weight_shape[0]
                sub_layers.append(LoRALayer.from_state_dict_values(values=values))
                assert len(src_layer_dict) == 0
            else:
                if not allow_missing_keys:
                    raise ValueError(f"Missing LoRA layer: '{src_key}'.")
                values = {
                    "lora_up.weight": torch.zeros((src_weight_shape[0], 1)),
                    "lora_down.weight": torch.zeros((1, src_weight_shape[1])),
                }
                sub_layers.append(LoRALayer.from_state_dict_values(values=values))
        layers[dst_qkv_key] = ConcatenatedLoRALayer(lora_layers=sub_layers)

    # time_text_embed.timestep_embedder -> time_in.
    add_lora_layer_if_present("time_text_embed.timestep_embedder.linear_1", "time_in.in_layer")

@@ -118,6 +132,7 @@ def lora_model_from_flux_diffusers_state_dict(state_dict: Dict[str, torch.Tensor
            f"transformer_blocks.{i}.attn.to_k",
            f"transformer_blocks.{i}.attn.to_v",
        ],
        [(hidden_size, hidden_size), (hidden_size, hidden_size), (hidden_size, hidden_size)],
        f"double_blocks.{i}.img_attn.qkv",
    )
    add_qkv_lora_layer_if_present(

@@ -126,6 +141,7 @@ def lora_model_from_flux_diffusers_state_dict(state_dict: Dict[str, torch.Tensor
            f"transformer_blocks.{i}.attn.add_k_proj",
            f"transformer_blocks.{i}.attn.add_v_proj",
        ],
        [(hidden_size, hidden_size), (hidden_size, hidden_size), (hidden_size, hidden_size)],
        f"double_blocks.{i}.txt_attn.qkv",
    )

@@ -175,7 +191,14 @@ def lora_model_from_flux_diffusers_state_dict(state_dict: Dict[str, torch.Tensor
            f"single_transformer_blocks.{i}.attn.to_v",
            f"single_transformer_blocks.{i}.proj_mlp",
        ],
        [
            (hidden_size, hidden_size),
            (hidden_size, hidden_size),
            (hidden_size, hidden_size),
            (mlp_hidden_dim, hidden_size),
        ],
        f"single_blocks.{i}.linear1",
        allow_missing_keys=True,
    )

    # Output projections.

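The zero-filled placeholder above keeps the concatenated layout intact when a diffusers LoRA omits one of the fused sub-matrices: an all-zero rank-1 LoRA contributes nothing to the output but still occupies the correct rows. A toy illustration of why the per-matrix shapes must line up with the fused weight (plain tensors, not the real LoRALayer classes):

import torch

hidden = 4
# Per-matrix LoRA weight deltas for Q, K, V (diffusers keeps them separate).
delta_q = torch.ones(hidden, hidden)
delta_k = torch.zeros(hidden, hidden)  # e.g. a missing key becomes an all-zero delta
delta_v = torch.ones(hidden, hidden) * 2

# The BFL format fuses Q, K, V into one (3 * hidden, hidden) weight, so the deltas
# are concatenated along the output (row) axis in the same order.
fused_delta = torch.cat([delta_q, delta_k, delta_v], dim=0)
print(fused_delta.shape)  # torch.Size([12, 4])
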
@@ -35,6 +35,7 @@ class ModelLoader(ModelLoaderBase):
        self._logger = logger
        self._ram_cache = ram_cache
        self._torch_dtype = TorchDevice.choose_torch_dtype()
        self._torch_device = TorchDevice.choose_torch_device()

    def load_model(self, model_config: AnyModelConfig, submodel_type: Optional[SubModelType] = None) -> LoadedModel:
        """

@@ -84,7 +84,15 @@ class FluxVAELoader(ModelLoader):
            model = AutoEncoder(ae_params[config.config_path])
            sd = load_file(model_path)
            model.load_state_dict(sd, assign=True)
            model.to(dtype=self._torch_dtype)
            # The VAE is broken in float16, which MPS defaults to.
            if self._torch_dtype == torch.float16:
                try:
                    vae_dtype = torch.tensor([1.0], dtype=torch.bfloat16, device=self._torch_device).dtype
                except TypeError:
                    vae_dtype = torch.float32
            else:
                vae_dtype = self._torch_dtype
            model.to(vae_dtype)

            return model


@@ -469,7 +469,7 @@ class ModelProbe(object):
        """
        # scan model
        scan_result = scan_file_path(checkpoint)
        if scan_result.infected_files != 0:
        if scan_result.infected_files != 0 or scan_result.scan_err:
            raise Exception(f"The model {model_name} is potentially infected by malware. Aborting import.")


@@ -485,6 +485,7 @@ MODEL_NAME_TO_PREPROCESSOR = {
    "lineart anime": "lineart_anime_image_processor",
    "lineart_anime": "lineart_anime_image_processor",
    "lineart": "lineart_image_processor",
    "soft": "hed_image_processor",
    "softedge": "hed_image_processor",
    "hed": "hed_image_processor",
    "shuffle": "content_shuffle_image_processor",

@@ -298,13 +298,12 @@ ip_adapter_sdxl = StarterModel(
    previous_names=["IP Adapter SDXL"],
)
ip_adapter_flux = StarterModel(
    name="Standard Reference (XLabs FLUX IP-Adapter)",
    name="Standard Reference (XLabs FLUX IP-Adapter v2)",
    base=BaseModelType.Flux,
    source="https://huggingface.co/XLabs-AI/flux-ip-adapter/resolve/main/ip_adapter.safetensors",
    source="https://huggingface.co/XLabs-AI/flux-ip-adapter-v2/resolve/main/ip_adapter.safetensors",
    description="References images with a more generalized/looser degree of precision.",
    type=ModelType.IPAdapter,
    dependencies=[clip_vit_l_image_encoder],
    previous_names=["XLabs FLUX IP-Adapter"],
)
# endregion
# region ControlNet

@@ -44,7 +44,7 @@ def _fast_safetensors_reader(path: str) -> Dict[str, torch.Tensor]:
    return checkpoint


def read_checkpoint_meta(path: Union[str, Path], scan: bool = False) -> Dict[str, torch.Tensor]:
def read_checkpoint_meta(path: Union[str, Path], scan: bool = True) -> Dict[str, torch.Tensor]:
    if str(path).endswith(".safetensors"):
        try:
            path_str = path.as_posix() if isinstance(path, Path) else path

@@ -55,7 +55,7 @@ def read_checkpoint_meta(path: Union[str, Path], scan: bool = False) -> Dict[str
    else:
        if scan:
            scan_result = scan_file_path(path)
            if scan_result.infected_files != 0:
            if scan_result.infected_files != 0 or scan_result.scan_err:
                raise Exception(f'The model file "{path}" is potentially infected by malware. Aborting import.')
        if str(path).endswith(".gguf"):
            # The GGUF reader used here uses numpy memmap, so these tensors are not loaded into memory during this function

invokeai/backend/sd3/__init__.py (new empty file)
invokeai/backend/sd3/extensions/__init__.py (new empty file)
invokeai/backend/sd3/extensions/inpaint_extension.py (new file, 58 lines)
@@ -0,0 +1,58 @@
import torch


class InpaintExtension:
    """A class for managing inpainting with SD3."""

    def __init__(self, init_latents: torch.Tensor, inpaint_mask: torch.Tensor, noise: torch.Tensor):
        """Initialize InpaintExtension.

        Args:
            init_latents (torch.Tensor): The initial latents (i.e. un-noised at timestep 0).
            inpaint_mask (torch.Tensor): A mask specifying which elements to inpaint. Range [0, 1]. Values of 1 will be
                re-generated. Values of 0 will remain unchanged. Values between 0 and 1 can be used to blend the
                inpainted region with the background.
            noise (torch.Tensor): The noise tensor used to noise the init_latents.
        """
        assert init_latents.dim() == inpaint_mask.dim() == noise.dim() == 4
        assert init_latents.shape[-2:] == inpaint_mask.shape[-2:] == noise.shape[-2:]

        self._init_latents = init_latents
        self._inpaint_mask = inpaint_mask
        self._noise = noise

    def _apply_mask_gradient_adjustment(self, t_prev: float) -> torch.Tensor:
        """Applies inpaint mask gradient adjustment and returns the inpaint mask to be used at the current timestep."""
        # As we progress through the denoising process, we promote gradient regions of the mask to have a full weight of
        # 1.0. This helps to produce more coherent seams around the inpainted region. We experimented with a (small)
        # number of promotion strategies (e.g. gradual promotion based on timestep), but found that a simple cutoff
        # threshold worked well.
        # We use a small epsilon to avoid any potential issues with floating point precision.
        eps = 1e-4
        mask_gradient_t_cutoff = 0.5
        if t_prev > mask_gradient_t_cutoff:
            # Early in the denoising process, use the inpaint mask as-is.
            return self._inpaint_mask
        else:
            # After the cut-off, promote all non-zero mask values to 1.0.
            mask = self._inpaint_mask.where(self._inpaint_mask <= (0.0 + eps), 1.0)

        return mask

    def merge_intermediate_latents_with_init_latents(
        self, intermediate_latents: torch.Tensor, t_prev: float
    ) -> torch.Tensor:
        """Merge the intermediate latents with the initial latents for the current timestep using the inpaint mask. I.e.
        update the intermediate latents to keep the regions that are not being inpainted on the correct noise
        trajectory.

        This function should be called after each denoising step.
        """
        mask = self._apply_mask_gradient_adjustment(t_prev)

        # Noise the init latents for the current timestep.
        noised_init_latents = self._noise * t_prev + (1.0 - t_prev) * self._init_latents

        # Merge the intermediate latents with the noised_init_latents using the inpaint_mask.
        return intermediate_latents * mask + noised_init_latents * (1.0 - mask)
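A scalar walk-through of the merge above, for one fully-preserved element (mask = 0) and one fully-inpainted element (mask = 1) at t_prev = 0.5 (toy numbers):

import torch

init = torch.tensor([1.0, 1.0])          # clean init latents
noise = torch.tensor([-1.0, -1.0])
intermediate = torch.tensor([9.0, 9.0])  # whatever the denoiser produced this step
mask = torch.tensor([0.0, 1.0])          # element 0 preserved, element 1 inpainted

t_prev = 0.5
noised_init = noise * t_prev + (1.0 - t_prev) * init       # tensor([0., 0.])
merged = intermediate * mask + noised_init * (1.0 - mask)
print(merged)  # tensor([0., 9.]): the preserved element stays on the init-latent
               # noise trajectory; the inpainted element keeps the denoiser's output
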
@@ -58,7 +58,7 @@
    "@dagrejs/dagre": "^1.1.4",
    "@dagrejs/graphlib": "^2.2.4",
    "@fontsource-variable/inter": "^5.1.0",
    "@invoke-ai/ui-library": "^0.0.43",
    "@invoke-ai/ui-library": "^0.0.44",
    "@nanostores/react": "^0.7.3",
    "@reduxjs/toolkit": "2.2.3",
    "@roarr/browser-log-writer": "^1.3.0",

invokeai/frontend/web/pnpm-lock.yaml (generated; 76 lines changed)
@@ -24,8 +24,8 @@ dependencies:
|
||||
specifier: ^5.1.0
|
||||
version: 5.1.0
|
||||
'@invoke-ai/ui-library':
|
||||
specifier: ^0.0.43
|
||||
version: 0.0.43(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.1.0)(@types/react@18.3.11)(i18next@23.15.1)(react-dom@18.3.1)(react@18.3.1)
|
||||
specifier: ^0.0.44
|
||||
version: 0.0.44(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.1.0)(@types/react@18.3.11)(i18next@23.15.1)(react-dom@18.3.1)(react@18.3.1)
|
||||
'@nanostores/react':
|
||||
specifier: ^0.7.3
|
||||
version: 0.7.3(nanostores@0.11.3)(react@18.3.1)
|
||||
@@ -515,8 +515,8 @@ packages:
|
||||
resolution: {integrity: sha512-MV6D4VLRIHr4PkW4zMyqfrNS1mPlCTiCXwvYGtDFQYr+xHFfonhAuf9WjsSc0nyp2m0OdkSLnzmVKkZFLo25Tg==}
|
||||
dev: false
|
||||
|
||||
/@chakra-ui/anatomy@2.3.4:
|
||||
resolution: {integrity: sha512-fFIYN7L276gw0Q7/ikMMlZxP7mvnjRaWJ7f3Jsf9VtDOi6eAYIBRrhQe6+SZ0PGmoOkRaBc7gSE5oeIbgFFyrw==}
|
||||
/@chakra-ui/anatomy@2.3.5:
|
||||
resolution: {integrity: sha512-3im33cUOxCbISjaBlINE2u8BOwJSCdzpjCX0H+0JxK2xz26UaVA5xeI3NYHUoxDnr/QIrgfrllGxS0szYwOcyg==}
|
||||
dev: false
|
||||
|
||||
/@chakra-ui/breakpoint-utils@2.0.8:
|
||||
@@ -573,12 +573,12 @@ packages:
|
||||
react: 18.3.1
|
||||
dev: false
|
||||
|
||||
/@chakra-ui/hooks@2.4.2(react@18.3.1):
|
||||
resolution: {integrity: sha512-LRKiVE1oA7afT5tbbSKAy7Uas2xFHE6IkrQdbhWCHmkHBUtPvjQQDgwtnd4IRZPmoEfNGwoJ/MQpwOM/NRTTwA==}
|
||||
/@chakra-ui/hooks@2.4.3(react@18.3.1):
|
||||
resolution: {integrity: sha512-Sr2zsoTZw3p7HbrUy4aLpTIkE2XXUelAUgg3NGwMzrmx75bE0qVyiuuTFOuyEzGxYVV2Fe8QtcKKilm6RwzTGg==}
|
||||
peerDependencies:
|
||||
react: '>=18'
|
||||
dependencies:
|
||||
'@chakra-ui/utils': 2.2.2(react@18.3.1)
|
||||
'@chakra-ui/utils': 2.2.3(react@18.3.1)
|
||||
'@zag-js/element-size': 0.31.1
|
||||
copy-to-clipboard: 3.3.3
|
||||
framesync: 6.1.2
|
||||
@@ -596,13 +596,13 @@
     react: 18.3.1
     dev: false

-  /@chakra-ui/icons@2.2.4(@chakra-ui/react@2.10.2)(react@18.3.1):
+  /@chakra-ui/icons@2.2.4(@chakra-ui/react@2.10.4)(react@18.3.1):
     resolution: {integrity: sha512-l5QdBgwrAg3Sc2BRqtNkJpfuLw/pWRDwwT58J6c4PqQT6wzXxyNa8Q0PForu1ltB5qEiFb1kxr/F/HO1EwNa6g==}
     peerDependencies:
       '@chakra-ui/react': '>=2.0.0'
       react: '>=18'
     dependencies:
-      '@chakra-ui/react': 2.10.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1)
+      '@chakra-ui/react': 2.10.4(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1)
     react: 18.3.1
     dev: false

@@ -825,8 +825,8 @@
     react: 18.3.1
     dev: false

-  /@chakra-ui/react@2.10.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1):
-    resolution: {integrity: sha512-TfIHTqTlxTHYJZBtpiR5EZasPUrLYKJxdbHkdOJb5G1OQ+2c5kKl5XA7c2pMtsEptzb7KxAAIB62t3hxdfWp1w==}
+  /@chakra-ui/react@2.10.4(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1):
+    resolution: {integrity: sha512-XyRWnuZ1Uw7Mlj5pKUGO5/WhnIHP/EOrpy6lGZC1yWlkd0eIfIpYMZ1ALTZx4KPEdbBaes48dgiMT2ROCqLhkA==}
     peerDependencies:
       '@emotion/react': '>=11'
       '@emotion/styled': '>=11'
@@ -834,10 +834,10 @@
       react: '>=18'
       react-dom: '>=18'
     dependencies:
-      '@chakra-ui/hooks': 2.4.2(react@18.3.1)
-      '@chakra-ui/styled-system': 2.11.2(react@18.3.1)
-      '@chakra-ui/theme': 3.4.6(@chakra-ui/styled-system@2.11.2)(react@18.3.1)
-      '@chakra-ui/utils': 2.2.2(react@18.3.1)
+      '@chakra-ui/hooks': 2.4.3(react@18.3.1)
+      '@chakra-ui/styled-system': 2.12.1(react@18.3.1)
+      '@chakra-ui/theme': 3.4.7(@chakra-ui/styled-system@2.12.1)(react@18.3.1)
+      '@chakra-ui/utils': 2.2.3(react@18.3.1)
       '@emotion/react': 11.13.3(@types/react@18.3.11)(react@18.3.1)
       '@emotion/styled': 11.13.0(@emotion/react@11.13.3)(@types/react@18.3.11)(react@18.3.1)
       '@popperjs/core': 2.11.8
@@ -868,10 +868,10 @@
     react: 18.3.1
     dev: false

-  /@chakra-ui/styled-system@2.11.2(react@18.3.1):
-    resolution: {integrity: sha512-y++z2Uop+hjfZX9mbH88F1ikazPv32asD2er56zMJBemUAzweXnHTpiCQbluEDSUDhqmghVZAdb+5L4XLbsRxA==}
+  /@chakra-ui/styled-system@2.12.1(react@18.3.1):
+    resolution: {integrity: sha512-DQph1nDiCPtgze7nDe0a36530ByXb5VpPosKGyWMvKocVeZJcDtYG6XM0+V5a0wKuFBXsViBBRIFUTiUesJAcg==}
     dependencies:
-      '@chakra-ui/utils': 2.2.2(react@18.3.1)
+      '@chakra-ui/utils': 2.2.3(react@18.3.1)
       csstype: 3.1.3
     transitivePeerDependencies:
       - react
@@ -915,14 +915,14 @@
       color2k: 2.0.3
     dev: false

-  /@chakra-ui/theme-tools@2.2.6(@chakra-ui/styled-system@2.11.2)(react@18.3.1):
-    resolution: {integrity: sha512-3UhKPyzKbV3l/bg1iQN9PBvffYp+EBOoYMUaeTUdieQRPFzo2jbYR0lNCxqv8h5aGM/k54nCHU2M/GStyi9F2A==}
+  /@chakra-ui/theme-tools@2.2.7(@chakra-ui/styled-system@2.12.1)(react@18.3.1):
+    resolution: {integrity: sha512-K/VJd0QcnKik7m+qZTkggqNLep6+MPUu8IP5TUpHsnSM5R/RVjsJIR7gO8IZVAIMIGLLTIhGshHxeMekqv6LcQ==}
     peerDependencies:
       '@chakra-ui/styled-system': '>=2.0.0'
     dependencies:
-      '@chakra-ui/anatomy': 2.3.4
-      '@chakra-ui/styled-system': 2.11.2(react@18.3.1)
-      '@chakra-ui/utils': 2.2.2(react@18.3.1)
+      '@chakra-ui/anatomy': 2.3.5
+      '@chakra-ui/styled-system': 2.12.1(react@18.3.1)
+      '@chakra-ui/utils': 2.2.3(react@18.3.1)
       color2k: 2.0.3
     transitivePeerDependencies:
       - react
@@ -948,15 +948,15 @@
       '@chakra-ui/theme-tools': 2.1.2(@chakra-ui/styled-system@2.9.2)
     dev: false

-  /@chakra-ui/theme@3.4.6(@chakra-ui/styled-system@2.11.2)(react@18.3.1):
-    resolution: {integrity: sha512-ZwFBLfiMC3URwaO31ONXoKH9k0TX0OW3UjdPF3EQkQpYyrk/fm36GkkzajjtdpWEd7rzDLRsQjPmvwNaSoNDtg==}
+  /@chakra-ui/theme@3.4.7(@chakra-ui/styled-system@2.12.1)(react@18.3.1):
+    resolution: {integrity: sha512-pfewthgZTFNUYeUwGvhPQO/FTIyf375cFV1AT8N1y0aJiw4KDe7YTGm7p0aFy4AwAjH2ydMgeEx/lua4tx8qyQ==}
     peerDependencies:
       '@chakra-ui/styled-system': '>=2.8.0'
     dependencies:
-      '@chakra-ui/anatomy': 2.3.4
-      '@chakra-ui/styled-system': 2.11.2(react@18.3.1)
-      '@chakra-ui/theme-tools': 2.2.6(@chakra-ui/styled-system@2.11.2)(react@18.3.1)
-      '@chakra-ui/utils': 2.2.2(react@18.3.1)
+      '@chakra-ui/anatomy': 2.3.5
+      '@chakra-ui/styled-system': 2.12.1(react@18.3.1)
+      '@chakra-ui/theme-tools': 2.2.7(@chakra-ui/styled-system@2.12.1)(react@18.3.1)
+      '@chakra-ui/utils': 2.2.3(react@18.3.1)
     transitivePeerDependencies:
       - react
     dev: false
@@ -981,8 +981,8 @@
       lodash.mergewith: 4.6.2
     dev: false

-  /@chakra-ui/utils@2.2.2(react@18.3.1):
-    resolution: {integrity: sha512-jUPLT0JzRMWxpdzH6c+t0YMJYrvc5CLericgITV3zDSXblkfx3DsYXqU11DJTSGZI9dUKzM1Wd0Wswn4eJwvFQ==}
+  /@chakra-ui/utils@2.2.3(react@18.3.1):
+    resolution: {integrity: sha512-cldoCQuexZ6e07/9hWHKD4l1QXXlM1Nax9tuQOBvVf/EgwNZt3nZu8zZRDFlhAOKCTQDkmpLTTu+eXXjChNQOw==}
     peerDependencies:
       react: '>=16.8.0'
     dependencies:
@@ -1675,20 +1675,20 @@
     prettier: 3.3.3
     dev: true

-  /@invoke-ai/ui-library@0.0.43(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.1.0)(@types/react@18.3.11)(i18next@23.15.1)(react-dom@18.3.1)(react@18.3.1):
-    resolution: {integrity: sha512-t3fPYyks07ue3dEBPJuTHbeDLnDckDCOrtvc07mMDbLOnlPEZ0StaeiNGH+oO8qLzAuMAlSTdswgHfzTc2MmPw==}
+  /@invoke-ai/ui-library@0.0.44(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.1.0)(@types/react@18.3.11)(i18next@23.15.1)(react-dom@18.3.1)(react@18.3.1):
+    resolution: {integrity: sha512-PDseHmdr8oi8cmrpx3UwIYHn4NduAJX2R0pM0pyM54xrCMPMgYiCbC/eOs8Gt4fBc2ziiPZ9UGoW4evnE3YJsg==}
     peerDependencies:
       '@fontsource-variable/inter': ^5.0.16
       react: ^18.2.0
       react-dom: ^18.2.0
     dependencies:
-      '@chakra-ui/anatomy': 2.3.4
-      '@chakra-ui/icons': 2.2.4(@chakra-ui/react@2.10.2)(react@18.3.1)
+      '@chakra-ui/anatomy': 2.2.2
+      '@chakra-ui/icons': 2.2.4(@chakra-ui/react@2.10.4)(react@18.3.1)
       '@chakra-ui/layout': 2.3.1(@chakra-ui/system@2.6.2)(react@18.3.1)
       '@chakra-ui/portal': 2.1.0(react-dom@18.3.1)(react@18.3.1)
-      '@chakra-ui/react': 2.10.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1)
-      '@chakra-ui/styled-system': 2.11.2(react@18.3.1)
-      '@chakra-ui/theme-tools': 2.2.6(@chakra-ui/styled-system@2.11.2)(react@18.3.1)
+      '@chakra-ui/react': 2.10.4(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1)
+      '@chakra-ui/styled-system': 2.9.2
+      '@chakra-ui/theme-tools': 2.1.2(@chakra-ui/styled-system@2.9.2)
       '@emotion/react': 11.13.3(@types/react@18.3.11)(react@18.3.1)
       '@emotion/styled': 11.13.0(@emotion/react@11.13.3)(@types/react@18.3.11)(react@18.3.1)
       '@fontsource-variable/inter': 5.1.0

invokeai/frontend/web/public/locales/de.json
@@ -768,7 +768,8 @@
     "deletedPrivateBoardsCannotbeRestored": "Gelöschte Boards können nicht wiederhergestellt werden. Wenn Sie „Nur Board löschen“ wählen, werden die Bilder in einen privaten, nicht kategorisierten Status für den Ersteller des Bildes versetzt.",
     "assetsWithCount_one": "{{count}} in der Sammlung",
     "assetsWithCount_other": "{{count}} in der Sammlung",
-    "deletedBoardsCannotbeRestored": "Gelöschte Ordner können nicht wiederhergestellt werden. Die Auswahl von \"Nur Ordner löschen\" verschiebt Bilder in einen unkategorisierten Zustand."
+    "deletedBoardsCannotbeRestored": "Gelöschte Ordner können nicht wiederhergestellt werden. Die Auswahl von \"Nur Ordner löschen\" verschiebt Bilder in einen unkategorisierten Zustand.",
+    "updateBoardError": "Fehler beim Aktualisieren des Ordners"
   },
   "queue": {
     "status": "Status",
@@ -871,7 +872,9 @@
     "recallParameter": "{{label}} Abrufen",
     "parsingFailed": "Parsing Fehlgeschlagen",
     "canvasV2Metadata": "Leinwand",
-    "guidance": "Führung"
+    "guidance": "Führung",
+    "seamlessXAxis": "Nahtlose X Achse",
+    "seamlessYAxis": "Nahtlose Y Achse"
   },
   "popovers": {
     "noiseUseCPU": {
@@ -1392,7 +1395,13 @@
     "pullBboxIntoLayerOk": "Bbox in die Ebene gezogen",
     "saveBboxToGallery": "Bbox in Galerie speichern",
     "tool": {
-      "bbox": "Bbox"
+      "bbox": "Bbox",
+      "brush": "Pinsel",
+      "eraser": "Radiergummi",
+      "colorPicker": "Farbwähler",
+      "view": "Ansicht",
+      "rectangle": "Rechteck",
+      "move": "Verschieben"
     },
     "transform": {
       "fitToBbox": "An Bbox anpassen",
@@ -1434,7 +1443,6 @@
"deleteReferenceImage": "Referenzbild löschen",
"referenceImage": "Referenzbild",
"opacity": "Opazität",
"resetCanvas": "Leinwand zurücksetzen",
"removeBookmark": "Lesezeichen entfernen",
"rasterLayer": "Raster-Ebene",
"rasterLayers_withCount_visible": "Raster-Ebenen ({{count}})",
@@ -1511,7 +1519,30 @@
     "layer_one": "Ebene",
     "layer_other": "Ebenen",
     "layer_withCount_one": "Ebene ({{count}})",
-    "layer_withCount_other": "Ebenen ({{count}})"
+    "layer_withCount_other": "Ebenen ({{count}})",
+    "fill": {
+      "fillStyle": "Füllstil",
+      "diagonal": "Diagonal",
+      "vertical": "Vertikal",
+      "fillColor": "Füllfarbe",
+      "grid": "Raster",
+      "solid": "Solide",
+      "crosshatch": "Kreuzschraffur",
+      "horizontal": "Horizontal"
+    },
+    "filter": {
+      "apply": "Anwenden",
+      "reset": "Zurücksetzen",
+      "cancel": "Abbrechen",
+      "spandrel_filter": {
+        "label": "Bild-zu-Bild Modell",
+        "description": "Ein Bild-zu-Bild Modell auf der ausgewählten Ebene ausführen.",
+        "model": "Modell"
+      },
+      "filters": "Filter",
+      "filterType": "Filtertyp",
+      "filter": "Filter"
+    }
   },
   "upsell": {
     "shareAccess": "Zugang teilen",

invokeai/frontend/web/public/locales/en.json
@@ -122,6 +122,7 @@
"goTo": "Go to",
"hotkeysLabel": "Hotkeys",
"loadingImage": "Loading Image",
"loadingModel": "Loading Model",
"imageFailedToLoad": "Unable to Load Image",
"img2img": "Image To Image",
"inpaint": "inpaint",
@@ -174,7 +175,9 @@
     "placeholderSelectAModel": "Select a model",
     "reset": "Reset",
     "none": "None",
-    "new": "New"
+    "new": "New",
+    "generating": "Generating",
+    "warnings": "Warnings"
   },
   "hrf": {
     "hrf": "High Resolution Fix",
@@ -261,7 +264,8 @@
     "iterations_one": "Iteration",
     "iterations_other": "Iterations",
     "generations_one": "Generation",
-    "generations_other": "Generations"
+    "generations_other": "Generations",
+    "batchSize": "Batch Size"
   },
   "invocationCache": {
     "invocationCache": "Invocation Cache",
@@ -704,6 +708,8 @@
     "baseModel": "Base Model",
     "cancel": "Cancel",
     "clipEmbed": "CLIP Embed",
+    "clipLEmbed": "CLIP-L Embed",
+    "clipGEmbed": "CLIP-G Embed",
     "config": "Config",
     "convert": "Convert",
     "convertingModelBegin": "Converting Model. Please wait.",
@@ -973,6 +979,8 @@
"zoomOutNodes": "Zoom Out",
"betaDesc": "This invocation is in beta. Until it is stable, it may have breaking changes during app updates. We plan to support this invocation long-term.",
"prototypeDesc": "This invocation is a prototype. It may have breaking changes during app updates and may be removed at any time.",
"internalDesc": "This invocation is used internally by Invoke. It may have breaking changes during app updates and may be removed at any time.",
"specialDesc": "This invocation has some special handling in the app. For example, Batch nodes are used to queue multiple graphs from a single workflow.",
"imageAccessError": "Unable to find image {{image_name}}, resetting to default",
"boardAccessError": "Unable to find board {{board_id}}, resetting to default",
"modelAccessError": "Unable to find model {{key}}, resetting to default",
@@ -1011,8 +1019,11 @@
     "addingImagesTo": "Adding images to",
     "invoke": "Invoke",
     "missingFieldTemplate": "Missing field template",
-    "missingInputForField": "{{nodeLabel}} -> {{fieldLabel}} missing input",
+    "missingInputForField": "{{nodeLabel}} -> {{fieldLabel}}: missing input",
     "missingNodeTemplate": "Missing node template",
+    "collectionEmpty": "{{nodeLabel}} -> {{fieldLabel}} empty collection",
+    "collectionTooFewItems": "{{nodeLabel}} -> {{fieldLabel}}: too few items, minimum {{minItems}}",
+    "collectionTooManyItems": "{{nodeLabel}} -> {{fieldLabel}}: too many items, maximum {{maxItems}}",
     "noModelSelected": "No model selected",
     "noT5EncoderModelSelected": "No T5 Encoder model selected for FLUX generation",
     "noFLUXVAEModelSelected": "No VAE model selected for FLUX generation",
@@ -1021,14 +1032,16 @@
     "fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), bbox height is {{height}}",
     "fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), scaled bbox width is {{width}}",
     "fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), scaled bbox height is {{height}}",
-    "canvasIsFiltering": "Canvas is filtering",
-    "canvasIsTransforming": "Canvas is transforming",
-    "canvasIsRasterizing": "Canvas is rasterizing",
-    "canvasIsCompositing": "Canvas is compositing",
+    "canvasIsFiltering": "Canvas is busy (filtering)",
+    "canvasIsTransforming": "Canvas is busy (transforming)",
+    "canvasIsRasterizing": "Canvas is busy (rasterizing)",
+    "canvasIsCompositing": "Canvas is busy (compositing)",
+    "canvasIsSelectingObject": "Canvas is busy (selecting object)",
     "noPrompts": "No prompts generated",
     "noNodesInGraph": "No nodes in graph",
     "systemDisconnected": "System disconnected",
     "layer": {
       "unsupportedModel": "layer not supported for selected base model",
       "controlAdapterNoModelSelected": "no Control Adapter model selected",
       "controlAdapterIncompatibleBaseModel": "incompatible Control Adapter base model",
       "t2iAdapterIncompatibleBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, bbox width is {{width}}",
@@ -1039,7 +1052,10 @@
     "ipAdapterIncompatibleBaseModel": "incompatible IP Adapter base model",
     "ipAdapterNoImageSelected": "no IP Adapter image selected",
     "rgNoPromptsOrIPAdapters": "no text prompts or IP Adapters",
-    "rgNoRegion": "no region selected"
+    "rgNegativePromptNotSupported": "negative prompt not supported for selected base model",
+    "rgReferenceImagesNotSupported": "regional reference images not supported for selected base model",
+    "rgAutoNegativeNotSupported": "auto-negative not supported for selected base model",
+    "emptyLayer": "empty layer"
   }
 },
 "maskBlur": "Mask Blur",
@@ -1137,6 +1153,7 @@
     "resetWebUI": "Reset Web UI",
     "resetWebUIDesc1": "Resetting the web UI only resets the browser's local cache of your images and remembered settings. It does not delete any images from disk.",
     "resetWebUIDesc2": "If images aren't showing up in the gallery or something else isn't working, please try resetting before submitting an issue on GitHub.",
+    "showDetailedInvocationProgress": "Show Progress Details",
     "showProgressInViewer": "Show Progress Images in Viewer",
     "ui": "User Interface",
     "clearIntermediatesDisabled": "Queue must be empty to clear intermediates",
@@ -1307,8 +1324,9 @@
     "controlNetBeginEnd": {
       "heading": "Begin / End Step Percentage",
       "paragraphs": [
-        "The part of the of the denoising process that will have the Control Adapter applied.",
-        "Generally, Control Adapters applied at the start of the process guide composition, and Control Adapters applied at the end guide details."
+        "This setting determines which portion of the denoising (generation) process incorporates the guidance from this layer.",
+        "• Start Step (%): Specifies when to begin applying the guidance from this layer during the generation process.",
+        "• End Step (%): Specifies when to stop applying this layer's guidance and revert general guidance from the model and other settings."
       ]
     },
     "controlNetControlMode": {
@@ -1318,7 +1336,7 @@
     "controlNetProcessor": {
       "heading": "Processor",
       "paragraphs": [
-        "Method of processing the input image to guide the generation process. Different processors will providedifferent effects or styles in your generated images."
+        "Method of processing the input image to guide the generation process. Different processors will provide different effects or styles in your generated images."
       ]
     },
     "controlNetResizeMode": {
@@ -1326,13 +1344,15 @@
       "paragraphs": ["Method to fit Control Adapter's input image size to the output generation size."]
     },
     "ipAdapterMethod": {
-      "heading": "Method",
-      "paragraphs": ["Method by which to apply the current IP Adapter."]
+      "heading": "Mode",
+      "paragraphs": ["The mode defines how the reference image will guide the generation process."]
     },
     "controlNetWeight": {
       "heading": "Weight",
       "paragraphs": [
-        "Weight of the Control Adapter. Higher weight will lead to larger impacts on the final image."
+        "Adjusts how strongly the layer influences the generation process",
+        "• Higher Weight (.75-2): Creates a more significant impact on the final result.",
+        "• Lower Weight (0-.75): Creates a smaller impact on the final result."
       ]
     },
     "dynamicPrompts": {
@@ -1654,7 +1674,6 @@
"newControlLayerError": "Problem Creating Control Layer",
"newRasterLayerOk": "Created Raster Layer",
"newRasterLayerError": "Problem Creating Raster Layer",
"newFromImage": "New from Image",
"pullBboxIntoLayerOk": "Bbox Pulled Into Layer",
"pullBboxIntoLayerError": "Problem Pulling BBox Into Layer",
"pullBboxIntoReferenceImageOk": "Bbox Pulled Into ReferenceImage",
|
||||
"mergingLayers": "Merging layers",
|
||||
"clearHistory": "Clear History",
|
||||
"bboxOverlay": "Show Bbox Overlay",
|
||||
"resetCanvas": "Reset Canvas",
|
||||
"newSession": "New Session",
|
||||
"clearCaches": "Clear Caches",
|
||||
"recalculateRects": "Recalculate Rects",
|
||||
"clipToBbox": "Clip Strokes to Bbox",
|
||||
"outputOnlyMaskedRegions": "Output Only Masked Regions",
|
||||
"outputOnlyMaskedRegions": "Output Only Generated Regions",
|
||||
"addLayer": "Add Layer",
|
||||
"duplicate": "Duplicate",
|
||||
"moveToFront": "Move to Front",
|
||||
@@ -1699,8 +1718,10 @@
     "controlLayer": "Control Layer",
     "inpaintMask": "Inpaint Mask",
     "regionalGuidance": "Regional Guidance",
-    "canvasAsRasterLayer": "$t(controlLayers.canvas) as $t(controlLayers.rasterLayer)",
-    "canvasAsControlLayer": "$t(controlLayers.canvas) as $t(controlLayers.controlLayer)",
+    "asRasterLayer": "As $t(controlLayers.rasterLayer)",
+    "asRasterLayerResize": "As $t(controlLayers.rasterLayer) (Resize)",
+    "asControlLayer": "As $t(controlLayers.controlLayer)",
+    "asControlLayerResize": "As $t(controlLayers.controlLayer) (Resize)",
     "referenceImage": "Reference Image",
     "regionalReferenceImage": "Regional Reference Image",
     "globalReferenceImage": "Global Reference Image",
@@ -1768,6 +1789,7 @@
     "pullBboxIntoLayer": "Pull Bbox into Layer",
     "pullBboxIntoReferenceImage": "Pull Bbox into Reference Image",
+    "showProgressOnCanvas": "Show Progress on Canvas",
     "useImage": "Use Image",
     "prompt": "Prompt",
     "negativePrompt": "Negative Prompt",
     "beginEndStepPercentShort": "Begin/End %",
@@ -1776,8 +1798,11 @@
     "newGallerySessionDesc": "This will clear the canvas and all settings except for your model selection. Generations will be sent to the gallery.",
     "newCanvasSession": "New Canvas Session",
     "newCanvasSessionDesc": "This will clear the canvas and all settings except for your model selection. Generations will be staged on the canvas.",
+    "resetCanvasLayers": "Reset Canvas Layers",
+    "resetGenerationSettings": "Reset Generation Settings",
+    "replaceCurrent": "Replace Current",
     "controlLayerEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, or draw on the canvas to get started.",
     "referenceImageEmptyState": "<UploadButton>Upload an image</UploadButton> or drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer to get started.",
     "controlMode": {
       "controlMode": "Control Mode",
       "balanced": "Balanced (recommended)",
@@ -1786,10 +1811,13 @@
     "megaControl": "Mega Control"
   },
   "ipAdapterMethod": {
-    "ipAdapterMethod": "IP Adapter Method",
-    "full": "Full",
+    "ipAdapterMethod": "Mode",
+    "full": "Style and Composition",
+    "fullDesc": "Applies visual style (colors, textures) & composition (layout, structure).",
     "style": "Style Only",
-    "composition": "Composition Only"
+    "styleDesc": "Applies visual style (colors, textures) without considering its layout.",
+    "composition": "Composition Only",
+    "compositionDesc": "Replicates layout & structure while ignoring the reference's style."
   },
   "fill": {
     "fillColor": "Fill Color",
@@ -2105,11 +2133,73 @@
   "whatsNew": {
     "whatsNewInInvoke": "What's New in Invoke",
     "items": [
-      "<StrongComponent>SD 3.5</StrongComponent>: Support for Text-to-Image in Workflows with SD 3.5 Medium and Large.",
-      "<StrongComponent>Canvas</StrongComponent>: Streamlined Control Layer processing and improved default Control settings."
+      "<StrongComponent>Workflows</StrongComponent>: Run a workflow for a collection of images using the new <StrongComponent>Image Batch</StrongComponent> node.",
+      "<StrongComponent>FLUX</StrongComponent>: Support for XLabs IP Adapter v2."
     ],
     "readReleaseNotes": "Read Release Notes",
     "watchRecentReleaseVideos": "Watch Recent Release Videos",
     "watchUiUpdatesOverview": "Watch UI Updates Overview"
   },
+  "supportVideos": {
+    "supportVideos": "Support Videos",
+    "gettingStarted": "Getting Started",
+    "controlCanvas": "Control Canvas",
+    "watch": "Watch",
+    "studioSessionsDesc1": "Check out the <StudioSessionsPlaylistLink /> for Invoke deep dives.",
+    "studioSessionsDesc2": "Join our <DiscordLink /> to participate in the live sessions and ask questions. Sessions are uploaded to the playlist the following week.",
+    "videos": {
+      "creatingYourFirstImage": {
+        "title": "Creating Your First Image",
+        "description": "Introduction to creating an image from scratch using Invoke's tools."
+      },
+      "usingControlLayersAndReferenceGuides": {
+        "title": "Using Control Layers and Reference Guides",
+        "description": "Learn how to guide your image creation with control layers and reference images."
+      },
+      "understandingImageToImageAndDenoising": {
+        "title": "Understanding Image-to-Image and Denoising",
+        "description": "Overview of image-to-image transformations and denoising in Invoke."
+      },
+      "exploringAIModelsAndConceptAdapters": {
+        "title": "Exploring AI Models and Concept Adapters",
+        "description": "Dive into AI models and how to use concept adapters for creative control."
+      },
+      "creatingAndComposingOnInvokesControlCanvas": {
+        "title": "Creating and Composing on Invoke's Control Canvas",
+        "description": "Learn to compose images using Invoke's control canvas."
+      },
+      "upscaling": {
+        "title": "Upscaling",
+        "description": "How to upscale images with Invoke's tools to enhance resolution."
+      },
+      "howDoIGenerateAndSaveToTheGallery": {
+        "title": "How Do I Generate and Save to the Gallery?",
+        "description": "Steps to generate and save images to the gallery."
+      },
+      "howDoIEditOnTheCanvas": {
+        "title": "How Do I Edit on the Canvas?",
+        "description": "Guide to editing images directly on the canvas."
+      },
+      "howDoIDoImageToImageTransformation": {
+        "title": "How Do I Do Image-to-Image Transformation?",
+        "description": "Tutorial on performing image-to-image transformations in Invoke."
+      },
+      "howDoIUseControlNetsAndControlLayers": {
+        "title": "How Do I Use Control Nets and Control Layers?",
+        "description": "Learn to apply control layers and controlnets to your images."
+      },
+      "howDoIUseGlobalIPAdaptersAndReferenceImages": {
+        "title": "How Do I Use Global IP Adapters and Reference Images?",
+        "description": "Introduction to adding reference images and global IP adapters."
+      },
+      "howDoIUseInpaintMasks": {
+        "title": "How Do I Use Inpaint Masks?",
+        "description": "How to apply inpaint masks for image correction and variation."
+      },
+      "howDoIOutpaint": {
+        "title": "How Do I Outpaint?",
+        "description": "Guide to outpainting beyond the original image borders."
+      }
+    }
+  }
 }

invokeai/frontend/web/public/locales/es.json
@@ -13,7 +13,7 @@
     "discordLabel": "Discord",
     "back": "Atrás",
     "loading": "Cargando",
-    "postprocessing": "Postprocesado",
+    "postprocessing": "Postprocesamiento",
     "txt2img": "De texto a imagen",
     "accept": "Aceptar",
     "cancel": "Cancelar",
@@ -64,7 +64,7 @@
     "prevPage": "Página Anterior",
     "red": "Rojo",
     "alpha": "Transparencia",
-    "outputs": "Salidas",
+    "outputs": "Resultados",
     "learnMore": "Aprende más",
     "enabled": "Activado",
     "disabled": "Desactivado",
@@ -73,7 +73,32 @@
     "created": "Creado",
     "save": "Guardar",
     "unknownError": "Error Desconocido",
-    "blue": "Azul"
+    "blue": "Azul",
+    "clipboard": "Portapapeles",
+    "loadingImage": "Cargando la imagen",
+    "inpaint": "inpaint",
+    "ipAdapter": "Adaptador IP",
+    "t2iAdapter": "Adaptador T2I",
+    "apply": "Aplicar",
+    "openInViewer": "Abrir en el visor",
+    "off": "Apagar",
+    "generating": "Generando",
+    "ok": "De acuerdo",
+    "placeholderSelectAModel": "Seleccionar un modelo",
+    "reset": "Restablecer",
+    "none": "Ninguno",
+    "new": "Nuevo",
+    "dontShowMeThese": "No mostrar estos",
+    "loadingModel": "Cargando el modelo",
+    "view": "Ver",
+    "edit": "Editar",
+    "safetensors": "Safetensors",
+    "toResolve": "Para resolver",
+    "localSystem": "Sistema local",
+    "notInstalled": "No $t(common.installed)",
+    "outpaint": "outpaint",
+    "simple": "Sencillo",
+    "close": "Cerrar"
   },
   "gallery": {
     "galleryImageSize": "Tamaño de la imagen",
@@ -85,7 +110,63 @@
     "deleteImage_other": "Eliminar {{count}} Imágenes",
     "deleteImagePermanent": "Las imágenes eliminadas no se pueden restaurar.",
     "assets": "Activos",
-    "autoAssignBoardOnClick": "Asignación automática de tableros al hacer clic"
+    "autoAssignBoardOnClick": "Asignar automática tableros al hacer clic",
+    "gallery": "Galería",
+    "noImageSelected": "Sin imágenes seleccionadas",
+    "bulkDownloadRequestFailed": "Error al preparar la descarga",
+    "oldestFirst": "La más antigua primero",
+    "sideBySide": "conjuntamente",
+    "selectForCompare": "Seleccionar para comparar",
+    "alwaysShowImageSizeBadge": "Mostrar siempre las dimensiones de la imagen",
+    "currentlyInUse": "Esta imagen se utiliza actualmente con las siguientes funciones:",
+    "unableToLoad": "No se puede cargar la galería",
+    "selectAllOnPage": "Seleccionar todo en la página",
+    "selectAnImageToCompare": "Seleccione una imagen para comparar",
+    "bulkDownloadFailed": "Error en la descarga",
+    "compareHelp2": "Presione <Kbd> M </Kbd> para recorrer los modos de comparación.",
+    "move": "Mover",
+    "copy": "Copiar",
+    "drop": "Gota",
+    "displayBoardSearch": "Tablero de búsqueda",
+    "deleteSelection": "Borrar selección",
+    "downloadSelection": "Descargar selección",
+    "openInViewer": "Abrir en el visor",
+    "searchImages": "Búsqueda por metadatos",
+    "swapImages": "Intercambiar imágenes",
+    "sortDirection": "Orden de clasificación",
+    "showStarredImagesFirst": "Mostrar imágenes destacadas primero",
+    "go": "Ir",
+    "bulkDownloadRequested": "Preparando la descarga",
+    "image": "imagen",
+    "compareHelp4": "Presione <Kbd> Z </Kbd> o <Kbd> Esc </Kbd> para salir.",
+    "viewerImage": "Ver imagen",
+    "dropOrUpload": "$t(gallery.drop) o cargar",
+    "displaySearch": "Buscar imagen",
+    "download": "Descargar",
+    "exitBoardSearch": "Finalizar búsqueda",
+    "exitSearch": "Salir de la búsqueda de imágenes",
+    "featuresWillReset": "Si elimina esta imagen, dichas funciones se restablecerán inmediatamente.",
+    "jump": "Omitir",
+    "loading": "Cargando",
+    "newestFirst": "La más nueva primero",
+    "unstarImage": "Dejar de ser favorita",
+    "bulkDownloadRequestedDesc": "Su solicitud de descarga se está preparando. Esto puede tardar unos minutos.",
+    "hover": "Desplazar",
+    "compareHelp1": "Mantenga presionada la tecla <Kbd> Alt </Kbd> mientras hace clic en una imagen de la galería o utiliza las teclas de flecha para cambiar la imagen de comparación.",
+    "stretchToFit": "Estirar para encajar",
+    "exitCompare": "Salir de la comparación",
+    "starImage": "Imágenes favoritas",
+    "dropToUpload": "$t(gallery.drop) para cargar",
+    "slider": "Deslizador",
+    "assetsTab": "Archivos que has cargado para utilizarlos en tus proyectos.",
+    "imagesTab": "Imágenes que ha creado y guardado en Invoke.",
+    "compareImage": "Comparar imagen",
+    "boardsSettings": "Ajustes de los tableros",
+    "imagesSettings": "Configuración de imágenes de la galería",
+    "compareHelp3": "Presione <Kbd> C </Kbd> para intercambiar las imágenes comparadas.",
+    "showArchivedBoards": "Mostrar paneles archivados",
+    "closeViewer": "Cerrar visor",
+    "openViewer": "Abrir visor"
   },
   "modelManager": {
     "modelManager": "Gestor de Modelos",
@@ -131,7 +212,13 @@
     "modelDeleted": "Modelo eliminado",
     "modelDeleteFailed": "Error al borrar el modelo",
     "settings": "Ajustes",
-    "syncModels": "Sincronizar las plantillas"
+    "syncModels": "Sincronizar las plantillas",
+    "clipEmbed": "Incrustar CLIP",
+    "addModels": "Añadir modelos",
+    "advanced": "Avanzado",
+    "clipGEmbed": "Incrustar CLIP-G",
+    "cancel": "Cancelar",
+    "clipLEmbed": "Incrustar CLIP-L"
   },
   "parameters": {
     "images": "Imágenes",
@@ -158,19 +245,19 @@
     "useSeed": "Usar Semilla",
     "useAll": "Usar Todo",
     "info": "Información",
-    "showOptionsPanel": "Mostrar panel de opciones",
+    "showOptionsPanel": "Mostrar panel lateral (O o T)",
     "symmetry": "Simetría",
     "copyImage": "Copiar la imagen",
     "general": "General",
     "denoisingStrength": "Intensidad de la eliminación del ruido",
-    "seamlessXAxis": "Eje x",
-    "seamlessYAxis": "Eje y",
+    "seamlessXAxis": "Eje X sin juntas",
+    "seamlessYAxis": "Eje Y sin juntas",
     "scheduler": "Programador",
     "positivePromptPlaceholder": "Prompt Positivo",
     "negativePromptPlaceholder": "Prompt Negativo",
     "controlNetControlMode": "Modo de control",
     "clipSkip": "Omitir el CLIP",
-    "maskBlur": "Difuminar",
+    "maskBlur": "Desenfoque de máscara",
     "patchmatchDownScaleSize": "Reducir a escala",
     "coherenceMode": "Modo"
   },
@@ -202,16 +289,19 @@
     "serverError": "Error en el servidor",
     "canceled": "Procesando la cancelación",
     "connected": "Conectado al servidor",
-    "uploadFailedInvalidUploadDesc": "Debe ser una sola imagen PNG o JPEG",
-    "parameterSet": "Conjunto de parámetros",
-    "parameterNotSet": "Parámetro no configurado",
+    "uploadFailedInvalidUploadDesc": "Deben ser imágenes PNG o JPEG.",
+    "parameterSet": "Parámetro recuperado",
+    "parameterNotSet": "Parámetro no recuperado",
     "problemCopyingImage": "No se puede copiar la imagen",
     "errorCopied": "Error al copiar",
     "baseModelChanged": "Modelo base cambiado",
-    "addedToBoard": "Añadido al tablero",
+    "addedToBoard": "Se agregó a los activos del panel {{name}}",
     "baseModelChangedCleared_one": "Borrado o desactivado {{count}} submodelo incompatible",
     "baseModelChangedCleared_many": "Borrados o desactivados {{count}} submodelos incompatibles",
-    "baseModelChangedCleared_other": "Borrados o desactivados {{count}} submodelos incompatibles"
+    "baseModelChangedCleared_other": "Borrados o desactivados {{count}} submodelos incompatibles",
+    "addedToUncategorized": "Añadido a los activos del tablero $t(boards.uncategorized)",
+    "imagesWillBeAddedTo": "Las imágenes subidas se añadirán a los activos del panel {{boardName}}.",
+    "layerCopiedToClipboard": "Capa copiada en el portapapeles"
   },
   "accessibility": {
     "invokeProgressBar": "Activar la barra de progreso",
@@ -226,7 +316,8 @@
     "mode": "Modo",
     "submitSupportTicket": "Enviar Ticket de Soporte",
     "toggleRightPanel": "Activar o desactivar el panel derecho (G)",
-    "toggleLeftPanel": "Activar o desactivar el panel izquierdo (T)"
+    "toggleLeftPanel": "Activar o desactivar el panel izquierdo (T)",
+    "uploadImages": "Cargar imagen(es)"
   },
   "nodes": {
     "zoomInNodes": "Acercar",
@@ -238,7 +329,8 @@
     "showMinimapnodes": "Mostrar el minimapa",
     "reloadNodeTemplates": "Recargar las plantillas de nodos",
     "loadWorkflow": "Cargar el flujo de trabajo",
-    "downloadWorkflow": "Descargar el flujo de trabajo en un archivo JSON"
+    "downloadWorkflow": "Descargar el flujo de trabajo en un archivo JSON",
+    "boardAccessError": "No se puede encontrar el panel {{board_id}}, se está restableciendo al valor predeterminado"
   },
   "boards": {
     "autoAddBoard": "Agregar panel automáticamente",
@@ -255,7 +347,7 @@
     "bottomMessage": "Al eliminar este panel y las imágenes que contiene, se restablecerán las funciones que los estén utilizando actualmente.",
     "deleteBoardAndImages": "Borrar el panel y las imágenes",
     "loading": "Cargando...",
-    "deletedBoardsCannotbeRestored": "Los paneles eliminados no se pueden restaurar. Al Seleccionar 'Borrar Solo el Panel' transferirá las imágenes a un estado sin categorizar.",
+    "deletedBoardsCannotbeRestored": "Los paneles eliminados no se pueden restaurar. Al Seleccionar 'Borrar solo el panel' transferirá las imágenes a un estado sin categorizar.",
     "move": "Mover",
     "menuItemAutoAdd": "Agregar automáticamente a este panel",
     "searchBoard": "Buscando paneles…",
@@ -263,29 +355,33 @@
     "downloadBoard": "Descargar panel",
     "deleteBoardOnly": "Borrar solo el panel",
     "myBoard": "Mi panel",
-    "noMatching": "No hay paneles que coincidan",
+    "noMatching": "Sin paneles coincidentes",
     "imagesWithCount_one": "{{count}} imagen",
     "imagesWithCount_many": "{{count}} imágenes",
     "imagesWithCount_other": "{{count}} imágenes",
     "assetsWithCount_one": "{{count}} activo",
     "assetsWithCount_many": "{{count}} activos",
     "assetsWithCount_other": "{{count}} activos",
-    "hideBoards": "Ocultar Paneles",
-    "addPrivateBoard": "Agregar un tablero privado",
-    "addSharedBoard": "Agregar Panel Compartido",
+    "hideBoards": "Ocultar paneles",
+    "addPrivateBoard": "Agregar un panel privado",
+    "addSharedBoard": "Añadir panel compartido",
     "boards": "Paneles",
-    "archiveBoard": "Archivar Panel",
+    "archiveBoard": "Archivar panel",
     "archived": "Archivado",
     "selectedForAutoAdd": "Seleccionado para agregar automáticamente",
-    "unarchiveBoard": "Desarchivar el tablero",
-    "noBoards": "No hay tableros {{boardType}}",
-    "shared": "Carpetas compartidas",
-    "deletedPrivateBoardsCannotbeRestored": "Los tableros eliminados no se pueden restaurar. Al elegir \"Eliminar solo tablero\", las imágenes se colocan en un estado privado y sin categoría para el creador de la imagen."
+    "unarchiveBoard": "Desarchivar el panel",
+    "noBoards": "No hay paneles {{boardType}}",
+    "shared": "Paneles compartidos",
+    "deletedPrivateBoardsCannotbeRestored": "Los paneles eliminados no se pueden restaurar. Al elegir \"Eliminar solo el panel\", las imágenes se colocan en un estado privado y sin categoría para el creador de la imagen.",
+    "viewBoards": "Ver paneles",
+    "private": "Paneles privados",
+    "updateBoardError": "No se pudo actualizar el panel"
   },
   "accordions": {
     "compositing": {
       "title": "Composición",
-      "infillTab": "Relleno"
+      "infillTab": "Relleno",
+      "coherenceTab": "Parámetros de la coherencia"
     },
     "generation": {
       "title": "Generación"
@@ -309,7 +405,10 @@
     "workflows": "Flujos de trabajo",
     "models": "Modelos",
     "modelsTab": "$t(ui.tabs.models) $t(common.tab)",
-    "workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)"
+    "workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
+    "upscaling": "Upscaling",
+    "gallery": "Galería",
+    "upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)"
   }
 },
 "queue": {
@@ -317,12 +416,81 @@
   "front": "Delante",
   "batchQueuedDesc_one": "Se agregó {{count}} sesión a {{direction}} la cola",
   "batchQueuedDesc_many": "Se agregaron {{count}} sesiones a {{direction}} la cola",
-  "batchQueuedDesc_other": "Se agregaron {{count}} sesiones a {{direction}} la cola"
+  "batchQueuedDesc_other": "Se agregaron {{count}} sesiones a {{direction}} la cola",
+  "clearQueueAlertDialog": "Al vaciar la cola se cancela inmediatamente cualquier elemento de procesamiento y se vaciará la cola por completo. Los filtros pendientes se cancelarán.",
+  "time": "Tiempo",
+  "clearFailed": "Error al vaciar la cola",
+  "cancelFailed": "Error al cancelar el elemento",
+  "resumeFailed": "Error al reanudar el proceso",
+  "pause": "Pausar",
+  "pauseTooltip": "Pausar el proceso",
+  "cancelBatchSucceeded": "Lote cancelado",
+  "pruneSucceeded": "Se purgaron {{item_count}} elementos completados de la cola",
+  "pruneFailed": "Error al purgar la cola",
+  "cancelBatchFailed": "Error al cancelar los lotes",
+  "pauseFailed": "Error al pausar el proceso",
+  "status": "Estado",
+  "origin": "Origen",
+  "destination": "Destino",
+  "generations_one": "Generación",
+  "generations_many": "Generaciones",
+  "generations_other": "Generaciones",
+  "resume": "Reanudar",
+  "queueEmpty": "Cola vacía",
+  "cancelItem": "Cancelar elemento",
+  "cancelBatch": "Cancelar lote",
+  "openQueue": "Abrir la cola",
+  "completed": "Completado",
+  "enqueueing": "Añadir lotes a la cola",
+  "clear": "Limpiar",
+  "pauseSucceeded": "Proceso pausado",
+  "resumeSucceeded": "Proceso reanudado",
+  "resumeTooltip": "Reanudar proceso",
+  "cancel": "Cancelar",
+  "cancelTooltip": "Cancelar artículo actual",
+  "pruneTooltip": "Purgar {{item_count}} elementos completados",
+  "batchQueued": "Lote en cola",
+  "pending": "Pendiente",
+  "item": "Elemento",
+  "total": "Total",
+  "in_progress": "En proceso",
+  "failed": "Fallido",
+  "completedIn": "Completado en",
+  "upscaling": "Upscaling",
+  "canvas": "Lienzo",
+  "generation": "Generación",
+  "workflows": "Flujo de trabajo",
+  "other": "Otro",
+  "queueFront": "Añadir al principio de la cola",
+  "gallery": "Galería",
+  "batchFieldValues": "Valores de procesamiento por lotes",
+  "session": "Sesión",
+  "notReady": "La cola aún no está lista",
+  "graphQueued": "Gráfico en cola",
+  "clearQueueAlertDialog2": "¿Estás seguro que deseas vaciar la cola?",
+  "next": "Siguiente",
+  "iterations_one": "Interacción",
+  "iterations_many": "Interacciones",
+  "iterations_other": "Interacciones",
+  "current": "Actual",
+  "queue": "Cola",
+  "queueBack": "Añadir a la cola",
+  "cancelSucceeded": "Elemento cancelado",
+  "clearTooltip": "Cancelar y limpiar todos los elementos",
+  "clearSucceeded": "Cola vaciada",
+  "canceled": "Cancelado",
+  "batch": "Lote",
+  "graphFailedToQueue": "Error al poner el gráfico en cola",
+  "batchFailedToQueue": "Error al poner en cola el lote",
+  "prompts_one": "Prompt",
+  "prompts_many": "Prompts",
+  "prompts_other": "Prompts",
+  "prune": "Eliminar"
 },
 "upsell": {
   "inviteTeammates": "Invitar compañeros de equipo",
   "shareAccess": "Compartir acceso",
-  "professionalUpsell": "Disponible en la edición profesional de Invoke. Haz clic aquí o visita invoke.com/pricing para obtener más detalles."
+  "professionalUpsell": "Disponible en la edición profesional de Invoke. Haga clic aquí o visite invoke.com/pricing para obtener más detalles."
 },
 "controlLayers": {
   "layer_one": "Capa",
@@ -330,6 +498,415 @@
|
||||
"layer_other": "Capas",
|
||||
"layer_withCount_one": "({{count}}) capa",
|
||||
"layer_withCount_many": "({{count}}) capas",
|
||||
"layer_withCount_other": "({{count}}) capas"
|
||||
"layer_withCount_other": "({{count}}) capas",
|
||||
"copyToClipboard": "Copiar al portapapeles"
|
||||
},
|
||||
"whatsNew": {
|
||||
"readReleaseNotes": "Leer las notas de la versión",
|
||||
"watchRecentReleaseVideos": "Ver videos de versiones recientes",
|
||||
"watchUiUpdatesOverview": "Descripción general de las actualizaciones de la interfaz de usuario de Watch",
|
||||
"whatsNewInInvoke": "Novedades en Invoke",
|
||||
"items": [
|
||||
"<StrongComponent>SD 3.5</StrongComponent>: compatibilidad con SD 3.5 Medium y Large.",
|
||||
"<StrongComponent>Lienzo</StrongComponent>: Se ha simplificado el procesamiento de la capa de control y se ha mejorado la configuración predeterminada del control."
|
||||
]
|
||||
},
|
||||
"invocationCache": {
|
||||
"enableFailed": "Error al activar la cache",
|
||||
"cacheSize": "Tamaño de la caché",
|
||||
"hits": "Accesos a la caché",
|
||||
"invocationCache": "Caché",
|
||||
"misses": "Errores de la caché",
|
||||
"clear": "Limpiar",
|
||||
"maxCacheSize": "Tamaño máximo de la caché",
|
||||
"enableSucceeded": "Cache activada",
|
||||
"clearFailed": "Error al borrar la cache",
|
||||
"enable": "Activar",
|
||||
"useCache": "Uso de la caché",
|
||||
"disableSucceeded": "Caché desactivada",
|
||||
"clearSucceeded": "Caché borrada",
|
||||
"disable": "Desactivar",
|
||||
"disableFailed": "Error al desactivar la caché"
|
||||
},
|
||||
"hrf": {
|
||||
"hrf": "Solución de alta resolución",
|
||||
"enableHrf": "Activar corrección de alta resolución",
|
||||
"metadata": {
|
||||
"enabled": "Corrección de alta resolución activada",
|
||||
"strength": "Forzar la corrección de alta resolución",
|
||||
"method": "Método de corrección de alta resolución"
|
||||
},
|
||||
"upscaleMethod": "Método de expansión"
|
||||
},
|
||||
"prompt": {
|
||||
"addPromptTrigger": "Añadir activador de los avisos",
|
||||
"compatibleEmbeddings": "Incrustaciones compatibles",
|
||||
"noMatchingTriggers": "No hay activadores coincidentes"
|
||||
},
|
||||
"hotkeys": {
|
||||
"hotkeys": "Atajo del teclado",
|
||||
"canvas": {
|
||||
"selectViewTool": {
|
||||
"desc": "Selecciona la herramienta de Visualización.",
|
||||
"title": "Visualización"
|
||||
},
|
||||
"cancelFilter": {
|
||||
"title": "Cancelar el filtro",
|
||||
"desc": "Cancelar el filtro pendiente."
|
||||
},
|
||||
"applyTransform": {
|
||||
"title": "Aplicar la transformación",
|
||||
"desc": "Aplicar la transformación pendiente a la capa seleccionada."
|
||||
},
|
||||
"applyFilter": {
|
||||
"desc": "Aplicar el filtro pendiente a la capa seleccionada.",
|
||||
"title": "Aplicar filtro"
|
||||
},
|
||||
"selectBrushTool": {
|
||||
"title": "Pincel",
|
||||
"desc": "Selecciona la herramienta pincel."
|
||||
},
|
||||
"selectBboxTool": {
|
||||
"desc": "Seleccionar la herramienta de selección del marco.",
|
||||
"title": "Selección del marco"
|
||||
},
|
||||
"selectMoveTool": {
|
||||
"desc": "Selecciona la herramienta Mover.",
|
||||
"title": "Mover"
|
||||
},
|
||||
"selectRectTool": {
|
||||
"title": "Rectángulo",
|
||||
"desc": "Selecciona la herramienta Rectángulo."
|
||||
},
|
||||
"decrementToolWidth": {
|
||||
"title": "Reducir el ancho de la herramienta",
|
||||
"desc": "Disminuye la anchura de la herramienta pincel o goma de borrar, según la que esté seleccionada."
|
||||
},
|
||||
"incrementToolWidth": {
|
||||
"title": "Incrementar la anchura de la herramienta",
|
||||
"desc": "Aumenta la anchura de la herramienta pincel o goma de borrar, según la que esté seleccionada."
|
||||
},
|
||||
"fitBboxToCanvas": {
|
||||
"title": "Ajustar bordes al lienzo",
|
||||
"desc": "Escala y posiciona la vista para ajustarla a los bodes."
|
||||
},
|
||||
"fitLayersToCanvas": {
|
||||
"title": "Ajustar capas al lienzo",
|
||||
"desc": "Escala y posiciona la vista para que se ajuste a todas las capas visibles."
|
||||
},
|
||||
"setFillToWhite": {
|
||||
"title": "Establecer color en blanco",
|
||||
"desc": "Establece el color actual de la herramienta en blanco."
|
||||
},
|
||||
"resetSelected": {
|
||||
"title": "Restablecer capa",
|
||||
"desc": "Restablecer la capa seleccionada. Solo se aplica a Máscara de retoque y Guía regional."
|
||||
},
|
||||
"setZoomTo400Percent": {
|
||||
"desc": "Ajuste la aplicación del lienzo al 400%.",
|
||||
"title": "Ampliar al 400%"
|
||||
},
|
||||
"transformSelected": {
|
||||
"desc": "Transformar la capa seleccionada.",
|
||||
"title": "Transformar"
|
||||
},
|
||||
"selectColorPickerTool": {
|
||||
"title": "Selector de color",
|
||||
"desc": "Seleccione la herramienta de selección de color."
|
||||
},
|
||||
"selectEraserTool": {
|
||||
"title": "Borrador",
|
||||
"desc": "Selecciona la herramienta Borrador."
|
||||
},
|
||||
"setZoomTo100Percent": {
|
||||
"title": "Ampliar al 100%",
|
||||
"desc": "Ajuste ampliar el lienzo al 100%."
|
||||
},
|
||||
"undo": {
|
||||
"title": "Deshacer",
|
||||
"desc": "Deshacer la última acción en el lienzo."
|
||||
},
|
||||
"nextEntity": {
|
||||
"desc": "Seleccione la siguiente capa de la lista.",
|
||||
"title": "Capa siguiente"
|
||||
},
|
||||
"redo": {
|
||||
"title": "Rehacer",
|
||||
"desc": "Rehacer la última acción en el lienzo."
|
||||
},
|
||||
"prevEntity": {
|
||||
"title": "Capa anterior",
|
||||
"desc": "Seleccione la capa anterior de la lista."
|
||||
},
|
||||
"title": "Lienzo",
|
||||
"setZoomTo200Percent": {
|
||||
"title": "Ampliar al 200%",
|
||||
"desc": "Ajuste la ampliación del lienzo al 200%."
|
||||
},
|
||||
"setZoomTo800Percent": {
|
||||
"title": "Ampliar al 800%",
|
||||
"desc": "Ajuste la ampliación del lienzo al 800%."
|
||||
},
|
||||
"filterSelected": {
|
||||
"desc": "Filtra la capa seleccionada. Solo se aplica a las capas Ráster y Control.",
|
||||
"title": "Filtrar"
|
||||
},
|
||||
"cancelTransform": {
|
||||
"title": "Cancelar transformación",
|
||||
"desc": "Cancelar la transformación pendiente."
|
||||
},
|
||||
"deleteSelected": {
|
||||
"title": "Borrar la capa",
|
||||
"desc": "Borrar la capa seleccionada."
|
||||
},
|
||||
"quickSwitch": {
|
||||
"desc": "Cambiar entre las dos últimas capas seleccionadas. Si una capa está seleccionada, cambia siempre entre ella y la última capa no seleccionada.",
|
||||
"title": "Cambio rápido de capa"
|
||||
}
|
||||
},
|
||||
"app": {
|
||||
"selectModelsTab": {
|
||||
"title": "Seleccione la pestaña Modelos",
|
||||
"desc": "Selecciona la pestaña Modelos."
|
||||
},
|
||||
"focusPrompt": {
|
||||
"desc": "Mueve el foco del cursor a la indicación positiva.",
|
||||
"title": "Enfoque"
|
||||
},
|
||||
"toggleLeftPanel": {
|
||||
"title": "Alternar panel izquierdo",
|
||||
"desc": "Mostrar u ocultar el panel izquierdo."
|
||||
},
|
||||
"selectQueueTab": {
|
||||
"title": "Seleccione la pestaña Cola",
|
||||
"desc": "Seleccione la pestaña Cola."
|
||||
},
|
||||
"selectCanvasTab": {
|
||||
"title": "Seleccione la pestaña Lienzo",
|
||||
"desc": "Selecciona la pestaña Lienzo."
|
||||
},
|
||||
"clearQueue": {
|
||||
"title": "Vaciar cola",
|
||||
"desc": "Cancelar y variar todos los elementos de la cola."
|
||||
},
|
||||
"selectUpscalingTab": {
|
||||
"title": "Selecciona la pestaña Ampliar",
|
||||
"desc": "Selecciona la pestaña Aumento de escala."
|
||||
},
|
||||
"togglePanels": {
|
||||
"desc": "Muestra u oculta los paneles izquierdo y derecho a la vez.",
|
||||
"title": "Alternar paneles"
|
||||
},
|
||||
"toggleRightPanel": {
|
||||
"title": "Alternar panel derecho",
|
||||
"desc": "Mostrar u ocultar el panel derecho."
|
||||
},
|
||||
"invokeFront": {
|
||||
"desc": "Pone en cola la solicitud de compilación y la agrega al principio de la cola.",
|
||||
"title": "Invocar (frente)"
|
||||
},
|
||||
"cancelQueueItem": {
|
||||
"title": "Cancelar",
|
||||
"desc": "Cancelar el elemento de la cola que se está procesando."
|
||||
},
|
||||
"invoke": {
|
||||
"desc": "Pone en cola la solicitud de compilación y la agrega al final de la cola.",
|
||||
"title": "Invocar"
|
||||
},
|
||||
"title": "Aplicación",
|
||||
"selectWorkflowsTab": {
|
||||
"title": "Seleccione la pestaña Flujos de trabajo",
|
||||
"desc": "Selecciona la pestaña Flujos de trabajo."
|
||||
},
|
||||
"resetPanelLayout": {
|
||||
"title": "Reiniciar la posición del panel",
|
||||
"desc": "Restablece los paneles izquierdo y derecho a su tamaño y disposición por defecto."
|
||||
}
|
||||
},
|
||||
"workflows": {
|
||||
"addNode": {
|
||||
"title": "Añadir nodo",
|
||||
"desc": "Abrir añadir nodo."
|
||||
},
|
||||
"selectAll": {
|
||||
"title": "Seleccionar todo",
|
||||
"desc": "Seleccione todos los nodos y enlaces."
|
||||
},
|
||||
"deleteSelection": {
|
||||
"desc": "Borrar todos los nodos y enlaces seleccionados.",
|
||||
"title": "Borrar"
|
||||
},
|
||||
"undo": {
|
||||
"desc": "Deshaga la última acción.",
|
||||
"title": "Deshacer"
|
||||
},
|
||||
"redo": {
|
||||
"desc": "Rehacer la última acción.",
|
||||
"title": "Rehacer"
|
||||
},
|
||||
"pasteSelection": {
|
||||
"desc": "Pegar nodos y bordes copiados.",
|
||||
"title": "Pegar"
|
||||
},
|
||||
"title": "Flujos de trabajo",
|
||||
"copySelection": {
|
||||
"desc": "Copiar nodos y bordes seleccionados.",
|
||||
"title": "Copiar"
|
||||
},
|
||||
"pasteSelectionWithEdges": {
|
||||
"desc": "Pega los nodos copiados, los enlaces y todos los enlaces conectados a los nodos copiados.",
|
||||
"title": "Pegar con enlaces"
|
||||
}
|
||||
},
|
||||
"viewer": {
|
||||
"useSize": {
|
||||
"title": "Usar dimensiones",
|
||||
"desc": "Utiliza las dimensiones de la imagen actual como el tamaño del borde."
|
||||
},
|
||||
"remix": {
|
||||
"title": "Remezcla",
|
||||
"desc": "Recupera todos los metadatos excepto la semilla de la imagen actual."
|
||||
},
|
||||
"loadWorkflow": {
|
||||
"desc": "Carga el flujo de trabajo guardado de la imagen actual (si tiene uno).",
|
||||
"title": "Cargar flujo de trabajo"
|
||||
},
|
||||
"recallAll": {
|
||||
"desc": "Recupera todos los metadatos de la imagen actual.",
|
||||
"title": "Recuperar todos los metadatos"
|
||||
},
|
||||
"recallPrompts": {
|
||||
"desc": "Recuerde las indicaciones positivas y negativas de la imagen actual.",
|
||||
"title": "Recordatorios"
|
||||
},
|
||||
"recallSeed": {
|
||||
"title": "Recuperar semilla",
|
||||
"desc": "Recupera la semilla de la imagen actual."
|
||||
},
|
||||
"runPostprocessing": {
|
||||
"title": "Ejecutar posprocesamiento",
|
||||
"desc": "Ejecutar el posprocesamiento seleccionado en la imagen actual."
|
||||
},
|
||||
"toggleMetadata": {
|
||||
"title": "Mostrar/ocultar los metadatos",
|
||||
"desc": "Mostrar u ocultar la superposición de metadatos de la imagen actual."
|
||||
},
|
||||
"nextComparisonMode": {
|
||||
"desc": "Desplácese por los modos de comparación.",
|
||||
"title": "Siguiente comparación"
|
||||
},
|
||||
"title": "Visor de imágenes",
|
||||
"toggleViewer": {
|
||||
"title": "Mostrar/Ocultar el visor de imágenes",
|
||||
"desc": "Mostrar u ocultar el visor de imágenes. Solo disponible en la pestaña Lienzo."
|
||||
},
|
||||
"swapImages": {
|
||||
"title": "Intercambiar imágenes en la comparación",
|
||||
"desc": "Intercambia las imágenes que se están comparando."
|
||||
}
|
||||
},
|
||||
"gallery": {
|
||||
"clearSelection": {
|
||||
"title": "Limpiar selección",
|
||||
"desc": "Borrar la selección actual, si hay alguna."
|
||||
},
|
||||
"galleryNavUp": {
|
||||
"title": "Subir",
|
||||
"desc": "Navega hacia arriba en la cuadrícula de la galería y selecciona esa imagen. Si estás en la parte superior de la página, ve a la página anterior."
|
||||
},
|
||||
"galleryNavLeft": {
|
||||
"title": "Izquierda",
|
||||
"desc": "Navegue hacia la izquierda en la rejilla de la galería, seleccionando esa imagen. Si está en la primera imagen de la fila, vaya a la fila anterior. Si está en la primera imagen de la página, vaya a la página anterior."
|
||||
},
|
||||
"galleryNavDown": {
|
||||
"title": "Bajar",
|
||||
"desc": "Navegue hacia abajo en la parrilla de la galería, seleccionando esa imagen. Si se encuentra al final de la página, vaya a la página siguiente."
|
||||
},
|
||||
"galleryNavRight": {
|
||||
"title": "A la derecha",
|
||||
"desc": "Navegue hacia la derecha en la rejilla de la galería, seleccionando esa imagen. Si está en la última imagen de la fila, vaya a la fila siguiente. Si está en la última imagen de la página, vaya a la página siguiente."
|
||||
},
|
||||
"galleryNavUpAlt": {
|
||||
"desc": "Igual que arriba, pero selecciona la imagen de comparación, abriendo el modo de comparación si no está ya abierto.",
|
||||
"title": "Arriba (Comparar imagen)"
|
||||
},
|
||||
"deleteSelection": {
|
||||
"desc": "Borrar todas las imágenes seleccionadas. Por defecto, se le pedirá que confirme la eliminación. Si las imágenes están actualmente en uso en la aplicación, se te avisará.",
|
||||
"title": "Borrar"
|
||||
},
|
||||
"title": "Galería",
|
||||
"selectAllOnPage": {
|
||||
"title": "Seleccionar todo en la página",
|
||||
"desc": "Seleccionar todas las imágenes en la página actual."
|
||||
}
|
||||
},
|
||||
"searchHotkeys": "Buscar teclas de acceso rápido",
|
||||
"noHotkeysFound": "Sin teclas de acceso rápido",
|
||||
"clearSearch": "Limpiar la búsqueda"
|
||||
},
|
||||
"metadata": {
|
||||
"guidance": "Orientación",
|
||||
"createdBy": "Creado por",
|
||||
"noImageDetails": "Sin detalles en la imagen",
|
||||
"cfgRescaleMultiplier": "$t(parameters.cfgRescaleMultiplier)",
|
||||
"height": "Altura",
|
||||
"imageDimensions": "Dimensiones de la imagen",
|
||||
"seamlessXAxis": "Eje X sin juntas",
|
||||
"seamlessYAxis": "Eje Y sin juntas",
|
||||
"generationMode": "Modo de generación",
|
||||
"scheduler": "Programador",
|
||||
"width": "Ancho",
|
||||
"Threshold": "Umbral de ruido",
|
||||
"canvasV2Metadata": "Lienzo",
|
||||
"metadata": "Metadatos",
|
||||
"model": "Modelo",
|
||||
"allPrompts": "Todas las indicaciones",
|
||||
"cfgScale": "Escala CFG",
|
||||
"imageDetails": "Detalles de la imagen",
|
||||
"negativePrompt": "Indicación negativa",
|
||||
"noMetaData": "Sin metadatos",
|
||||
"parameterSet": "Parámetro {{parameter}} establecido",
|
||||
"vae": "Autocodificador",
|
||||
"workflow": "Flujo de trabajo",
|
||||
"seed": "Semilla",
|
||||
"strength": "Forzar imagen a imagen",
|
||||
"recallParameters": "Parámetros de recuperación",
|
||||
"recallParameter": "Recuperar {{label}}",
|
||||
"steps": "Pasos",
|
||||
"noRecallParameters": "Sin parámetros para recuperar",
|
||||
"parsingFailed": "Error al analizar"
|
||||
},
|
||||
"system": {
|
||||
"logLevel": {
|
||||
"debug": "Depurar",
|
||||
"info": "Información",
|
||||
"warn": "Advertir",
|
||||
"fatal": "Grave",
|
||||
"error": "Error",
|
||||
"trace": "Rastro",
|
||||
"logLevel": "Nivel del registro"
|
||||
},
|
||||
"enableLogging": "Activar registro",
|
||||
"logNamespaces": {
|
||||
"workflows": "Flujos de trabajo",
|
||||
"system": "Sistema",
|
||||
"metadata": "Metadatos",
|
||||
"gallery": "Galería",
|
||||
"logNamespaces": "Espacios para los nombres de registro",
|
||||
"generation": "Generación",
|
||||
"events": "Eventos",
|
||||
"canvas": "Lienzo",
|
||||
"config": "Ajustes",
|
||||
"models": "Modelos",
|
||||
"queue": "Cola"
|
||||
}
|
||||
},
|
||||
"newUserExperience": {
|
||||
"downloadStarterModels": "Descargar modelos de inicio",
|
||||
"toGetStarted": "Para empezar, introduzca un mensaje en el cuadro y haga clic en <StrongComponent>Invocar</StrongComponent> para generar su primera imagen. Seleccione una plantilla para mejorar los resultados. Puede elegir guardar sus imágenes directamente en <StrongComponent>Galería</StrongComponent> o editarlas en <StrongComponent>Lienzo</StrongComponent>.",
|
||||
"importModels": "Importar modelos",
|
||||
"noModelsInstalled": "Parece que no tienes ningún modelo instalado",
|
||||
"gettingStartedSeries": "¿Desea más orientación? Consulte nuestra <LinkComponent>Serie de introducción</LinkComponent> para obtener consejos sobre cómo aprovechar todo el potencial de Invoke Studio.",
|
||||
"toGetStartedLocal": "Para empezar, asegúrate de descargar o importar los modelos necesarios para ejecutar Invoke. A continuación, introduzca un mensaje en el cuadro y haga clic en <StrongComponent>Invocar</StrongComponent> para generar su primera imagen. Seleccione una plantilla para mejorar los resultados. Puede elegir guardar sus imágenes directamente en <StrongComponent>Galería</StrongComponent> o editarlas en el <StrongComponent>Lienzo</StrongComponent>."
}
}

@@ -1985,7 +1985,6 @@
"inpaintMask_withCount_many": "Remplir les masques",
"inpaintMask_withCount_other": "Remplir les masques",
"newImg2ImgCanvasFromImage": "Nouvelle Img2Img à partir de l'image",
"resetCanvas": "Réinitialiser la Toile",
"bboxOverlay": "Afficher la superposition des Bounding Box",
"moveToFront": "Déplacer vers le permier plan",
|
||||
"moveToBack": "Déplacer vers l'arrière plan",
@@ -2034,7 +2033,6 @@
"help2": "Commencez par un point <Bold>Inclure</Bold> au sein de l'objet cible. Ajoutez d'autres points pour affiner la sélection. Moins de points produisent généralement de meilleurs résultats.",
"help3": "Inversez la sélection pour sélectionner tout sauf l'objet cible."
},
"canvasAsControlLayer": "$t(controlLayers.canvas) en tant que $t(controlLayers.controlLayer)",
"convertRegionalGuidanceTo": "Convertir $t(controlLayers.regionalGuidance) vers",
"copyRasterLayerTo": "Copier $t(controlLayers.rasterLayer) vers",
"newControlLayer": "Nouveau $t(controlLayers.controlLayer)",
@@ -2044,8 +2042,7 @@
"convertInpaintMaskTo": "Convertir $t(controlLayers.inpaintMask) vers",
"copyControlLayerTo": "Copier $t(controlLayers.controlLayer) vers",
"newInpaintMask": "Nouveau $t(controlLayers.inpaintMask)",
"newRasterLayer": "Nouveau $t(controlLayers.rasterLayer)",
"canvasAsRasterLayer": "$t(controlLayers.canvas) en tant que $t(controlLayers.rasterLayer)"
"newRasterLayer": "Nouveau $t(controlLayers.rasterLayer)"
},
"upscaling": {
"exceedsMaxSizeDetails": "La limite maximale d'agrandissement est de {{maxUpscaleDimension}}x{{maxUpscaleDimension}} pixels. Veuillez essayer une image plus petite ou réduire votre sélection d'échelle.",

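A note on two conventions visible throughout these locale files: keys ending in plural suffixes such as `_withCount_other` and `_withCount_many` follow i18next's plural-key scheme (the suffix is selected by the target language's plural rules when a `count` option is passed), and `$t(...)` inside a value is i18next's nesting syntax for reusing another key's translation. A minimal sketch of how such keys resolve, using key names taken from the files in this diff:

import i18next from 'i18next';

// Plural suffix selection: Japanese has only the "other" plural category,
// so this resolves the key "imagesWithCount_other": "{{count}} の画像".
i18next.t('boards.imagesWithCount', { count: 3 }); // => "3 の画像"

// Nesting: a value like "$t(controlLayers.canvas) en tant que ..." embeds
// the translation of "controlLayers.canvas" inside the other value.
i18next.t('controlLayers.canvasAsRasterLayer');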
@@ -94,7 +94,9 @@
"view": "Vista",
"close": "Chiudi",
"clipboard": "Appunti",
"ok": "Ok"
"ok": "Ok",
"generating": "Generazione",
"loadingModel": "Caricamento del modello"
},
"gallery": {
"galleryImageSize": "Dimensione dell'immagine",
@@ -597,7 +599,18 @@
"huggingFace": "HuggingFace",
"huggingFaceRepoID": "HuggingFace Repository ID",
"clipEmbed": "CLIP Embed",
"t5Encoder": "T5 Encoder"
"t5Encoder": "T5 Encoder",
"hfTokenInvalidErrorMessage": "Gettone HuggingFace non valido o mancante.",
"hfTokenRequired": "Stai tentando di scaricare un modello che richiede un gettone HuggingFace valido.",
"hfTokenUnableToVerifyErrorMessage": "Impossibile verificare il gettone HuggingFace. Ciò è probabilmente dovuto a un errore di rete. Riprova più tardi.",
"hfTokenHelperText": "Per utilizzare alcuni modelli è necessario un gettone HF. Fai clic qui per creare o ottenere il tuo gettone.",
"hfTokenInvalid": "Gettone HF non valido o mancante",
"hfTokenUnableToVerify": "Impossibile verificare il gettone HF",
"hfTokenSaved": "Gettone HF salvato",
"hfForbidden": "Non hai accesso a questo modello HF",
"hfTokenLabel": "Gettone HuggingFace (richiesto per alcuni modelli)",
"hfForbiddenErrorMessage": "Consigliamo di visitare la pagina del repository su HuggingFace.com. Il proprietario potrebbe richiedere l'accettazione dei termini per poter effettuare il download.",
"hfTokenInvalidErrorMessage2": "Aggiornalo in "
},
"parameters": {
"images": "Immagini",
@@ -699,7 +712,9 @@
"staged": "Maschera espansa",
"optimizedImageToImage": "Immagine-a-immagine ottimizzata",
"sendToCanvas": "Invia alla Tela",
"coherenceMinDenoise": "Riduzione minima del rumore"
"coherenceMinDenoise": "Min rid. rumore",
"recallMetadata": "Richiama i metadati",
"disabledNoRasterContent": "Disabilitato (nessun contenuto Raster)"
},
"settings": {
"models": "Modelli",
@@ -737,7 +752,8 @@
"confirmOnNewSession": "Conferma su nuova sessione",
"enableModelDescriptions": "Abilita le descrizioni dei modelli nei menu a discesa",
"modelDescriptionsDisabled": "Descrizioni dei modelli nei menu a discesa disabilitate",
"modelDescriptionsDisabledDesc": "Le descrizioni dei modelli nei menu a discesa sono state disabilitate. Abilitale nelle Impostazioni."
"modelDescriptionsDisabledDesc": "Le descrizioni dei modelli nei menu a discesa sono state disabilitate. Abilitale nelle Impostazioni.",
"showDetailedInvocationProgress": "Mostra dettagli avanzamento"
},
"toast": {
"uploadFailed": "Caricamento fallito",
@@ -1263,8 +1279,9 @@
},
"paramDenoisingStrength": {
"paragraphs": [
"Quanto rumore viene aggiunto all'immagine in ingresso.",
"0 risulterà in un'immagine identica, mentre 1 risulterà in un'immagine completamente nuova."
"Controlla la differenza tra l'immagine generata e il/i livello/i raster.",
"Una forza inferiore rimane più vicina ai livelli raster visibili combinati. Una forza superiore si basa maggiormente sul prompt globale.",
"Se non sono presenti livelli raster con contenuto visibile, questa impostazione viene ignorata."
],
"heading": "Forza di riduzione del rumore"
},
@@ -1276,7 +1293,7 @@
},
"infillMethod": {
"paragraphs": [
"Metodo di riempimento durante il processo di Outpainting o Inpainting."
"Metodo di riempimento durante il processo di Outpaint o Inpaint."
],
"heading": "Metodo di riempimento"
},
@@ -1444,7 +1461,7 @@
"heading": "Livello minimo di riduzione del rumore",
"paragraphs": [
"Intensità minima di riduzione rumore per la modalità di Coerenza",
"L'intensità minima di riduzione del rumore per la regione di coerenza durante l'inpainting o l'outpainting"
"L'intensità minima di riduzione del rumore per la regione di coerenza durante l'inpaint o l'outpaint"
]
},
"compositingMaskBlur": {
@@ -1498,7 +1515,7 @@
"optimizedDenoising": {
"heading": "Immagine-a-immagine ottimizzata",
"paragraphs": [
"Abilita 'Immagine-a-immagine ottimizzata' per una scala di riduzione del rumore più graduale per le trasformazioni da immagine a immagine e di inpainting con modelli Flux. Questa impostazione migliora la capacità di controllare la quantità di modifica applicata a un'immagine, ma può essere disattivata se preferisci usare la scala di riduzione rumore standard. Questa impostazione è ancora in fase di messa a punto ed è in stato beta."
"Abilita 'Immagine-a-immagine ottimizzata' per una scala di riduzione del rumore più graduale per le trasformazioni da immagine a immagine e di inpaint con modelli Flux. Questa impostazione migliora la capacità di controllare la quantità di modifica applicata a un'immagine, ma può essere disattivata se preferisci usare la scala di riduzione rumore standard. Questa impostazione è ancora in fase di messa a punto ed è in stato beta."
]
},
"paramGuidance": {
@@ -1733,8 +1750,7 @@
"newRegionalReferenceImageError": "Problema nella creazione dell'immagine di riferimento regionale",
"newControlLayerOk": "Livello di controllo creato",
"bboxOverlay": "Mostra sovrapposizione riquadro",
"resetCanvas": "Reimposta la tela",
"outputOnlyMaskedRegions": "Solo regioni mascherate in uscita",
"outputOnlyMaskedRegions": "In uscita solo le regioni generate",
"enableAutoNegative": "Abilita Auto Negativo",
"disableAutoNegative": "Disabilita Auto Negativo",
"showHUD": "Mostra HUD",
@@ -1771,7 +1787,7 @@
"globalReferenceImage_withCount_many": "Immagini di riferimento Globali",
"globalReferenceImage_withCount_other": "Immagini di riferimento Globali",
"controlMode": {
"balanced": "Bilanciato",
"balanced": "Bilanciato (consigliato)",
"controlMode": "Modalità di controllo",
"prompt": "Prompt",
"control": "Controllo",
@@ -1782,7 +1798,7 @@
"beginEndStepPercentShort": "Inizio/Fine %",
"stagingOnCanvas": "Genera immagini nella",
"ipAdapterMethod": {
"full": "Completo",
"full": "Stile e Composizione",
"style": "Solo Stile",
"composition": "Solo Composizione",
"ipAdapterMethod": "Metodo Adattatore IP"
@@ -1881,7 +1897,10 @@
"lineart_anime_edge_detection": {
"description": "Genera una mappa dei bordi dal livello selezionato utilizzando il modello di rilevamento dei bordi Lineart Anime.",
"label": "Rilevamento bordi Lineart Anime"
}
},
"forMoreControl": "Per un maggiore controllo, fare clic su Avanzate qui sotto.",
"advanced": "Avanzate",
"processingLayerWith": "Elaborazione del livello con il filtro {{type}}."
},
"controlLayers_withCount_hidden": "Livelli di controllo ({{count}} nascosti)",
"regionalGuidance_withCount_hidden": "Guida regionale ({{count}} nascosti)",
@@ -2016,8 +2035,6 @@
"convertControlLayerTo": "Converti $t(controlLayers.controlLayer) in",
"newRasterLayer": "Nuovo $t(controlLayers.rasterLayer)",
"newRegionalGuidance": "Nuova $t(controlLayers.regionalGuidance)",
"canvasAsRasterLayer": "$t(controlLayers.canvas) come $t(controlLayers.rasterLayer)",
"canvasAsControlLayer": "$t(controlLayers.canvas) come $t(controlLayers.controlLayer)",
"convertInpaintMaskTo": "Converti $t(controlLayers.inpaintMask) in",
"copyRegionalGuidanceTo": "Copia $t(controlLayers.regionalGuidance) in",
"convertRasterLayerTo": "Converti $t(controlLayers.rasterLayer) in",
@@ -2025,7 +2042,9 @@
"newControlLayer": "Nuovo $t(controlLayers.controlLayer)",
"newInpaintMask": "Nuova $t(controlLayers.inpaintMask)",
"replaceCurrent": "Sostituisci corrente",
"mergeDown": "Unire in basso"
"mergeDown": "Unire in basso",
"mergingLayers": "Unione dei livelli",
"controlLayerEmptyState": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton> su questo livello oppure disegna sulla tela per iniziare."
},
"ui": {
"tabs": {
@@ -2057,7 +2076,9 @@
"postProcessingMissingModelWarning": "Visita <LinkComponent>Gestione modelli</LinkComponent> per installare un modello di post-elaborazione (da immagine a immagine).",
"exceedsMaxSize": "Le impostazioni di ampliamento superano il limite massimo delle dimensioni",
"exceedsMaxSizeDetails": "Il limite massimo di ampliamento è {{maxUpscaleDimension}}x{{maxUpscaleDimension}} pixel. Prova un'immagine più piccola o diminuisci la scala selezionata.",
"upscale": "Amplia"
"upscale": "Amplia",
"incompatibleBaseModel": "Architettura del modello principale non supportata per l'ampliamento",
"incompatibleBaseModelDesc": "L'ampliamento è supportato solo per i modelli di architettura SD1.5 e SDXL. Cambia il modello principale per abilitare l'ampliamento."
},
"upsell": {
"inviteTeammates": "Invita collaboratori",
@@ -2119,12 +2140,13 @@
},
"whatsNew": {
"whatsNewInInvoke": "Novità in Invoke",
"line2": "Supporto Flux esteso, ora con immagini di riferimento globali",
"line3": "Tooltip e menu contestuali migliorati",
"readReleaseNotes": "Leggi le note di rilascio",
"watchRecentReleaseVideos": "Guarda i video su questa versione",
"line1": "Strumento <ItalicComponent>Seleziona oggetto</ItalicComponent> per la selezione e la modifica precise degli oggetti",
"watchUiUpdatesOverview": "Guarda le novità dell'interfaccia"
"watchUiUpdatesOverview": "Guarda le novità dell'interfaccia",
"items": [
"<StrongComponent>SD 3.5</StrongComponent>: supporto per SD 3.5 Medium e Large.",
"<StrongComponent>Tela</StrongComponent>: elaborazione semplificata del livello di controllo e impostazioni di controllo predefinite migliorate."
]
},
"system": {
"logLevel": {

@@ -16,7 +16,7 @@
"discordLabel": "Discord",
"nodes": "ワークフロー",
"txt2img": "txt2img",
"postprocessing": "Post Processing",
"postprocessing": "ポストプロセス",
"t2iAdapter": "T2I アダプター",
"communityLabel": "コミュニティ",
"dontAskMeAgain": "次回から確認しない",
@@ -71,8 +71,8 @@
"orderBy": "並び順:",
"enabled": "有効",
"notInstalled": "未インストール",
"positivePrompt": "プロンプト",
"negativePrompt": "除外する要素",
"positivePrompt": "ポジティブプロンプト",
"negativePrompt": "ネガティブプロンプト",
"selected": "選択済み",
"aboutDesc": "Invokeを業務で利用する場合はマークしてください:",
"beta": "ベータ",
@@ -80,7 +80,20 @@
"editor": "エディタ",
"safetensors": "Safetensors",
"tab": "タブ",
"toResolve": "解決方法"
"toResolve": "解決方法",
"openInViewer": "ビューアで開く",
"placeholderSelectAModel": "モデルを選択",
"clipboard": "クリップボード",
"apply": "適用",
"loadingImage": "画像をロード中",
"off": "オフ",
"view": "ビュー",
"edit": "編集",
"ok": "OK",
"reset": "リセット",
"none": "なし",
"new": "新規",
"close": "閉じる"
},
"gallery": {
"galleryImageSize": "画像のサイズ",
@@ -125,12 +138,114 @@
"compareHelp1": "<Kbd>Alt</Kbd> キーを押しながらギャラリー画像をクリックするか、矢印キーを使用して比較画像を変更します。",
"compareHelp3": "<Kbd>C</Kbd>を押して、比較した画像を入れ替えます。",
"compareHelp4": "<Kbd>[Z</Kbd>]または<Kbd>[Esc</Kbd>]を押して終了します。",
"compareHelp2": "<Kbd>M</Kbd> キーを押して比較モードを切り替えます。"
"compareHelp2": "<Kbd>M</Kbd> キーを押して比較モードを切り替えます。",
"move": "移動",
"openViewer": "ビューアを開く",
"closeViewer": "ビューアを閉じる",
"exitSearch": "画像検索を終了",
"oldestFirst": "最古から",
"showStarredImagesFirst": "スター付き画像を最初に",
"exitBoardSearch": "ボード検索を終了",
"showArchivedBoards": "アーカイブされたボードを表示",
"searchImages": "メタデータで検索",
"gallery": "ギャラリー",
"newestFirst": "最新から",
"jump": "ジャンプ",
"go": "進む",
"sortDirection": "並び替え順",
"displayBoardSearch": "ボード検索",
"displaySearch": "画像を検索",
"boardsSettings": "ボード設定",
"imagesSettings": "ギャラリー画像設定"
},
"hotkeys": {
"searchHotkeys": "ホットキーを検索",
"clearSearch": "検索をクリア",
"noHotkeysFound": "ホットキーが見つかりません"
"noHotkeysFound": "ホットキーが見つかりません",
"viewer": {
"runPostprocessing": {
"title": "ポストプロセスを実行"
},
"useSize": {
"title": "サイズを使用"
},
"recallPrompts": {
"title": "プロンプトを再使用"
},
"recallAll": {
"title": "全てのメタデータを再使用"
},
"recallSeed": {
"title": "シード値を再使用"
}
},
"canvas": {
"redo": {
"title": "やり直し"
},
"transformSelected": {
"title": "変形"
},
"undo": {
"title": "取り消し"
},
"selectEraserTool": {
"title": "消しゴムツール"
},
"cancelTransform": {
"title": "変形をキャンセル"
},
"resetSelected": {
"title": "レイヤーをリセット"
},
"applyTransform": {
"title": "変形を適用"
},
"selectColorPickerTool": {
"title": "スポイトツール"
},
"fitBboxToCanvas": {
"title": "バウンディングボックスをキャンバスにフィット"
},
"selectBrushTool": {
"title": "ブラシツール"
},
"selectMoveTool": {
"title": "移動ツール"
},
"selectBboxTool": {
"title": "バウンディングボックスツール"
},
"title": "キャンバス",
"fitLayersToCanvas": {
"title": "レイヤーをキャンバスにフィット"
}
},
"workflows": {
"undo": {
"title": "取り消し"
},
"redo": {
"title": "やり直し"
}
},
"app": {
"toggleLeftPanel": {
"title": "左パネルをトグル",
"desc": "左パネルを表示または非表示。"
},
"title": "アプリケーション",
"invoke": {
"title": "Invoke"
},
"cancelQueueItem": {
"title": "キャンセル"
},
"clearQueue": {
"title": "キューをクリア"
}
},
"hotkeys": "ホットキー"
},
"modelManager": {
"modelManager": "モデルマネージャ",
@@ -165,7 +280,7 @@
"convertToDiffusers": "ディフューザーに変換",
"alpha": "アルファ",
"modelConverted": "モデル変換が完了しました",
"predictionType": "予測タイプ(安定したディフュージョン 2.x モデルおよび一部の安定したディフュージョン 1.x モデル用)",
"predictionType": "予測タイプ(SD 2.x モデルおよび一部のSD 1.x モデル用)",
"selectModel": "モデルを選択",
"advanced": "高度な設定",
"modelDeleted": "モデルが削除されました",
@@ -178,7 +293,9 @@
"convertToDiffusersHelpText1": "このモデルは 🧨 Diffusers フォーマットに変換されます。",
"convertToDiffusersHelpText3": "チェックポイントファイルは、InvokeAIルートフォルダ内にある場合、ディスクから削除されます。カスタムロケーションにある場合は、削除されません。",
"convertToDiffusersHelpText4": "これは一回限りのプロセスです。コンピュータの仕様によっては、約30秒から60秒かかる可能性があります。",
"cancel": "キャンセル"
"cancel": "キャンセル",
"uploadImage": "画像をアップロード",
"addModels": "モデルを追加"
},
"parameters": {
"images": "画像",
@@ -200,7 +317,19 @@
"info": "情報",
"showOptionsPanel": "オプションパネルを表示",
"iterations": "生成回数",
"general": "基本設定"
"general": "基本設定",
"setToOptimalSize": "サイズをモデルに最適化",
"invoke": {
"addingImagesTo": "画像の追加先"
},
"aspect": "縦横比",
"lockAspectRatio": "縦横比を固定",
"scheduler": "スケジューラー",
"sendToUpscale": "アップスケーラーに転送",
"useSize": "サイズを使用",
"postProcessing": "ポストプロセス (Shift + U)",
"denoisingStrength": "ノイズ除去強度",
"recallMetadata": "メタデータを再使用"
},
"settings": {
"models": "モデル",
@@ -213,7 +342,11 @@
},
"toast": {
"uploadFailed": "アップロード失敗",
"imageCopied": "画像をコピー"
"imageCopied": "画像をコピー",
"imageUploadFailed": "画像のアップロードに失敗しました",
"uploadFailedInvalidUploadDesc": "画像はPNGかJPGである必要があります。",
"sentToUpscale": "アップスケーラーに転送しました",
"imageUploaded": "画像をアップロードしました"
},
"accessibility": {
"invokeProgressBar": "進捗バー",
@@ -226,7 +359,10 @@
"resetUI": "$t(accessibility.reset) UI",
"mode": "モード:",
"about": "Invoke について",
"submitSupportTicket": "サポート依頼を送信する"
"submitSupportTicket": "サポート依頼を送信する",
"uploadImages": "画像をアップロード",
"toggleLeftPanel": "左パネルをトグル (T)",
"toggleRightPanel": "右パネルをトグル (G)"
},
"metadata": {
"Threshold": "ノイズ閾値",
@@ -237,7 +373,8 @@
"scheduler": "スケジューラー",
"positivePrompt": "ポジティブプロンプト",
"strength": "Image to Image 強度",
"recallParameters": "パラメータを呼び出す"
"recallParameters": "パラメータを再使用",
"recallParameter": "{{label}} を再使用"
},
"queue": {
"queueEmpty": "キューが空です",
@@ -297,14 +434,22 @@
"prune": "刈り込み",
"prompts_other": "プロンプト",
"iterations_other": "繰り返し",
"generations_other": "生成"
"generations_other": "生成",
"canvas": "キャンバス",
"workflows": "ワークフロー",
"upscaling": "アップスケール",
"generation": "生成",
"other": "その他",
"gallery": "ギャラリー"
},
"models": {
"noMatchingModels": "一致するモデルがありません",
"loading": "読み込み中",
"noMatchingLoRAs": "一致するLoRAがありません",
"noModelsAvailable": "使用可能なモデルがありません",
"selectModel": "モデルを選択してください"
"selectModel": "モデルを選択してください",
"concepts": "コンセプト",
"addLora": "LoRAを追加"
},
"nodes": {
"addNode": "ノードを追加",
@@ -339,7 +484,8 @@
"cannotConnectOutputToOutput": "出力から出力には接続できません",
"cannotConnectToSelf": "自身のノードには接続できません",
"colorCodeEdges": "カラー-Code Edges",
"loadingNodes": "ノードを読み込み中..."
"loadingNodes": "ノードを読み込み中...",
"scheduler": "スケジューラー"
},
"boards": {
"autoAddBoard": "自動追加するボード",
@@ -362,7 +508,18 @@
"deleteBoardAndImages": "ボードと画像の削除",
"deleteBoardOnly": "ボードのみ削除",
"deletedBoardsCannotbeRestored": "削除されたボードは復元できません",
"movingImagesToBoard_other": "{{count}} の画像をボードに移動:"
"movingImagesToBoard_other": "{{count}} の画像をボードに移動:",
"hideBoards": "ボードを隠す",
"assetsWithCount_other": "{{count}} のアセット",
"addPrivateBoard": "プライベートボードを追加",
"addSharedBoard": "共有ボードを追加",
"boards": "ボード",
"private": "プライベートボード",
"shared": "共有ボード",
"archiveBoard": "ボードをアーカイブ",
"archived": "アーカイブ完了",
"unarchiveBoard": "アーカイブされていないボード",
"imagesWithCount_other": "{{count}} の画像"
},
"invocationCache": {
"invocationCache": "呼び出しキャッシュ",
@@ -387,6 +544,33 @@
"paragraphs": [
"生成された画像の縦横比。"
]
},
"regionalGuidanceAndReferenceImage": {
"heading": "領域ガイダンスと領域参照画像"
},
"regionalReferenceImage": {
"heading": "領域参照画像"
},
"paramScheduler": {
"heading": "スケジューラー"
},
"regionalGuidance": {
"heading": "領域ガイダンス"
},
"rasterLayer": {
"heading": "ラスターレイヤー"
},
"globalReferenceImage": {
"heading": "全域参照画像"
},
"paramUpscaleMethod": {
"heading": "アップスケール手法"
},
"upscaleModel": {
"heading": "アップスケールモデル"
},
"paramAspect": {
"heading": "縦横比"
}
},
"accordions": {
@@ -427,5 +611,79 @@
"tabs": {
"queue": "キュー"
}
},
"controlLayers": {
"globalReferenceImage_withCount_other": "全域参照画像",
"regionalReferenceImage": "領域参照画像",
"saveLayerToAssets": "レイヤーをアセットに保存",
"global": "全域",
"inpaintMasks_withCount_hidden": "インペイントマスク ({{count}} hidden)",
"opacity": "透明度",
"canvasContextMenu": {
"newRegionalGuidance": "新規領域ガイダンス",
"bboxGroup": "バウンディングボックスから作成",
"cropCanvasToBbox": "キャンバスをバウンディングボックスでクロップ",
"newGlobalReferenceImage": "新規全域参照画像",
"newRegionalReferenceImage": "新規領域参照画像"
},
"regionalGuidance": "領域ガイダンス",
"globalReferenceImage": "全域参照画像",
"moveForward": "前面へ移動",
"copyInpaintMaskTo": "$t(controlLayers.inpaintMask) をコピー",
"transform": {
"fitToBbox": "バウンディングボックスにフィット",
"transform": "変形",
"apply": "適用",
"cancel": "キャンセル",
"reset": "リセット"
},
"cropLayerToBbox": "レイヤーをバウンディングボックスでクロップ",
"convertInpaintMaskTo": "$t(controlLayers.inpaintMask)を変換",
"regionalGuidance_withCount_other": "領域ガイダンス",
"tool": {
"colorPicker": "スポイト",
"brush": "ブラシ",
"rectangle": "矩形",
"move": "移動",
"eraser": "消しゴム"
},
"saveCanvasToGallery": "キャンバスをギャラリーに保存",
"saveBboxToGallery": "バウンディングボックスをギャラリーへ保存",
"moveToBack": "最背面へ移動",
"duplicate": "複製",
"addLayer": "レイヤーを追加",
"rasterLayer": "ラスターレイヤー",
"inpaintMasks_withCount_visible": "({{count}}) インペイントマスク",
"regional": "領域",
"rectangle": "矩形",
"moveBackward": "背面へ移動",
"moveToFront": "最前面へ移動",
"mergeDown": "レイヤーを統合",
"inpaintMask_withCount_other": "インペイントマスク",
"canvas": "キャンバス",
"fitBboxToLayers": "バウンディングボックスをレイヤーにフィット",
"removeBookmark": "ブックマークを外す",
"savedToGalleryOk": "ギャラリーに保存しました"
},
"stylePresets": {
"clearTemplateSelection": "選択したテンプレートをクリア",
"choosePromptTemplate": "プロンプトテンプレートを選択",
"myTemplates": "自分のテンプレート",
"flatten": "選択中のテンプレートをプロンプトに展開",
"uploadImage": "画像をアップロード",
"defaultTemplates": "デフォルトテンプレート",
"createPromptTemplate": "プロンプトテンプレートを作成",
"promptTemplateCleared": "プロンプトテンプレートをクリアしました",
"searchByName": "名前で検索",
"toggleViewMode": "表示モードを切り替え"
},
"upscaling": {
"upscaleModel": "アップスケールモデル",
"postProcessingModel": "ポストプロセスモデル",
"upscale": "アップスケール"
},
"sdxl": {
"denoisingStrength": "ノイズ除去強度",
"scheduler": "スケジューラー"
}
}

@@ -1660,7 +1660,6 @@
"clearCaches": "Очистить кэши",
"recalculateRects": "Пересчитать прямоугольники",
"saveBboxToGallery": "Сохранить рамку в галерею",
"resetCanvas": "Сбросить холст",
"canvas": "Холст",
"global": "Глобальный",
"newGlobalReferenceImageError": "Проблема с созданием глобального эталонного изображения",

File diff suppressed because it is too large
@@ -96,7 +96,9 @@
"view": "视图",
"alpha": "透明度通道",
"openInViewer": "在查看器中打开",
"clipboard": "剪贴板"
"clipboard": "剪贴板",
"loadingModel": "加载模型",
"generating": "生成中"
},
"gallery": {
"galleryImageSize": "预览大小",
@@ -587,7 +589,27 @@
"huggingFace": "HuggingFace",
"hfTokenInvalid": "HF 令牌无效或缺失",
"hfTokenLabel": "HuggingFace 令牌(某些模型所需)",
"hfTokenHelperText": "使用某些模型需要 HF 令牌。点击这里创建或获取你的令牌。"
"hfTokenHelperText": "使用某些模型需要 HF 令牌。点击这里创建或获取你的令牌。",
"includesNModels": "包括 {{n}} 个模型及其依赖项",
"starterBundles": "启动器包",
"learnMoreAboutSupportedModels": "了解更多关于我们支持的模型的信息",
"hfForbidden": "您没有权限访问这个 HF 模型",
"hfTokenInvalidErrorMessage": "无效或缺失 HuggingFace 令牌。",
"hfTokenRequired": "您正在尝试下载一个需要有效 HuggingFace 令牌的模型。",
"hfTokenSaved": "HF 令牌已保存",
"hfForbiddenErrorMessage": "我们建议访问 HuggingFace.com 上的仓库页面。所有者可能要求您接受条款才能下载。",
"hfTokenUnableToVerifyErrorMessage": "无法验证 HuggingFace 令牌。这可能是由于网络错误导致的。请稍后再试。",
"hfTokenInvalidErrorMessage2": "在这里更新它。 ",
"hfTokenUnableToVerify": "无法验证 HF 令牌",
"skippingXDuplicates_other": "跳过 {{count}} 个重复项",
"starterBundleHelpText": "轻松安装所有用于启动基础模型所需的模型,包括主模型、ControlNets、IP适配器等。选择一个安装包时,会跳过已安装的模型。",
"installingBundle": "正在安装模型包",
"installingModel": "正在安装模型",
"installingXModels_other": "正在安装 {{count}} 个模型",
"t5Encoder": "T5 编码器",
"clipLEmbed": "CLIP-L 嵌入",
"clipGEmbed": "CLIP-G 嵌入",
"loraModels": "LoRAs(低秩适配)"
},
"parameters": {
"images": "图像",
@@ -646,8 +668,23 @@
"controlAdapterIncompatibleBaseModel": "Control Adapter的基础模型不兼容",
"ipAdapterIncompatibleBaseModel": "IP Adapter的基础模型不兼容",
"ipAdapterNoImageSelected": "未选择IP Adapter图像",
"rgNoRegion": "未选择区域"
}
"rgNoRegion": "未选择区域",
"t2iAdapterIncompatibleBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}},边界框宽度为 {{width}}",
"t2iAdapterIncompatibleScaledBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}},缩放后的边界框高度为 {{height}}",
"t2iAdapterIncompatibleBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}},边界框高度为 {{height}}",
"t2iAdapterIncompatibleScaledBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}},缩放后的边界框宽度为 {{width}}"
},
"canvasIsFiltering": "画布正在过滤",
"fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16),缩放后的边界框高度为 {{height}}",
"noCLIPEmbedModelSelected": "未为FLUX生成选择CLIP嵌入模型",
"noFLUXVAEModelSelected": "未为FLUX生成选择VAE模型",
"canvasIsRasterizing": "画布正在栅格化",
"canvasIsCompositing": "画布正在合成",
"fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16),边界框宽度为 {{width}}",
"fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16),缩放后的边界框宽度为 {{width}}",
"noT5EncoderModelSelected": "未为FLUX生成选择T5编码器模型",
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16),边界框高度为 {{height}}",
"canvasIsTransforming": "画布正在变换"
},
"patchmatchDownScaleSize": "缩小",
"clipSkip": "CLIP 跳过层",
@@ -669,7 +706,15 @@
"sendToUpscale": "发送到放大",
"processImage": "处理图像",
"infillColorValue": "填充颜色",
"coherenceMinDenoise": "最小去噪"
"coherenceMinDenoise": "最小去噪",
"sendToCanvas": "发送到画布",
"disabledNoRasterContent": "已禁用(无栅格内容)",
"optimizedImageToImage": "优化的图生图",
"guidance": "引导",
"gaussianBlur": "高斯模糊",
"recallMetadata": "调用元数据",
"boxBlur": "方框模糊",
"staged": "已分阶段处理"
},
"settings": {
"models": "模型",
@@ -699,7 +744,12 @@
"enableInformationalPopovers": "启用信息弹窗",
"reloadingIn": "重新加载中",
"informationalPopoversDisabled": "信息提示框已禁用",
"informationalPopoversDisabledDesc": "信息提示框已被禁用.请在设置中重新启用."
|
||||
"informationalPopoversDisabledDesc": "信息提示框已被禁用.请在设置中重新启用.",
"enableModelDescriptions": "在下拉菜单中启用模型描述",
"confirmOnNewSession": "新会话时确认",
"modelDescriptionsDisabledDesc": "下拉菜单中的模型描述已被禁用。可在设置中启用。",
"modelDescriptionsDisabled": "下拉菜单中的模型描述已禁用",
"showDetailedInvocationProgress": "显示进度详情"
},
"toast": {
"uploadFailed": "上传失败",
@@ -740,7 +790,24 @@
"errorCopied": "错误信息已复制",
"modelImportCanceled": "模型导入已取消",
"importFailed": "导入失败",
"importSuccessful": "导入成功"
"importSuccessful": "导入成功",
"layerSavedToAssets": "图层已保存到资产",
"sentToUpscale": "已发送到放大处理",
"addedToUncategorized": "已添加到看板 $t(boards.uncategorized) 的资产中",
"linkCopied": "链接已复制",
"uploadFailedInvalidUploadDesc_withCount_other": "最多只能上传 {{count}} 张 PNG 或 JPEG 图像。",
"problemSavingLayer": "无法保存图层",
"unableToLoadImage": "无法加载图像",
"imageNotLoadedDesc": "无法找到图像",
"unableToLoadStylePreset": "无法加载样式预设",
"stylePresetLoaded": "样式预设已加载",
"problemCopyingLayer": "无法复制图层",
"sentToCanvas": "已发送到画布",
"unableToLoadImageMetadata": "无法加载图像元数据",
"imageSaved": "图像已保存",
"imageSavingFailed": "图像保存失败",
"layerCopiedToClipboard": "图层已复制到剪贴板",
"imagesWillBeAddedTo": "上传的图像将添加到看板 {{boardName}} 的资产中。"
},
"accessibility": {
"invokeProgressBar": "Invoke 进度条",
@@ -890,7 +957,12 @@
"clearWorkflow": "清除工作流",
"imageAccessError": "无法找到图像 {{image_name}},正在恢复默认设置",
"boardAccessError": "无法找到面板 {{board_id}},正在恢复默认设置",
"modelAccessError": "无法找到模型 {{key}},正在恢复默认设置"
"modelAccessError": "无法找到模型 {{key}},正在恢复默认设置",
"noWorkflows": "无工作流程",
"workflowHelpText": "需要帮助?请查看我们的《<LinkComponent>工作流程入门指南</LinkComponent>》。",
"noMatchingWorkflows": "无匹配的工作流程",
"saveToGallery": "保存到图库",
"singleFieldType": "{{name}}(单一模型)"
},
"queue": {
"status": "状态",
@@ -1221,7 +1293,8 @@
"heading": "去噪强度",
"paragraphs": [
"为输入图像添加的噪声量。",
"输入 0 会导致结果图像和输入完全相同,输入 1 则会生成全新的图像。"
"输入 0 会导致结果图像和输入完全相同,输入 1 则会生成全新的图像。",
"当没有具有可见内容的栅格图层时,此设置将被忽略。"
]
},
"paramSeed": {
@@ -1410,7 +1483,8 @@
"paragraphs": [
"控制提示对生成过程的影响程度.",
|
||||
"与生成CFG Scale相似."
]
],
"heading": "CFG比例"
},
"structure": {
"heading": "结构",
@@ -1441,6 +1515,62 @@
"paragraphs": [
"比例控制决定了输出图像的大小,它是基于输入图像分辨率的倍数来计算的.例如对一张1024x1024的图像进行2倍上采样,将会得到一张2048x2048的输出图像."
]
},
"globalReferenceImage": {
"heading": "全局参考图像",
"paragraphs": [
"应用参考图像以影响整个生成过程。"
]
},
"rasterLayer": {
"paragraphs": [
"画布的基于像素的内容,用于图像生成过程。"
],
"heading": "栅格图层"
},
"regionalGuidanceAndReferenceImage": {
"paragraphs": [
"对于区域引导,使用画笔引导全局提示中的元素应出现的位置。",
"对于区域参考图像,使用画笔将参考图像应用到特定区域。"
],
"heading": "区域引导与区域参考图像"
},
"regionalReferenceImage": {
"heading": "区域参考图像",
"paragraphs": [
"使用画笔将参考图像应用到特定区域。"
]
},
"optimizedDenoising": {
"heading": "优化的图生图",
"paragraphs": [
"启用‘优化的图生图’功能,可在使用 Flux 模型进行图生图和图像修复转换时提供更平滑的降噪强度调节。此设置可以提高对图像变化程度的控制能力,但如果您更倾向于使用标准的降噪强度调节方式,也可以关闭此功能。该设置仍在优化中,目前处于测试阶段。"
]
},
"inpainting": {
"paragraphs": [
"控制由降噪强度引导的修改区域。"
],
"heading": "图像重绘"
},
"regionalGuidance": {
"heading": "区域引导",
"paragraphs": [
"使用画笔引导全局提示中的元素应出现的位置。"
]
},
"fluxDevLicense": {
"heading": "非商业许可",
"paragraphs": [
"FLUX.1 [dev] 模型受 FLUX [dev] 非商业许可协议的约束。如需在 Invoke 中将此模型类型用于商业目的,请访问我们的网站了解更多信息。"
]
},
"paramGuidance": {
"paragraphs": [
"控制提示对生成过程的影响程度。",
"较高的引导值可能导致过度饱和,而过高或过低的引导值可能导致生成结果失真。引导仅适用于FLUX DEV模型。"
],
"heading": "引导"
}
},
"invocationCache": {
@@ -1503,7 +1633,18 @@
"convertGraph": "转换图表",
"loadWorkflow": "$t(common.load) 工作流",
"loadFromGraph": "从图表加载工作流",
"autoLayout": "自动布局"
"autoLayout": "自动布局",
"edit": "编辑",
"copyShareLinkForWorkflow": "复制工作流程的分享链接",
"delete": "删除",
"download": "下载",
"defaultWorkflows": "默认工作流程",
"userWorkflows": "用户工作流程",
"projectWorkflows": "项目工作流程",
"copyShareLink": "复制分享链接",
"chooseWorkflowFromLibrary": "从库中选择工作流程",
"uploadAndSaveWorkflow": "上传到库",
"deleteWorkflow2": "您确定要删除此工作流程吗?此操作无法撤销。"
},
"accordions": {
"compositing": {
@@ -1542,7 +1683,108 @@
"addPositivePrompt": "添加 $t(controlLayers.prompt)",
"addNegativePrompt": "添加 $t(controlLayers.negativePrompt)",
"rectangle": "矩形",
"opacity": "透明度"
"opacity": "透明度",
"canvas": "画布",
"fitBboxToLayers": "将边界框适配到图层",
"cropLayerToBbox": "将图层裁剪到边界框",
"saveBboxToGallery": "将边界框保存到图库",
"savedToGalleryOk": "已保存到图库",
"saveLayerToAssets": "将图层保存到资产",
"removeBookmark": "移除书签",
"regional": "区域",
"saveCanvasToGallery": "将画布保存到图库",
"global": "全局",
"bookmark": "添加书签以快速切换",
"regionalReferenceImage": "局部参考图像",
"mergingLayers": "正在合并图层",
"newControlLayerError": "创建控制层时出现问题",
"pullBboxIntoReferenceImageError": "将边界框导入参考图像时出现问题",
"mergeVisibleOk": "已合并图层",
"maskFill": "遮罩填充",
"newCanvasFromImage": "从图像创建新画布",
"pullBboxIntoReferenceImageOk": "边界框已导入到参考图像",
"globalReferenceImage_withCount_other": "全局参考图像",
"addInpaintMask": "添加 $t(controlLayers.inpaintMask)",
"referenceImage": "参考图像",
"globalReferenceImage": "全局参考图像",
"newRegionalGuidance": "新建 $t(controlLayers.regionalGuidance)",
"savedToGalleryError": "保存到图库时出错",
"copyRasterLayerTo": "复制 $t(controlLayers.rasterLayer) 到",
"clearHistory": "清除历史记录",
"inpaintMask": "修复遮罩",
"regionalGuidance_withCount_visible": "区域引导({{count}} 个)",
"inpaintMasks_withCount_hidden": "修复遮罩({{count}} 个已隐藏)",
"enableAutoNegative": "启用自动负面提示",
"disableAutoNegative": "禁用自动负面提示",
"deleteReferenceImage": "删除参考图像",
"sendToCanvas": "发送到画布",
"controlLayers_withCount_visible": "控制图层({{count}} 个)",
"rasterLayers_withCount_visible": "栅格图层({{count}} 个)",
"convertRegionalGuidanceTo": "将 $t(controlLayers.regionalGuidance) 转换为",
"newInpaintMask": "新建 $t(controlLayers.inpaintMask)",
"regionIsEmpty": "选定区域为空",
"mergeVisible": "合并可见图层",
"showHUD": "显示 HUD(抬头显示)",
"newLayerFromImage": "从图像创建新图层",
"layer_other": "图层",
"transparency": "透明度",
"addRasterLayer": "添加 $t(controlLayers.rasterLayer)",
"newRasterLayerOk": "已创建栅格层",
"newRasterLayerError": "创建栅格层时出现问题",
"inpaintMasks_withCount_visible": "修复遮罩({{count}} 个)",
"convertRasterLayerTo": "将 $t(controlLayers.rasterLayer) 转换为",
"copyControlLayerTo": "复制 $t(controlLayers.controlLayer) 到",
"copyInpaintMaskTo": "复制 $t(controlLayers.inpaintMask) 到",
"copyRegionalGuidanceTo": "复制 $t(controlLayers.regionalGuidance) 到",
"newRasterLayer": "新建 $t(controlLayers.rasterLayer)",
"newControlLayer": "新建 $t(controlLayers.controlLayer)",
"newImg2ImgCanvasFromImage": "从图像创建新的图生图",
"rasterLayer": "栅格层",
"controlLayer": "控制层",
"outputOnlyMaskedRegions": "仅输出生成的区域",
"addControlLayer": "添加 $t(controlLayers.controlLayer)",
"newGlobalReferenceImageOk": "已创建全局参考图像",
"newGlobalReferenceImageError": "创建全局参考图像时出现问题",
"newRegionalReferenceImageOk": "已创建局部参考图像",
"newControlLayerOk": "已创建控制层",
"mergeVisibleError": "合并图层时出错",
"bboxOverlay": "显示边界框覆盖层",
"clipToBbox": "将Clip限制到边界框",
"width": "宽度",
"addGlobalReferenceImage": "添加 $t(controlLayers.globalReferenceImage)",
"inpaintMask_withCount_other": "修复遮罩",
"regionalGuidance_withCount_other": "区域引导",
"newRegionalReferenceImageError": "创建局部参考图像时出现问题",
"pullBboxIntoLayerError": "将边界框导入图层时出现问题",
"pullBboxIntoLayerOk": "边界框已导入到图层",
"sendToCanvasDesc": "按下“Invoke”按钮会将您的工作进度暂存到画布上。",
"sendToGallery": "发送到图库",
"sendToGalleryDesc": "按下“Invoke”键会生成并保存一张唯一的图像到您的图库中。",
"rasterLayer_withCount_other": "栅格图层",
"mergeDown": "向下合并",
"clearCaches": "清除缓存",
"recalculateRects": "重新计算矩形",
"duplicate": "复制",
"regionalGuidance_withCount_hidden": "区域引导({{count}} 个已隐藏)",
"convertControlLayerTo": "将 $t(controlLayers.controlLayer) 转换为",
"convertInpaintMaskTo": "将 $t(controlLayers.inpaintMask) 转换为",
"viewProgressInViewer": "在 <Btn>图像查看器</Btn> 中查看进度和输出结果。",
"viewProgressOnCanvas": "在 <Btn>画布</Btn> 上查看进度和暂存的输出内容。",
"sendingToGallery": "将生成内容发送到图库",
"copyToClipboard": "复制到剪贴板",
"controlLayer_withCount_other": "控制图层",
"sendingToCanvas": "在画布上准备生成",
"addReferenceImage": "添加 $t(controlLayers.referenceImage)",
"addRegionalGuidance": "添加 $t(controlLayers.regionalGuidance)",
"controlLayers_withCount_hidden": "控制图层({{count}} 个已隐藏)",
"rasterLayers_withCount_hidden": "栅格图层({{count}} 个已隐藏)",
"globalReferenceImages_withCount_hidden": "全局参考图像({{count}} 个已隐藏)",
"globalReferenceImages_withCount_visible": "全局参考图像({{count}} 个)",
"layer_withCount_other": "图层({{count}} 个)",
"enableTransparencyEffect": "启用透明效果",
"disableTransparencyEffect": "禁用透明效果",
"hidingType": "隐藏 {{type}}",
"showingType": "显示 {{type}}"
},
"ui": {
"tabs": {

@@ -27,6 +27,7 @@ import { ClearQueueConfirmationsAlertDialog } from 'features/queue/components/Cl
import { DeleteStylePresetDialog } from 'features/stylePresets/components/DeleteStylePresetDialog';
import { StylePresetModal } from 'features/stylePresets/components/StylePresetForm/StylePresetModal';
import RefreshAfterResetModal from 'features/system/components/SettingsModal/RefreshAfterResetModal';
import { VideosModal } from 'features/system/components/VideosModal/VideosModal';
import { configChanged } from 'features/system/store/configSlice';
import { selectLanguage } from 'features/system/store/systemSelectors';
import { AppContent } from 'features/ui/components/AppContent';
@@ -108,6 +109,7 @@ const App = ({ config = DEFAULT_CONFIG, studioInitAction }: Props) => {
<NewCanvasSessionDialog />
<ImageContextMenu />
<FullscreenDropzone />
<VideosModal />
</ErrorBoundary>
);
};

@@ -1,3 +1,4 @@
import { useStore } from '@nanostores/react';
import { useAppStore } from 'app/store/storeHooks';
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
import { withResultAsync } from 'common/util/result';
@@ -9,6 +10,7 @@ import { imageDTOToImageObject } from 'features/controlLayers/store/util';
import { $imageViewer } from 'features/gallery/components/ImageViewer/useImageViewer';
import { sentImageToCanvas } from 'features/gallery/store/actions';
import { parseAndRecallAllMetadata } from 'features/metadata/util/handlers';
import { $hasTemplates } from 'features/nodes/store/nodesSlice';
import { $isWorkflowListMenuIsOpen } from 'features/nodes/store/workflowListMenu';
import { $isStylePresetsMenuOpen, activeStylePresetIdChanged } from 'features/stylePresets/store/stylePresetSlice';
import { toast } from 'features/toast/toast';
@@ -51,6 +53,7 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
const { t } = useTranslation();
// Use a ref to ensure that we only perform the action once
const didInit = useRef(false);
const didParseOpenAPISchema = useStore($hasTemplates);
const store = useAppStore();
const { getAndLoadWorkflow } = useGetAndLoadLibraryWorkflow();

@@ -174,7 +177,7 @@
);

useEffect(() => {
if (didInit.current || !action) {
if (didInit.current || !action || !didParseOpenAPISchema) {
return;
}

@@ -187,22 +190,29 @@
case 'selectStylePreset':
handleSelectStylePreset(action.data.stylePresetId);
break;

case 'sendToCanvas':
handleSendToCanvas(action.data.imageName);
break;

case 'useAllParameters':
handleUseAllMetadata(action.data.imageName);
break;

case 'goToDestination':
handleGoToDestination(action.data.destination);
break;

default:
break;
}
}, [
handleSendToCanvas,
handleUseAllMetadata,
action,
handleLoadWorkflow,
handleSelectStylePreset,
handleGoToDestination,
handleLoadWorkflow,
didParseOpenAPISchema,
]);
};

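A note on the useStudioInitAction hunks above: besides the new $hasTemplates import, the effect now also bails until didParseOpenAPISchema is true, so init actions that need node templates (such as loading a workflow) cannot run before the OpenAPI schema has been parsed. A minimal sketch of the guard pattern, assuming $hasTemplates is a nanostores atom that flips to true once parsing completes (performAction is a hypothetical stand-in for the switch over init actions):

import { useStore } from '@nanostores/react';
import { useEffect, useRef } from 'react';
import { $hasTemplates } from 'features/nodes/store/nodesSlice';

const useInitOnceAfterSchema = (action: unknown, performAction: () => void) => {
  // Ref ensures the init action runs at most once per mount.
  const didInit = useRef(false);
  const didParseOpenAPISchema = useStore($hasTemplates);
  useEffect(() => {
    // Bail until there is an action to run AND the schema is parsed.
    if (didInit.current || !action || !didParseOpenAPISchema) {
      return;
    }
    didInit.current = true;
    performAction();
  }, [action, didParseOpenAPISchema, performAction]);
};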
@@ -4,7 +4,7 @@ import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'
import { buildAdHocPostProcessingGraph } from 'features/nodes/util/graph/buildAdHocPostProcessingGraph';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { queueApi } from 'services/api/endpoints/queue';
import { enqueueMutationFixedCacheKeyOptions, queueApi } from 'services/api/endpoints/queue';
import type { BatchConfig, ImageDTO } from 'services/api/types';
import type { JsonObject } from 'type-fest';

@@ -32,9 +32,7 @@ export const addAdHocPostProcessingRequestedListener = (startAppListening: AppSt

try {
const req = dispatch(
queueApi.endpoints.enqueueBatch.initiate(enqueueBatchArg, {
fixedCacheKey: 'enqueueBatch',
})
queueApi.endpoints.enqueueBatch.initiate(enqueueBatchArg, enqueueMutationFixedCacheKeyOptions)
);

const enqueueResult = await req.unwrap();

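In this hunk and the enqueue listeners that follow, the inline { fixedCacheKey: 'enqueueBatch' } options object is consolidated into a shared enqueueMutationFixedCacheKeyOptions constant exported from 'services/api/endpoints/queue'. The diff only shows its usage; a plausible definition, inferred from the inline options it replaces:

// Assumption: not shown in the diff, reconstructed from the old inline options.
export const enqueueMutationFixedCacheKeyOptions = {
  fixedCacheKey: 'enqueueBatch',
} as const;

With RTK Query, a shared fixed cache key makes every enqueueBatch mutation report into the same cache entry, so any hook subscribed with that key observes the same in-flight state.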
@@ -8,11 +8,12 @@ import { $canvasManager } from 'features/controlLayers/store/ephemeral';
import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
import { buildFLUXGraph } from 'features/nodes/util/graph/generation/buildFLUXGraph';
import { buildSD1Graph } from 'features/nodes/util/graph/generation/buildSD1Graph';
import { buildSD3Graph } from 'features/nodes/util/graph/generation/buildSD3Graph';
import { buildSDXLGraph } from 'features/nodes/util/graph/generation/buildSDXLGraph';
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import { toast } from 'features/toast/toast';
import { serializeError } from 'serialize-error';
import { queueApi } from 'services/api/endpoints/queue';
import { enqueueMutationFixedCacheKeyOptions, queueApi } from 'services/api/endpoints/queue';
import type { Invocation } from 'services/api/types';
import { assert, AssertionError } from 'tsafe';
import type { JsonObject } from 'type-fest';
@@ -34,8 +35,8 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)
let buildGraphResult: Result<
{
g: Graph;
noise: Invocation<'noise' | 'flux_denoise'>;
posCond: Invocation<'compel' | 'sdxl_compel_prompt' | 'flux_text_encoder'>;
noise: Invocation<'noise' | 'flux_denoise' | 'sd3_denoise'>;
posCond: Invocation<'compel' | 'sdxl_compel_prompt' | 'flux_text_encoder' | 'sd3_text_encoder'>;
},
Error
>;
@@ -51,6 +52,9 @@
case `sd-2`:
buildGraphResult = await withResultAsync(() => buildSD1Graph(state, manager));
break;
case `sd-3`:
buildGraphResult = await withResultAsync(() => buildSD3Graph(state, manager));
break;
case `flux`:
buildGraphResult = await withResultAsync(() => buildFLUXGraph(state, manager));
break;
@@ -87,9 +91,7 @@
}

const req = dispatch(
queueApi.endpoints.enqueueBatch.initiate(prepareBatchResult.value, {
fixedCacheKey: 'enqueueBatch',
})
queueApi.endpoints.enqueueBatch.initiate(prepareBatchResult.value, enqueueMutationFixedCacheKeyOptions)
);
req.reset();

@@ -1,12 +1,16 @@
import { logger } from 'app/logging/logger';
import { enqueueRequested } from 'app/store/actions';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { selectNodesSlice } from 'features/nodes/store/selectors';
import { isImageBatchNode } from 'features/nodes/types/invocation';
import { isImageFieldCollectionInputInstance } from 'features/nodes/types/field';
import { isInvocationNode } from 'features/nodes/types/invocation';
import { buildNodesGraph } from 'features/nodes/util/graph/buildNodesGraph';
import { buildWorkflowWithValidation } from 'features/nodes/util/workflow/buildWorkflow';
import { queueApi } from 'services/api/endpoints/queue';
import { enqueueMutationFixedCacheKeyOptions, queueApi } from 'services/api/endpoints/queue';
import type { Batch, BatchConfig } from 'services/api/types';

const log = logger('workflows');

export const addEnqueueRequestedNodes = (startAppListening: AppStartListening) => {
startAppListening({
predicate: (action): action is ReturnType<typeof enqueueRequested> =>
@@ -30,13 +34,28 @@
const data: Batch['data'] = [];

// Skip edges from batch nodes - these should not be in the graph, they exist only in the UI
const imageBatchNodes = nodes.nodes.filter(isImageBatchNode);
for (const imageBatch of imageBatchNodes) {
const edge = nodes.edges.find((e) => e.source === imageBatch.id);
if (!edge || !edge.targetHandle) {
const imageBatchNodes = nodes.nodes.filter(isInvocationNode).filter((node) => node.data.type === 'image_batch');
for (const node of imageBatchNodes) {
const images = node.data.inputs['images'];
if (!isImageFieldCollectionInputInstance(images)) {
log.warn({ nodeId: node.id }, 'Image batch images field is not an image collection');
break;
}
data.push([{ node_path: edge.target, field_name: edge.targetHandle, items: imageBatch.data.images }]);
const edgesFromImageBatch = nodes.edges.filter((e) => e.source === node.id && e.sourceHandle === 'image');
const batchDataCollectionItem: NonNullable<Batch['data']>[number] = [];
for (const edge of edgesFromImageBatch) {
if (!edge.targetHandle) {
break;
}
batchDataCollectionItem.push({
node_path: edge.target,
field_name: edge.targetHandle,
items: images.value,
});
}
if (batchDataCollectionItem.length > 0) {
data.push(batchDataCollectionItem);
}
}

const batchConfig: BatchConfig = {
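A note on the reworked image-batch handling above: instead of taking a single edge from each batch node, the listener now collects every 'image' edge leaving an image_batch node into one Batch['data'] collection item, so all connected target fields receive the same image list in lockstep. Sketched shape of one resulting entry (node and field names hypothetical):

// Hypothetical names; mirrors the push performed in the loop above.
const batchDatum: NonNullable<Batch['data']>[number] = [
  { node_path: 'resize_1', field_name: 'image', items: images.value },
  { node_path: 'save_1', field_name: 'image', items: images.value },
];
data.push(batchDatum);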
@@ -51,11 +70,7 @@
prepend: action.payload.prepend,
};

const req = dispatch(
queueApi.endpoints.enqueueBatch.initiate(batchConfig, {
fixedCacheKey: 'enqueueBatch',
})
);
const req = dispatch(queueApi.endpoints.enqueueBatch.initiate(batchConfig, enqueueMutationFixedCacheKeyOptions));
try {
await req.unwrap();
} finally {

@@ -2,7 +2,7 @@ import { enqueueRequested } from 'app/store/actions';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
import { buildMultidiffusionUpscaleGraph } from 'features/nodes/util/graph/buildMultidiffusionUpscaleGraph';
import { queueApi } from 'services/api/endpoints/queue';
import { enqueueMutationFixedCacheKeyOptions, queueApi } from 'services/api/endpoints/queue';

export const addEnqueueRequestedUpscale = (startAppListening: AppStartListening) => {
startAppListening({
@@ -16,11 +16,7 @@

const batchConfig = prepareLinearUIBatch(state, g, prepend, noise, posCond, 'upscaling', 'gallery');

const req = dispatch(
queueApi.endpoints.enqueueBatch.initiate(batchConfig, {
fixedCacheKey: 'enqueueBatch',
})
);
const req = dispatch(queueApi.endpoints.enqueueBatch.initiate(batchConfig, enqueueMutationFixedCacheKeyOptions));
try {
await req.unwrap();
} finally {

@@ -41,29 +41,33 @@ export const addImageUploadedFulfilledListener = (startAppListening: AppStartLis

log.debug({ imageDTO }, 'Image uploaded');

if (action.meta.arg.originalArgs.silent || imageDTO.is_intermediate) {
// When a "silent" upload is requested, or the image is intermediate, we can skip all post-upload actions,
// like toasts and switching the gallery view
return;
}

const boardId = imageDTO.board_id ?? 'none';

if (action.meta.arg.originalArgs.withToast) {
const DEFAULT_UPLOADED_TOAST = {
id: 'IMAGE_UPLOADED',
title: t('toast.imageUploaded'),
status: 'success',
} as const;
const DEFAULT_UPLOADED_TOAST = {
id: 'IMAGE_UPLOADED',
title: t('toast.imageUploaded'),
status: 'success',
} as const;

// default action - just upload and alert user
if (lastUploadedToastTimeout !== null) {
window.clearTimeout(lastUploadedToastTimeout);
}
const toastApi = toast({
...DEFAULT_UPLOADED_TOAST,
title: DEFAULT_UPLOADED_TOAST.title,
description: getUploadedToastDescription(boardId, state),
duration: null, // we will close the toast manually
});
lastUploadedToastTimeout = window.setTimeout(() => {
toastApi.close();
}, 3000);
// default action - just upload and alert user
if (lastUploadedToastTimeout !== null) {
window.clearTimeout(lastUploadedToastTimeout);
}
const toastApi = toast({
...DEFAULT_UPLOADED_TOAST,
title: DEFAULT_UPLOADED_TOAST.title,
description: getUploadedToastDescription(boardId, state),
duration: null, // we will close the toast manually
});
lastUploadedToastTimeout = window.setTimeout(() => {
toastApi.close();
}, 3000);

/**
* We only want to change the board and view if this is the first upload of a batch, else we end up hijacking

@@ -26,7 +26,6 @@ export type AppFeature =
| 'bulkDownload'
| 'starterModels'
| 'hfToken';

/**
* A disable-able Stable Diffusion feature
*/

@@ -2,6 +2,7 @@ import { deepClone } from 'common/util/deepClone';
import { merge } from 'lodash-es';
import { ClickScrollPlugin, OverlayScrollbars } from 'overlayscrollbars';
import type { UseOverlayScrollbarsParams } from 'overlayscrollbars-react';
import type { CSSProperties } from 'react';

OverlayScrollbars.plugin(ClickScrollPlugin);

@@ -27,3 +28,8 @@ export const getOverlayScrollbarsParams = (
merge(params, { options: { overflow: { y: overflowY, x: overflowX } } });
return params;
};

export const overlayScrollbarsStyles: CSSProperties = {
height: '100%',
width: '100%',
};

@@ -0,0 +1,42 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import {
useNewCanvasSession,
useNewGallerySession,
} from 'features/controlLayers/components/NewSessionConfirmationAlertDialog';
import { canvasReset } from 'features/controlLayers/store/actions';
import { paramsReset } from 'features/controlLayers/store/paramsSlice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiArrowsCounterClockwiseBold, PiFilePlusBold } from 'react-icons/pi';

export const SessionMenuItems = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const { newGallerySessionWithDialog } = useNewGallerySession();
const { newCanvasSessionWithDialog } = useNewCanvasSession();
const resetCanvasLayers = useCallback(() => {
dispatch(canvasReset());
}, [dispatch]);
const resetGenerationSettings = useCallback(() => {
dispatch(paramsReset());
}, [dispatch]);
return (
<>
<MenuItem icon={<PiFilePlusBold />} onClick={newGallerySessionWithDialog}>
{t('controlLayers.newGallerySession')}
</MenuItem>
<MenuItem icon={<PiFilePlusBold />} onClick={newCanvasSessionWithDialog}>
{t('controlLayers.newCanvasSession')}
</MenuItem>
<MenuItem icon={<PiArrowsCounterClockwiseBold />} onClick={resetCanvasLayers}>
{t('controlLayers.resetCanvasLayers')}
</MenuItem>
<MenuItem icon={<PiArrowsCounterClockwiseBold />} onClick={resetGenerationSettings}>
{t('controlLayers.resetGenerationSettings')}
</MenuItem>
</>
);
});

SessionMenuItems.displayName = 'SessionMenuItems';
@@ -46,7 +46,7 @@ const REGION_TARGETS: Record<FocusRegionName, Set<HTMLElement>> = {
/**
 * The currently-focused region or `null` if no region is focused.
 */
-const $focusedRegion = atom<FocusRegionName | null>(null);
+export const $focusedRegion = atom<FocusRegionName | null>(null);

/**
 * A map of focus regions to atoms that indicate if that region is focused.
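Note: exporting the atom lets any module subscribe to focus changes directly. A minimal consumer sketch (the component name is illustrative):

import { useStore } from '@nanostores/react';

const FocusDebugBadge = () => {
  const focusedRegion = useStore($focusedRegion); // re-renders when focus changes
  return <span>{focusedRegion ?? 'none'}</span>;
};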
@@ -61,6 +61,11 @@ export const useImageUploadButton = ({ onUpload, isDisabled, allowMultiple }: Us
log.warn('Multiple files dropped but only one allowed');
return;
}
+if (files.length === 0) {
+// Should never happen
+log.warn('No files dropped');
+return;
+}
const file = files[0];
assert(file !== undefined); // should never happen
const imageDTO = await uploadImage({
@@ -68,18 +73,20 @@ export const useImageUploadButton = ({ onUpload, isDisabled, allowMultiple }: Us
image_category: 'user',
is_intermediate: false,
board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId,
silent: true,
}).unwrap();
if (onUpload) {
onUpload(imageDTO);
}
} else {
//
const imageDTOs = await uploadImages(
-files.map((file) => ({
+files.map((file, i) => ({
file,
image_category: 'user',
is_intermediate: false,
board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId,
silent: false,
+isFirstUploadOfBatch: i === 0,
}))
);
if (onUpload) {
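Note: the new `isFirstUploadOfBatch` flag lets downstream listeners run batch-level side effects (switching boards or gallery views) exactly once per multi-file upload. A reduced sketch of the idea, using only names from the diff:

// Sketch: tag only the first file of a multi-file upload.
const args = files.map((file, i) => ({
  file,
  silent: false,
  isFirstUploadOfBatch: i === 0, // listeners gate one-time side effects on this
}));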
@@ -134,11 +141,9 @@ export const useImageUploadButton = ({ onUpload, isDisabled, allowMultiple }: Us
};

const sx = {
borderColor: 'error.500',
borderStyle: 'solid',
borderWidth: 0,
borderRadius: 'base',
'&[data-error=true]': {
borderColor: 'error.500',
borderStyle: 'solid',
borderWidth: 1,
},
} satisfies SystemStyleObject;
@@ -157,7 +162,34 @@ export const UploadImageButton = ({
<>
<IconButton
aria-label="Upload image"
-variant="ghost"
+variant="outline"
+sx={sx}
+data-error={isError}
icon={<PiUploadBold />}
isLoading={uploadApi.request.isLoading}
{...rest}
{...uploadApi.getUploadButtonProps()}
/>
<input {...uploadApi.getUploadInputProps()} />
</>
);
};

+export const UploadMultipleImageButton = ({
+isDisabled = false,
+onUpload,
+isError = false,
+...rest
+}: {
+onUpload?: (imageDTOs: ImageDTO[]) => void;
+isError?: boolean;
+} & SetOptional<IconButtonProps, 'aria-label'>) => {
+const uploadApi = useImageUploadButton({ isDisabled, allowMultiple: true, onUpload });
+return (
+<>
+<IconButton
+aria-label="Upload image"
+variant="outline"
+sx={sx}
+data-error={isError}
+icon={<PiUploadBold />}
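Note: both buttons lean on `useImageUploadButton`, which splits its API into props for the visible button and props for a hidden file input. A sketch of that prop-getter pattern; the getter names come from the diff, the internals are assumed:

// Sketch: prop-getter pair driving a hidden file input.
const UploadExample = () => {
  const uploadApi = useImageUploadButton({ allowMultiple: false });
  return (
    <>
      {/* clicking the button forwards to the hidden input's click() */}
      <button {...uploadApi.getUploadButtonProps()}>Upload</button>
      <input {...uploadApi.getUploadInputProps()} />
    </>
  );
};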
@@ -1,331 +0,0 @@
import { useStore } from '@nanostores/react';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { $true } from 'app/store/nanostores/util';
import { useAppSelector } from 'app/store/storeHooks';
import { useCanvasManagerSafe } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { selectDynamicPromptsSlice } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
import { getShouldProcessPrompt } from 'features/dynamicPrompts/util/getShouldProcessPrompt';
import { $templates } from 'features/nodes/store/nodesSlice';
import { selectNodesSlice } from 'features/nodes/store/selectors';
import type { Templates } from 'features/nodes/store/types';
import { selectWorkflowSettingsSlice } from 'features/nodes/store/workflowSettingsSlice';
import { isInvocationNode } from 'features/nodes/types/invocation';
import { selectUpscaleSlice } from 'features/parameters/store/upscaleSlice';
import { selectConfigSlice } from 'features/system/store/configSlice';
import { selectSystemSlice } from 'features/system/store/systemSlice';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import i18n from 'i18next';
import { forEach, upperFirst } from 'lodash-es';
import { useMemo } from 'react';
import { getConnectedEdges } from 'reactflow';
import { $isConnected } from 'services/events/stores';

const LAYER_TYPE_TO_TKEY = {
reference_image: 'controlLayers.referenceImage',
inpaint_mask: 'controlLayers.inpaintMask',
regional_guidance: 'controlLayers.regionalGuidance',
raster_layer: 'controlLayers.rasterLayer',
control_layer: 'controlLayers.controlLayer',
} as const;

const createSelector = (
templates: Templates,
isConnected: boolean,
canvasIsFiltering: boolean,
canvasIsTransforming: boolean,
canvasIsRasterizing: boolean,
canvasIsCompositing: boolean
) =>
createMemoizedSelector(
[
selectSystemSlice,
selectNodesSlice,
selectWorkflowSettingsSlice,
selectDynamicPromptsSlice,
selectCanvasSlice,
selectParamsSlice,
selectUpscaleSlice,
selectConfigSlice,
selectActiveTab,
],
(system, nodes, workflowSettings, dynamicPrompts, canvas, params, upscale, config, activeTabName) => {
const { bbox } = canvas;
const { model, positivePrompt } = params;

const reasons: { prefix?: string; content: string }[] = [];

// Cannot generate if not connected
if (!isConnected) {
reasons.push({ content: i18n.t('parameters.invoke.systemDisconnected') });
}

if (activeTabName === 'workflows') {
if (workflowSettings.shouldValidateGraph) {
if (!nodes.nodes.length) {
reasons.push({ content: i18n.t('parameters.invoke.noNodesInGraph') });
}

nodes.nodes.forEach((node) => {
if (!isInvocationNode(node)) {
return;
}

const nodeTemplate = templates[node.data.type];

if (!nodeTemplate) {
// Node type not found
reasons.push({ content: i18n.t('parameters.invoke.missingNodeTemplate') });
return;
}

const connectedEdges = getConnectedEdges([node], nodes.edges);

forEach(node.data.inputs, (field) => {
const fieldTemplate = nodeTemplate.inputs[field.name];
const hasConnection = connectedEdges.some(
(edge) => edge.target === node.id && edge.targetHandle === field.name
);

if (!fieldTemplate) {
reasons.push({ content: i18n.t('parameters.invoke.missingFieldTemplate') });
return;
}

if (fieldTemplate.required && field.value === undefined && !hasConnection) {
reasons.push({
content: i18n.t('parameters.invoke.missingInputForField', {
nodeLabel: node.data.label || nodeTemplate.title,
fieldLabel: field.label || fieldTemplate.title,
}),
});
return;
}
});
});
}
} else if (activeTabName === 'upscaling') {
if (!upscale.upscaleInitialImage) {
reasons.push({ content: i18n.t('upscaling.missingUpscaleInitialImage') });
} else if (config.maxUpscaleDimension) {
const { width, height } = upscale.upscaleInitialImage;
const { scale } = upscale;

const maxPixels = config.maxUpscaleDimension ** 2;
const upscaledPixels = width * scale * height * scale;

if (upscaledPixels > maxPixels) {
reasons.push({ content: i18n.t('upscaling.exceedsMaxSize') });
}
}
if (model && !['sd-1', 'sdxl'].includes(model.base)) {
// When we are using an unsupported model, do not add the other warnings
reasons.push({ content: i18n.t('upscaling.incompatibleBaseModel') });
} else {
// Using a compatible model, add all warnings
if (!model) {
reasons.push({ content: i18n.t('parameters.invoke.noModelSelected') });
}
if (!upscale.upscaleModel) {
reasons.push({ content: i18n.t('upscaling.missingUpscaleModel') });
}
if (!upscale.tileControlnetModel) {
reasons.push({ content: i18n.t('upscaling.missingTileControlNetModel') });
}
}
} else {
if (canvasIsFiltering) {
reasons.push({ content: i18n.t('parameters.invoke.canvasIsFiltering') });
}
if (canvasIsTransforming) {
reasons.push({ content: i18n.t('parameters.invoke.canvasIsTransforming') });
}
if (canvasIsRasterizing) {
reasons.push({ content: i18n.t('parameters.invoke.canvasIsRasterizing') });
}
if (canvasIsCompositing) {
reasons.push({ content: i18n.t('parameters.invoke.canvasIsCompositing') });
}

if (dynamicPrompts.prompts.length === 0 && getShouldProcessPrompt(positivePrompt)) {
reasons.push({ content: i18n.t('parameters.invoke.noPrompts') });
}

if (!model) {
reasons.push({ content: i18n.t('parameters.invoke.noModelSelected') });
}

if (model?.base === 'flux') {
if (!params.t5EncoderModel) {
reasons.push({ content: i18n.t('parameters.invoke.noT5EncoderModelSelected') });
}
if (!params.clipEmbedModel) {
reasons.push({ content: i18n.t('parameters.invoke.noCLIPEmbedModelSelected') });
}
if (!params.fluxVAE) {
reasons.push({ content: i18n.t('parameters.invoke.noFLUXVAEModelSelected') });
}
if (bbox.scaleMethod === 'none') {
if (bbox.rect.width % 16 !== 0) {
reasons.push({
content: i18n.t('parameters.invoke.fluxModelIncompatibleBboxWidth', { width: bbox.rect.width }),
});
}
if (bbox.rect.height % 16 !== 0) {
reasons.push({
content: i18n.t('parameters.invoke.fluxModelIncompatibleBboxHeight', { height: bbox.rect.height }),
});
}
} else {
if (bbox.scaledSize.width % 16 !== 0) {
reasons.push({
content: i18n.t('parameters.invoke.fluxModelIncompatibleScaledBboxWidth', {
width: bbox.scaledSize.width,
}),
});
}
if (bbox.scaledSize.height % 16 !== 0) {
reasons.push({
content: i18n.t('parameters.invoke.fluxModelIncompatibleScaledBboxHeight', {
height: bbox.scaledSize.height,
}),
});
}
}
}

canvas.controlLayers.entities
.filter((controlLayer) => controlLayer.isEnabled)
.forEach((controlLayer, i) => {
const layerLiteral = i18n.t('controlLayers.layer_one');
const layerNumber = i + 1;
const layerType = i18n.t(LAYER_TYPE_TO_TKEY['control_layer']);
const prefix = `${layerLiteral} #${layerNumber} (${layerType})`;
const problems: string[] = [];
// Must have model
if (!controlLayer.controlAdapter.model) {
problems.push(i18n.t('parameters.invoke.layer.controlAdapterNoModelSelected'));
}
// Model base must match
if (controlLayer.controlAdapter.model?.base !== model?.base) {
problems.push(i18n.t('parameters.invoke.layer.controlAdapterIncompatibleBaseModel'));
}
if (problems.length) {
const content = upperFirst(problems.join(', '));
reasons.push({ prefix, content });
}
});

canvas.referenceImages.entities
.filter((entity) => entity.isEnabled)
.forEach((entity, i) => {
const layerLiteral = i18n.t('controlLayers.layer_one');
const layerNumber = i + 1;
const layerType = i18n.t(LAYER_TYPE_TO_TKEY[entity.type]);
const prefix = `${layerLiteral} #${layerNumber} (${layerType})`;
const problems: string[] = [];

// Must have model
if (!entity.ipAdapter.model) {
problems.push(i18n.t('parameters.invoke.layer.ipAdapterNoModelSelected'));
}
// Model base must match
if (entity.ipAdapter.model?.base !== model?.base) {
problems.push(i18n.t('parameters.invoke.layer.ipAdapterIncompatibleBaseModel'));
}
// Must have an image
if (!entity.ipAdapter.image) {
problems.push(i18n.t('parameters.invoke.layer.ipAdapterNoImageSelected'));
}

if (problems.length) {
const content = upperFirst(problems.join(', '));
reasons.push({ prefix, content });
}
});

canvas.regionalGuidance.entities
.filter((entity) => entity.isEnabled)
.forEach((entity, i) => {
const layerLiteral = i18n.t('controlLayers.layer_one');
const layerNumber = i + 1;
const layerType = i18n.t(LAYER_TYPE_TO_TKEY[entity.type]);
const prefix = `${layerLiteral} #${layerNumber} (${layerType})`;
const problems: string[] = [];
// Must have a region
if (entity.objects.length === 0) {
problems.push(i18n.t('parameters.invoke.layer.rgNoRegion'));
}
// Must have at least 1 prompt or IP Adapter
if (
entity.positivePrompt === null &&
entity.negativePrompt === null &&
entity.referenceImages.length === 0
) {
problems.push(i18n.t('parameters.invoke.layer.rgNoPromptsOrIPAdapters'));
}
entity.referenceImages.forEach(({ ipAdapter }) => {
// Must have model
if (!ipAdapter.model) {
problems.push(i18n.t('parameters.invoke.layer.ipAdapterNoModelSelected'));
}
// Model base must match
if (ipAdapter.model?.base !== model?.base) {
problems.push(i18n.t('parameters.invoke.layer.ipAdapterIncompatibleBaseModel'));
}
// Must have an image
if (!ipAdapter.image) {
problems.push(i18n.t('parameters.invoke.layer.ipAdapterNoImageSelected'));
}
});

if (problems.length) {
const content = upperFirst(problems.join(', '));
reasons.push({ prefix, content });
}
});

canvas.rasterLayers.entities
.filter((entity) => entity.isEnabled)
.forEach((entity, i) => {
const layerLiteral = i18n.t('controlLayers.layer_one');
const layerNumber = i + 1;
const layerType = i18n.t(LAYER_TYPE_TO_TKEY[entity.type]);
const prefix = `${layerLiteral} #${layerNumber} (${layerType})`;
const problems: string[] = [];

if (problems.length) {
const content = upperFirst(problems.join(', '));
reasons.push({ prefix, content });
}
});
}

return { isReady: !reasons.length, reasons };
}
);

export const useIsReadyToEnqueue = () => {
const templates = useStore($templates);
const isConnected = useStore($isConnected);
const canvasManager = useCanvasManagerSafe();
const canvasIsFiltering = useStore(canvasManager?.stateApi.$isFiltering ?? $true);
const canvasIsTransforming = useStore(canvasManager?.stateApi.$isTransforming ?? $true);
const canvasIsRasterizing = useStore(canvasManager?.stateApi.$isRasterizing ?? $true);
const canvasIsCompositing = useStore(canvasManager?.compositor.$isBusy ?? $true);
const selector = useMemo(
() =>
createSelector(
templates,
isConnected,
canvasIsFiltering,
canvasIsTransforming,
canvasIsRasterizing,
canvasIsCompositing
),
[templates, isConnected, canvasIsFiltering, canvasIsTransforming, canvasIsRasterizing, canvasIsCompositing]
);
const value = useAppSelector(selector);
return value;
};
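Note: the core pattern of this deleted hook is to accumulate human-readable `reasons` and derive readiness from their absence. A condensed sketch of the pattern, with illustrative inputs and plain strings standing in for the i18n keys:

// Sketch: readiness as the absence of accumulated reasons.
type Reason = { prefix?: string; content: string };

const getReadiness = (isConnected: boolean, model: { base: string } | null) => {
  const reasons: Reason[] = [];
  if (!isConnected) {
    reasons.push({ content: 'System disconnected' });
  }
  if (!model) {
    reasons.push({ content: 'No model selected' });
  }
  return { isReady: !reasons.length, reasons };
};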
@@ -9,7 +9,7 @@ import {
useAddRegionalGuidance,
useAddRegionalReferenceImage,
} from 'features/controlLayers/hooks/addLayerHooks';
-import { selectIsFLUX } from 'features/controlLayers/store/paramsSlice';
+import { selectIsFLUX, selectIsSD3 } from 'features/controlLayers/store/paramsSlice';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiPlusBold } from 'react-icons/pi';
@@ -23,6 +23,7 @@ export const CanvasAddEntityButtons = memo(() => {
const addGlobalReferenceImage = useAddGlobalReferenceImage();
const addRegionalReferenceImage = useAddRegionalReferenceImage();
const isFLUX = useAppSelector(selectIsFLUX);
+const isSD3 = useAppSelector(selectIsSD3);

return (
<Flex w="full" h="full" justifyContent="center" gap={4}>
@@ -36,6 +37,7 @@ export const CanvasAddEntityButtons = memo(() => {
justifyContent="flex-start"
leftIcon={<PiPlusBold />}
onClick={addGlobalReferenceImage}
+isDisabled={isSD3}
>
{t('controlLayers.globalReferenceImage')}
</Button>
@@ -61,7 +63,7 @@ export const CanvasAddEntityButtons = memo(() => {
justifyContent="flex-start"
leftIcon={<PiPlusBold />}
onClick={addRegionalGuidance}
-isDisabled={isFLUX}
+isDisabled={isSD3}
>
{t('controlLayers.regionalGuidance')}
</Button>
@@ -73,7 +75,7 @@ export const CanvasAddEntityButtons = memo(() => {
justifyContent="flex-start"
leftIcon={<PiPlusBold />}
onClick={addRegionalReferenceImage}
-isDisabled={isFLUX}
+isDisabled={isFLUX || isSD3}
>
{t('controlLayers.regionalReferenceImage')}
</Button>
@@ -88,6 +90,7 @@ export const CanvasAddEntityButtons = memo(() => {
justifyContent="flex-start"
leftIcon={<PiPlusBold />}
onClick={addControlLayer}
+isDisabled={isSD3}
>
{t('controlLayers.controlLayer')}
</Button>
@@ -0,0 +1,61 @@
import { Alert, AlertDescription, AlertIcon, AlertTitle } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { useAppSelector } from 'app/store/storeHooks';
import { useDeferredModelLoadingInvocationProgressMessage } from 'features/controlLayers/hooks/useDeferredModelLoadingInvocationProgressMessage';
import { selectIsLocal } from 'features/system/store/configSlice';
import { selectSystemShouldShowInvocationProgressDetail } from 'features/system/store/systemSlice';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { $invocationProgressMessage } from 'services/events/stores';

const CanvasAlertsInvocationProgressContentLocal = memo(() => {
const { t } = useTranslation();
const invocationProgressMessage = useStore($invocationProgressMessage);

if (!invocationProgressMessage) {
return null;
}

return (
<Alert status="loading" borderRadius="base" fontSize="sm" shadow="md" w="fit-content">
<AlertIcon />
<AlertTitle>{t('common.generating')}</AlertTitle>
<AlertDescription>{invocationProgressMessage}</AlertDescription>
</Alert>
);
});
CanvasAlertsInvocationProgressContentLocal.displayName = 'CanvasAlertsInvocationProgressContentLocal';

const CanvasAlertsInvocationProgressContentCommercial = memo(() => {
const message = useDeferredModelLoadingInvocationProgressMessage();

if (!message) {
return null;
}

return (
<Alert status="loading" borderRadius="base" fontSize="sm" shadow="md" w="fit-content">
<AlertIcon />
<AlertDescription>{message}</AlertDescription>
</Alert>
);
});
CanvasAlertsInvocationProgressContentCommercial.displayName = 'CanvasAlertsInvocationProgressContentCommercial';

export const CanvasAlertsInvocationProgress = memo(() => {
const shouldShowInvocationProgressDetail = useAppSelector(selectSystemShouldShowInvocationProgressDetail);
const isLocal = useAppSelector(selectIsLocal);

if (!isLocal) {
return <CanvasAlertsInvocationProgressContentCommercial />;
}

// OSS user setting
if (!shouldShowInvocationProgressDetail) {
return null;
}

return <CanvasAlertsInvocationProgressContentLocal />;
});

CanvasAlertsInvocationProgress.displayName = 'CanvasAlertsInvocationProgress';
@@ -3,6 +3,7 @@ import { Alert, AlertIcon, AlertTitle } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/store/storeHooks';
+import { CanvasEntityStateGate } from 'features/controlLayers/contexts/CanvasEntityStateGate';
import { useEntityAdapterSafe } from 'features/controlLayers/contexts/EntityAdapterContext';
import { useEntityTitle } from 'features/controlLayers/hooks/useEntityTitle';
import { useEntityTypeIsHidden } from 'features/controlLayers/hooks/useEntityTypeIsHidden';
@@ -29,17 +30,23 @@ type AlertData = {
title: string;
};

+const buildSelectIsEnabled = (entityIdentifier: CanvasEntityIdentifier) =>
+createSelector(
+selectCanvasSlice,
+(canvas) => selectEntityOrThrow(canvas, entityIdentifier, 'CanvasAlertsSelectedEntityStatusContent').isEnabled
+);
+
+const buildSelectIsLocked = (entityIdentifier: CanvasEntityIdentifier) =>
+createSelector(
+selectCanvasSlice,
+(canvas) => selectEntityOrThrow(canvas, entityIdentifier, 'CanvasAlertsSelectedEntityStatusContent').isLocked
+);
+
const CanvasAlertsSelectedEntityStatusContent = memo(({ entityIdentifier, adapter }: ContentProps) => {
const { t } = useTranslation();
const title = useEntityTitle(entityIdentifier);
-const selectIsEnabled = useMemo(
-() => createSelector(selectCanvasSlice, (canvas) => selectEntityOrThrow(canvas, entityIdentifier).isEnabled),
-[entityIdentifier]
-);
-const selectIsLocked = useMemo(
-() => createSelector(selectCanvasSlice, (canvas) => selectEntityOrThrow(canvas, entityIdentifier).isLocked),
-[entityIdentifier]
-);
+const selectIsEnabled = useMemo(() => buildSelectIsEnabled(entityIdentifier), [entityIdentifier]);
+const selectIsLocked = useMemo(() => buildSelectIsLocked(entityIdentifier), [entityIdentifier]);
const isEnabled = useAppSelector(selectIsEnabled);
const isLocked = useAppSelector(selectIsLocked);
const isHidden = useEntityTypeIsHidden(entityIdentifier.type);
@@ -115,7 +122,11 @@ export const CanvasAlertsSelectedEntityStatus = memo(() => {
return null;
}

-return <CanvasAlertsSelectedEntityStatusContent entityIdentifier={selectedEntityIdentifier} adapter={adapter} />;
+return (
+<CanvasEntityStateGate entityIdentifier={selectedEntityIdentifier}>
+<CanvasAlertsSelectedEntityStatusContent entityIdentifier={selectedEntityIdentifier} adapter={adapter} />
+</CanvasEntityStateGate>
+);
});

CanvasAlertsSelectedEntityStatus.displayName = 'CanvasAlertsSelectedEntityStatus';
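Note: this `buildSelectX` factory plus `useMemo` shape recurs through the rest of the diff. Hoisting the selector factory to module scope keeps the per-entity memoization while making the only dependency explicit. A generic sketch using names from the diff:

// Sketch: module-scope selector factory, memoized per entity in the component.
const buildSelectIsEnabled = (entityIdentifier: CanvasEntityIdentifier) =>
  createSelector(selectCanvasSlice, (canvas) => selectEntityOrThrow(canvas, entityIdentifier).isEnabled);

const useIsEnabled = (entityIdentifier: CanvasEntityIdentifier) => {
  const selectIsEnabled = useMemo(() => buildSelectIsEnabled(entityIdentifier), [entityIdentifier]);
  return useAppSelector(selectIsEnabled);
};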
@@ -5,6 +5,7 @@ import { InpaintMaskMenuItems } from 'features/controlLayers/components/InpaintM
import { IPAdapterMenuItems } from 'features/controlLayers/components/IPAdapter/IPAdapterMenuItems';
import { RasterLayerMenuItems } from 'features/controlLayers/components/RasterLayer/RasterLayerMenuItems';
import { RegionalGuidanceMenuItems } from 'features/controlLayers/components/RegionalGuidance/RegionalGuidanceMenuItems';
+import { CanvasEntityStateGate } from 'features/controlLayers/contexts/CanvasEntityStateGate';
import {
EntityIdentifierContext,
useEntityIdentifierContext,
@@ -40,6 +41,15 @@ const CanvasContextMenuSelectedEntityMenuItemsContent = memo(() => {

CanvasContextMenuSelectedEntityMenuItemsContent.displayName = 'CanvasContextMenuSelectedEntityMenuItemsContent';

+const CanvasContextMenuSelectedEntityMenuGroup = memo((props: PropsWithChildren) => {
+const entityIdentifier = useEntityIdentifierContext();
+const title = useEntityTypeString(entityIdentifier.type);
+
+return <MenuGroup title={title}>{props.children}</MenuGroup>;
+});
+
+CanvasContextMenuSelectedEntityMenuGroup.displayName = 'CanvasContextMenuSelectedEntityMenuGroup';

export const CanvasContextMenuSelectedEntityMenuItems = memo(() => {
const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);

@@ -49,20 +59,13 @@ export const CanvasContextMenuSelectedEntityMenuItems = memo(() => {

return (
<EntityIdentifierContext.Provider value={selectedEntityIdentifier}>
-<CanvasContextMenuSelectedEntityMenuGroup>
-<CanvasContextMenuSelectedEntityMenuItemsContent />
-</CanvasContextMenuSelectedEntityMenuGroup>
+<CanvasEntityStateGate entityIdentifier={selectedEntityIdentifier}>
+<CanvasContextMenuSelectedEntityMenuGroup>
+<CanvasContextMenuSelectedEntityMenuItemsContent />
+</CanvasContextMenuSelectedEntityMenuGroup>
+</CanvasEntityStateGate>
</EntityIdentifierContext.Provider>
);
});

CanvasContextMenuSelectedEntityMenuItems.displayName = 'CanvasContextMenuSelectedEntityMenuItems';

-const CanvasContextMenuSelectedEntityMenuGroup = memo((props: PropsWithChildren) => {
-const entityIdentifier = useEntityIdentifierContext();
-const title = useEntityTypeString(entityIdentifier.type);
-
-return <MenuGroup title={title}>{props.children}</MenuGroup>;
-});
-
-CanvasContextMenuSelectedEntityMenuGroup.displayName = 'CanvasContextMenuSelectedEntityMenuGroup';
@@ -9,7 +9,7 @@ import {
useAddRegionalReferenceImage,
} from 'features/controlLayers/hooks/addLayerHooks';
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
-import { selectIsFLUX } from 'features/controlLayers/store/paramsSlice';
+import { selectIsFLUX, selectIsSD3 } from 'features/controlLayers/store/paramsSlice';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiPlusBold } from 'react-icons/pi';
@@ -24,6 +24,7 @@ export const EntityListGlobalActionBarAddLayerMenu = memo(() => {
const addRasterLayer = useAddRasterLayer();
const addControlLayer = useAddControlLayer();
const isFLUX = useAppSelector(selectIsFLUX);
+const isSD3 = useAppSelector(selectIsSD3);

return (
<Menu>
@@ -40,7 +41,7 @@ export const EntityListGlobalActionBarAddLayerMenu = memo(() => {
/>
<MenuList>
<MenuGroup title={t('controlLayers.global')}>
-<MenuItem icon={<PiPlusBold />} onClick={addGlobalReferenceImage}>
+<MenuItem icon={<PiPlusBold />} onClick={addGlobalReferenceImage} isDisabled={isSD3}>
{t('controlLayers.globalReferenceImage')}
</MenuItem>
</MenuGroup>
@@ -48,15 +49,15 @@ export const EntityListGlobalActionBarAddLayerMenu = memo(() => {
<MenuItem icon={<PiPlusBold />} onClick={addInpaintMask}>
{t('controlLayers.inpaintMask')}
</MenuItem>
-<MenuItem icon={<PiPlusBold />} onClick={addRegionalGuidance} isDisabled={isFLUX}>
+<MenuItem icon={<PiPlusBold />} onClick={addRegionalGuidance} isDisabled={isSD3}>
{t('controlLayers.regionalGuidance')}
</MenuItem>
-<MenuItem icon={<PiPlusBold />} onClick={addRegionalReferenceImage} isDisabled={isFLUX}>
+<MenuItem icon={<PiPlusBold />} onClick={addRegionalReferenceImage} isDisabled={isFLUX || isSD3}>
{t('controlLayers.regionalReferenceImage')}
</MenuItem>
</MenuGroup>
<MenuGroup title={t('controlLayers.layer_other')}>
-<MenuItem icon={<PiPlusBold />} onClick={addControlLayer}>
+<MenuItem icon={<PiPlusBold />} onClick={addControlLayer} isDisabled={isSD3}>
{t('controlLayers.controlLayer')}
</MenuItem>
<MenuItem icon={<PiPlusBold />} onClick={addRasterLayer}>
@@ -4,6 +4,7 @@ import { attachClosestEdge, extractClosestEdge } from '@atlaskit/pragmatic-drag-
import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
import { singleCanvasEntityDndSource } from 'features/dnd/dnd';
import { type DndListTargetState, idle } from 'features/dnd/types';
+import { firefoxDndFix } from 'features/dnd/util';
import type { RefObject } from 'react';
import { useEffect, useState } from 'react';

@@ -17,6 +18,7 @@ export const useCanvasEntityListDnd = (ref: RefObject<HTMLElement>, entityIdenti
return;
}
return combine(
+firefoxDndFix(element),
draggable({
element,
getInitialData() {
@@ -21,6 +21,8 @@ import { GatedImageViewer } from 'features/gallery/components/ImageViewer/ImageV
import { memo, useCallback, useRef } from 'react';
import { PiDotsThreeOutlineVerticalFill } from 'react-icons/pi';

+import { CanvasAlertsInvocationProgress } from './CanvasAlerts/CanvasAlertsInvocationProgress';

const MenuContent = () => {
return (
<CanvasManagerProviderGate>
@@ -84,6 +86,7 @@ export const CanvasMainPanelContent = memo(() => {
<CanvasAlertsSelectedEntityStatus />
<CanvasAlertsPreserveMask />
<CanvasAlertsSendingToGallery />
+<CanvasAlertsInvocationProgress />
</Flex>
<Flex position="absolute" top={1} insetInlineEnd={1}>
<Menu>
@@ -7,6 +7,7 @@ import { CanvasEntitySettingsWrapper } from 'features/controlLayers/components/c
import { CanvasEntityEditableTitle } from 'features/controlLayers/components/common/CanvasEntityTitleEdit';
import { ControlLayerBadges } from 'features/controlLayers/components/ControlLayer/ControlLayerBadges';
import { ControlLayerSettings } from 'features/controlLayers/components/ControlLayer/ControlLayerSettings';
+import { CanvasEntityStateGate } from 'features/controlLayers/contexts/CanvasEntityStateGate';
import { ControlLayerAdapterGate } from 'features/controlLayers/contexts/EntityAdapterContext';
import { EntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
@@ -36,24 +37,26 @@ export const ControlLayer = memo(({ id }: Props) => {
return (
<EntityIdentifierContext.Provider value={entityIdentifier}>
<ControlLayerAdapterGate>
-<CanvasEntityContainer>
-<CanvasEntityHeader>
-<CanvasEntityPreviewImage />
-<CanvasEntityEditableTitle />
-<Spacer />
-<ControlLayerBadges />
-<CanvasEntityHeaderCommonActions />
-</CanvasEntityHeader>
-<CanvasEntitySettingsWrapper>
-<ControlLayerSettings />
-</CanvasEntitySettingsWrapper>
-<DndDropTarget
-dndTarget={replaceCanvasEntityObjectsWithImageDndTarget}
-dndTargetData={dndTargetData}
-label={t('controlLayers.replaceLayer')}
-isDisabled={isBusy}
-/>
-</CanvasEntityContainer>
+<CanvasEntityStateGate entityIdentifier={entityIdentifier}>
+<CanvasEntityContainer>
+<CanvasEntityHeader>
+<CanvasEntityPreviewImage />
+<CanvasEntityEditableTitle />
+<Spacer />
+<ControlLayerBadges />
+<CanvasEntityHeaderCommonActions />
+</CanvasEntityHeader>
+<CanvasEntitySettingsWrapper>
+<ControlLayerSettings />
+</CanvasEntitySettingsWrapper>
+<DndDropTarget
+dndTarget={replaceCanvasEntityObjectsWithImageDndTarget}
+dndTargetData={dndTargetData}
+label={t('controlLayers.replaceLayer')}
+isDisabled={isBusy}
+/>
+</CanvasEntityContainer>
+</CanvasEntityStateGate>
</ControlLayerAdapterGate>
</EntityIdentifierContext.Provider>
);
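Note: the same gate-wrap appears below for InpaintMask, RasterLayer, RegionalGuidance, and IPAdapter. `CanvasEntityStateGate` itself is not shown in this diff; a plausible sketch, assuming it renders children only while the entity still exists in canvas state, so descendants can safely call `selectEntityOrThrow`:

// Hypothetical sketch of the gate (not from the diff): render nothing once the
// entity is gone, so children may assume the entity exists.
const CanvasEntityStateGate = ({
  entityIdentifier,
  children,
}: PropsWithChildren<{ entityIdentifier: CanvasEntityIdentifier }>) => {
  const selectHasEntity = useMemo(
    () => createSelector(selectCanvasSlice, (canvas) => selectEntity(canvas, entityIdentifier) !== undefined),
    [entityIdentifier]
  );
  const hasEntity = useAppSelector(selectHasEntity);
  return hasEntity ? <>{children}</> : null;
};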
@@ -1,26 +1,47 @@
import { Badge } from '@invoke-ai/ui-library';
+import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/store/storeHooks';
+import { CanvasEntityStateGate } from 'features/controlLayers/contexts/CanvasEntityStateGate';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
-import { memo } from 'react';
+import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
+import { memo, useMemo } from 'react';
import { useTranslation } from 'react-i18next';

-export const ControlLayerBadges = memo(() => {
-const entityIdentifier = useEntityIdentifierContext('control_layer');
-const { t } = useTranslation();
-const withTransparencyEffect = useAppSelector(
-(s) => selectEntityOrThrow(selectCanvasSlice(s), entityIdentifier).withTransparencyEffect
+const buildSelectWithTransparencyEffect = (entityIdentifier: CanvasEntityIdentifier<'control_layer'>) =>
+createSelector(
+selectCanvasSlice,
+(canvas) => selectEntityOrThrow(canvas, entityIdentifier, 'ControlLayerBadgesContent').withTransparencyEffect
);

+const ControlLayerBadgesContent = memo(() => {
+const entityIdentifier = useEntityIdentifierContext('control_layer');
+const { t } = useTranslation();
+const selectWithTransparencyEffect = useMemo(
+() => buildSelectWithTransparencyEffect(entityIdentifier),
+[entityIdentifier]
+);
+const withTransparencyEffect = useAppSelector(selectWithTransparencyEffect);
+
+if (!withTransparencyEffect) {
+return null;
+}
+
return (
-<>
-{withTransparencyEffect && (
-<Badge color="base.300" bg="transparent" borderWidth={1} userSelect="none">
-{t('controlLayers.transparency')}
-</Badge>
-)}
-</>
+<Badge color="base.300" bg="transparent" borderWidth={1} userSelect="none">
+{t('controlLayers.transparency')}
+</Badge>
);
});

+ControlLayerBadgesContent.displayName = 'ControlLayerBadgesContent';
+
+export const ControlLayerBadges = memo(() => {
+const entityIdentifier = useEntityIdentifierContext('control_layer');
+return (
+<CanvasEntityStateGate entityIdentifier={entityIdentifier}>
+<ControlLayerBadgesContent />
+</CanvasEntityStateGate>
+);
+});
ControlLayerBadges.displayName = 'ControlLayerBadges';
@@ -28,24 +28,18 @@ import { useTranslation } from 'react-i18next';
import { PiBoundingBoxBold, PiShootingStarFill, PiUploadBold } from 'react-icons/pi';
import type { ControlNetModelConfig, ImageDTO, T2IAdapterModelConfig } from 'services/api/types';

-const useControlLayerControlAdapter = (entityIdentifier: CanvasEntityIdentifier<'control_layer'>) => {
-const selectControlAdapter = useMemo(
-() =>
-createMemoizedAppSelector(selectCanvasSlice, (canvas) => {
-const layer = selectEntityOrThrow(canvas, entityIdentifier);
-return layer.controlAdapter;
-}),
-[entityIdentifier]
-);
-const controlAdapter = useAppSelector(selectControlAdapter);
-return controlAdapter;
-};
+const buildSelectControlAdapter = (entityIdentifier: CanvasEntityIdentifier<'control_layer'>) =>
+createMemoizedAppSelector(selectCanvasSlice, (canvas) => {
+const layer = selectEntityOrThrow(canvas, entityIdentifier, 'ControlLayerControlAdapter');
+return layer.controlAdapter;
+});

export const ControlLayerControlAdapter = memo(() => {
const { t } = useTranslation();
const { dispatch, getState } = useAppStore();
const entityIdentifier = useEntityIdentifierContext('control_layer');
-const controlAdapter = useControlLayerControlAdapter(entityIdentifier);
+const selectControlAdapter = useMemo(() => buildSelectControlAdapter(entityIdentifier), [entityIdentifier]);
+const controlAdapter = useAppSelector(selectControlAdapter);
const filter = useEntityFilter(entityIdentifier);
const isFLUX = useAppSelector(selectIsFLUX);
const adapter = useEntityAdapterContext('control_layer');
@@ -5,21 +5,25 @@ import { useEntityIdentifierContext } from 'features/controlLayers/contexts/Enti
import { useEntityIsLocked } from 'features/controlLayers/hooks/useEntityIsLocked';
import { controlLayerWithTransparencyEffectToggled } from 'features/controlLayers/store/canvasSlice';
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
+import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiDropHalfBold } from 'react-icons/pi';

+const buildSelectWithTransparencyEffect = (entityIdentifier: CanvasEntityIdentifier<'control_layer'>) =>
+createSelector(
+selectCanvasSlice,
+(canvas) =>
+selectEntityOrThrow(canvas, entityIdentifier, 'ControlLayerMenuItemsTransparencyEffect').withTransparencyEffect
+);
+
export const ControlLayerMenuItemsTransparencyEffect = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const entityIdentifier = useEntityIdentifierContext('control_layer');
const isLocked = useEntityIsLocked(entityIdentifier);
const selectWithTransparencyEffect = useMemo(
-() =>
-createSelector(selectCanvasSlice, (canvas) => {
-const entity = selectEntityOrThrow(canvas, entityIdentifier);
-return entity.withTransparencyEffect;
-}),
+() => buildSelectWithTransparencyEffect(entityIdentifier),
[entityIdentifier]
);
const withTransparencyEffect = useAppSelector(selectWithTransparencyEffect);
@@ -4,6 +4,7 @@ import { CanvasEntityHeader } from 'features/controlLayers/components/common/Can
import { CanvasEntityHeaderCommonActions } from 'features/controlLayers/components/common/CanvasEntityHeaderCommonActions';
import { CanvasEntityEditableTitle } from 'features/controlLayers/components/common/CanvasEntityTitleEdit';
import { IPAdapterSettings } from 'features/controlLayers/components/IPAdapter/IPAdapterSettings';
+import { CanvasEntityStateGate } from 'features/controlLayers/contexts/CanvasEntityStateGate';
import { EntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
import { memo, useMemo } from 'react';
@@ -17,14 +18,16 @@ export const IPAdapter = memo(({ id }: Props) => {

return (
<EntityIdentifierContext.Provider value={entityIdentifier}>
-<CanvasEntityContainer>
-<CanvasEntityHeader ps={4} py={5}>
-<CanvasEntityEditableTitle />
-<Spacer />
-<CanvasEntityHeaderCommonActions />
-</CanvasEntityHeader>
-<IPAdapterSettings />
-</CanvasEntityContainer>
+<CanvasEntityStateGate entityIdentifier={entityIdentifier}>
+<CanvasEntityContainer>
+<CanvasEntityHeader ps={4} py={5}>
+<CanvasEntityEditableTitle />
+<Spacer />
+<CanvasEntityHeaderCommonActions />
+</CanvasEntityHeader>
+<IPAdapterSettings />
+</CanvasEntityContainer>
+</CanvasEntityStateGate>
</EntityIdentifierContext.Provider>
);
});
@@ -1,8 +1,10 @@
import type { ComboboxOnChange } from '@invoke-ai/ui-library';
import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library';
+import { useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import type { IPMethodV2 } from 'features/controlLayers/store/types';
import { isIPMethodV2 } from 'features/controlLayers/store/types';
+import { selectSystemShouldEnableModelDescriptions } from 'features/system/store/systemSlice';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { assert } from 'tsafe';
@@ -14,13 +16,27 @@ type Props = {

export const IPAdapterMethod = memo(({ method, onChange }: Props) => {
const { t } = useTranslation();
+const shouldShowModelDescriptions = useAppSelector(selectSystemShouldEnableModelDescriptions);

const options: { label: string; value: IPMethodV2 }[] = useMemo(
() => [
-{ label: t('controlLayers.ipAdapterMethod.full'), value: 'full' },
-{ label: t('controlLayers.ipAdapterMethod.style'), value: 'style' },
-{ label: t('controlLayers.ipAdapterMethod.composition'), value: 'composition' },
+{
+label: t('controlLayers.ipAdapterMethod.full'),
+value: 'full',
+description: shouldShowModelDescriptions ? t('controlLayers.ipAdapterMethod.fullDesc') : undefined,
+},
+{
+label: t('controlLayers.ipAdapterMethod.style'),
+value: 'style',
+description: shouldShowModelDescriptions ? t('controlLayers.ipAdapterMethod.styleDesc') : undefined,
+},
+{
+label: t('controlLayers.ipAdapterMethod.composition'),
+value: 'composition',
+description: shouldShowModelDescriptions ? t('controlLayers.ipAdapterMethod.compositionDesc') : undefined,
+},
],
-[t]
+[t, shouldShowModelDescriptions]
);
const _onChange = useCallback<ComboboxOnChange>(
(v) => {
@@ -5,6 +5,7 @@ import { BeginEndStepPct } from 'features/controlLayers/components/common/BeginE
import { CanvasEntitySettingsWrapper } from 'features/controlLayers/components/common/CanvasEntitySettingsWrapper';
import { Weight } from 'features/controlLayers/components/common/Weight';
import { IPAdapterMethod } from 'features/controlLayers/components/IPAdapter/IPAdapterMethod';
+import { IPAdapterSettingsEmptyState } from 'features/controlLayers/components/IPAdapter/IPAdapterSettingsEmptyState';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { usePullBboxIntoGlobalReferenceImage } from 'features/controlLayers/hooks/saveCanvasHooks';
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
@@ -17,8 +18,8 @@ import {
referenceImageIPAdapterWeightChanged,
} from 'features/controlLayers/store/canvasSlice';
import { selectIsFLUX } from 'features/controlLayers/store/paramsSlice';
-import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
-import type { CLIPVisionModelV2, IPMethodV2 } from 'features/controlLayers/store/types';
+import { selectCanvasSlice, selectEntity, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
+import type { CanvasEntityIdentifier, CLIPVisionModelV2, IPMethodV2 } from 'features/controlLayers/store/types';
import type { SetGlobalReferenceImageDndTargetData } from 'features/dnd/dnd';
import { setGlobalReferenceImageDndTarget } from 'features/dnd/dnd';
import { memo, useCallback, useMemo } from 'react';
@@ -29,14 +30,17 @@ import type { ImageDTO, IPAdapterModelConfig } from 'services/api/types';
import { IPAdapterImagePreview } from './IPAdapterImagePreview';
import { IPAdapterModel } from './IPAdapterModel';

-export const IPAdapterSettings = memo(() => {
+const buildSelectIPAdapter = (entityIdentifier: CanvasEntityIdentifier<'reference_image'>) =>
+createSelector(
+selectCanvasSlice,
+(canvas) => selectEntityOrThrow(canvas, entityIdentifier, 'IPAdapterSettings').ipAdapter
+);
+
+const IPAdapterSettingsContent = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const entityIdentifier = useEntityIdentifierContext('reference_image');
-const selectIPAdapter = useMemo(
-() => createSelector(selectCanvasSlice, (s) => selectEntityOrThrow(s, entityIdentifier).ipAdapter),
-[entityIdentifier]
-);
+const selectIPAdapter = useMemo(() => buildSelectIPAdapter(entityIdentifier), [entityIdentifier]);
const ipAdapter = useAppSelector(selectIPAdapter);

const onChangeBeginEndStepPct = useCallback(
@@ -131,4 +135,25 @@ export const IPAdapterSettings = memo(() => {
);
});

+IPAdapterSettingsContent.displayName = 'IPAdapterSettingsContent';
+
+const buildSelectIPAdapterHasImage = (entityIdentifier: CanvasEntityIdentifier<'reference_image'>) =>
+createSelector(selectCanvasSlice, (canvas) => {
+const referenceImage = selectEntity(canvas, entityIdentifier);
+return !!referenceImage && referenceImage.ipAdapter.image !== null;
+});
+
+export const IPAdapterSettings = memo(() => {
+const entityIdentifier = useEntityIdentifierContext('reference_image');
+
+const selectIPAdapterHasImage = useMemo(() => buildSelectIPAdapterHasImage(entityIdentifier), [entityIdentifier]);
+const hasImage = useAppSelector(selectIPAdapterHasImage);
+
+if (!hasImage) {
+return <IPAdapterSettingsEmptyState />;
+}
+
+return <IPAdapterSettingsContent />;
+});

IPAdapterSettings.displayName = 'IPAdapterSettings';
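Note: splitting `IPAdapterSettings` into a cheap outer check and an inner `...Content` component means the hooks that assume an image exists only run once `hasImage` holds; the conditional return lives in the parent, so hook order stays legal in both components. In reduced sketch form (`selectHasImage` is illustrative):

// Sketch: parent decides, child assumes.
const Settings = () => {
  const hasImage = useAppSelector(selectHasImage); // cheap boolean selector
  return hasImage ? <SettingsContent /> : <EmptyState />;
};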
@@ -0,0 +1,64 @@
import { Button, Flex, Text } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { useImageUploadButton } from 'common/hooks/useImageUploadButton';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
import type { SetGlobalReferenceImageDndTargetData } from 'features/dnd/dnd';
import { setGlobalReferenceImageDndTarget } from 'features/dnd/dnd';
import { DndDropTarget } from 'features/dnd/DndDropTarget';
import { setGlobalReferenceImage } from 'features/imageActions/actions';
import { activeTabCanvasRightPanelChanged } from 'features/ui/store/uiSlice';
import { memo, useCallback, useMemo } from 'react';
import { Trans, useTranslation } from 'react-i18next';
import type { ImageDTO } from 'services/api/types';

export const IPAdapterSettingsEmptyState = memo(() => {
const { t } = useTranslation();
const entityIdentifier = useEntityIdentifierContext('reference_image');
const dispatch = useAppDispatch();
const isBusy = useCanvasIsBusy();
const onUpload = useCallback(
(imageDTO: ImageDTO) => {
setGlobalReferenceImage({ imageDTO, entityIdentifier, dispatch });
},
[dispatch, entityIdentifier]
);
const uploadApi = useImageUploadButton({ onUpload, allowMultiple: false });
const onClickGalleryButton = useCallback(() => {
dispatch(activeTabCanvasRightPanelChanged('gallery'));
}, [dispatch]);

const dndTargetData = useMemo<SetGlobalReferenceImageDndTargetData>(
() => setGlobalReferenceImageDndTarget.getData({ entityIdentifier }),
[entityIdentifier]
);

const components = useMemo(
() => ({
UploadButton: (
<Button isDisabled={isBusy} size="sm" variant="link" color="base.300" {...uploadApi.getUploadButtonProps()} />
),
GalleryButton: (
<Button onClick={onClickGalleryButton} isDisabled={isBusy} size="sm" variant="link" color="base.300" />
),
}),
[isBusy, onClickGalleryButton, uploadApi]
);

return (
<Flex flexDir="column" gap={3} position="relative" w="full" p={4}>
<Text textAlign="center" color="base.300">
<Trans i18nKey="controlLayers.referenceImageEmptyState" components={components} />
</Text>
<input {...uploadApi.getUploadInputProps()} />
<DndDropTarget
dndTarget={setGlobalReferenceImageDndTarget}
dndTargetData={dndTargetData}
label={t('controlLayers.useImage')}
isDisabled={isBusy}
/>
</Flex>
);
});

IPAdapterSettingsEmptyState.displayName = 'IPAdapterSettingsEmptyState';
@@ -4,6 +4,7 @@ import { CanvasEntityHeader } from 'features/controlLayers/components/common/Can
import { CanvasEntityHeaderCommonActions } from 'features/controlLayers/components/common/CanvasEntityHeaderCommonActions';
import { CanvasEntityPreviewImage } from 'features/controlLayers/components/common/CanvasEntityPreviewImage';
import { CanvasEntityEditableTitle } from 'features/controlLayers/components/common/CanvasEntityTitleEdit';
+import { CanvasEntityStateGate } from 'features/controlLayers/contexts/CanvasEntityStateGate';
import { InpaintMaskAdapterGate } from 'features/controlLayers/contexts/EntityAdapterContext';
import { EntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
@@ -19,14 +20,16 @@ export const InpaintMask = memo(({ id }: Props) => {
return (
<EntityIdentifierContext.Provider value={entityIdentifier}>
<InpaintMaskAdapterGate>
-<CanvasEntityContainer>
-<CanvasEntityHeader>
-<CanvasEntityPreviewImage />
-<CanvasEntityEditableTitle />
-<Spacer />
-<CanvasEntityHeaderCommonActions />
-</CanvasEntityHeader>
-</CanvasEntityContainer>
+<CanvasEntityStateGate entityIdentifier={entityIdentifier}>
+<CanvasEntityContainer>
+<CanvasEntityHeader>
+<CanvasEntityPreviewImage />
+<CanvasEntityEditableTitle />
+<Spacer />
+<CanvasEntityHeaderCommonActions />
+</CanvasEntityHeader>
+</CanvasEntityContainer>
+</CanvasEntityStateGate>
</InpaintMaskAdapterGate>
</EntityIdentifierContext.Provider>
);
@@ -4,6 +4,7 @@ import { CanvasEntityHeader } from 'features/controlLayers/components/common/Can
import { CanvasEntityHeaderCommonActions } from 'features/controlLayers/components/common/CanvasEntityHeaderCommonActions';
import { CanvasEntityPreviewImage } from 'features/controlLayers/components/common/CanvasEntityPreviewImage';
import { CanvasEntityEditableTitle } from 'features/controlLayers/components/common/CanvasEntityTitleEdit';
+import { CanvasEntityStateGate } from 'features/controlLayers/contexts/CanvasEntityStateGate';
import { RasterLayerAdapterGate } from 'features/controlLayers/contexts/EntityAdapterContext';
import { EntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
@@ -30,20 +31,22 @@ export const RasterLayer = memo(({ id }: Props) => {
return (
<EntityIdentifierContext.Provider value={entityIdentifier}>
<RasterLayerAdapterGate>
-<CanvasEntityContainer>
-<CanvasEntityHeader>
-<CanvasEntityPreviewImage />
-<CanvasEntityEditableTitle />
-<Spacer />
-<CanvasEntityHeaderCommonActions />
-</CanvasEntityHeader>
-<DndDropTarget
-dndTarget={replaceCanvasEntityObjectsWithImageDndTarget}
-dndTargetData={dndTargetData}
-label={t('controlLayers.replaceLayer')}
-isDisabled={isBusy}
-/>
-</CanvasEntityContainer>
+<CanvasEntityStateGate entityIdentifier={entityIdentifier}>
+<CanvasEntityContainer>
+<CanvasEntityHeader>
+<CanvasEntityPreviewImage />
+<CanvasEntityEditableTitle />
+<Spacer />
+<CanvasEntityHeaderCommonActions />
+</CanvasEntityHeader>
+<DndDropTarget
+dndTarget={replaceCanvasEntityObjectsWithImageDndTarget}
+dndTargetData={dndTargetData}
+label={t('controlLayers.replaceLayer')}
+isDisabled={isBusy}
+/>
+</CanvasEntityContainer>
+</CanvasEntityStateGate>
</RasterLayerAdapterGate>
</EntityIdentifierContext.Provider>
);
@@ -6,6 +6,7 @@ import { CanvasEntityPreviewImage } from 'features/controlLayers/components/comm
import { CanvasEntityEditableTitle } from 'features/controlLayers/components/common/CanvasEntityTitleEdit';
import { RegionalGuidanceBadges } from 'features/controlLayers/components/RegionalGuidance/RegionalGuidanceBadges';
import { RegionalGuidanceSettings } from 'features/controlLayers/components/RegionalGuidance/RegionalGuidanceSettings';
+import { CanvasEntityStateGate } from 'features/controlLayers/contexts/CanvasEntityStateGate';
import { RegionalGuidanceAdapterGate } from 'features/controlLayers/contexts/EntityAdapterContext';
import { EntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
@@ -24,16 +25,18 @@ export const RegionalGuidance = memo(({ id }: Props) => {
return (
<EntityIdentifierContext.Provider value={entityIdentifier}>
<RegionalGuidanceAdapterGate>
-<CanvasEntityContainer>
-<CanvasEntityHeader>
-<CanvasEntityPreviewImage />
-<CanvasEntityEditableTitle />
-<Spacer />
-<RegionalGuidanceBadges />
-<CanvasEntityHeaderCommonActions />
-</CanvasEntityHeader>
-<RegionalGuidanceSettings />
-</CanvasEntityContainer>
+<CanvasEntityStateGate entityIdentifier={entityIdentifier}>
+<CanvasEntityContainer>
+<CanvasEntityHeader>
+<CanvasEntityPreviewImage />
+<CanvasEntityEditableTitle />
+<Spacer />
+<RegionalGuidanceBadges />
+<CanvasEntityHeaderCommonActions />
+</CanvasEntityHeader>
+<RegionalGuidanceSettings />
+</CanvasEntityContainer>
+</CanvasEntityStateGate>
</RegionalGuidanceAdapterGate>
</EntityIdentifierContext.Provider>
);
@@ -10,7 +10,11 @@ export const RegionalGuidanceBadges = memo(() => {
const entityIdentifier = useEntityIdentifierContext('regional_guidance');
const { t } = useTranslation();
const selectAutoNegative = useMemo(
-() => createSelector(selectCanvasSlice, (canvas) => selectEntityOrThrow(canvas, entityIdentifier).autoNegative),
+() =>
+createSelector(
+selectCanvasSlice,
+(canvas) => selectEntityOrThrow(canvas, entityIdentifier, 'RegionalGuidanceBadges').autoNegative
+),
[entityIdentifier]
);
const autoNegative = useAppSelector(selectAutoNegative);
||||
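This hunk, and the matching ones in RegionalGuidanceIPAdapters, RegionalGuidanceMenuItemsAutoNegative, and the two prompt components further down, threads the calling component's name into selectEntityOrThrow. The selector's signature is not shown in this diff; presumably the extra argument is folded into the assertion message so a crash report names the component that tried to select a missing entity. A hedged sketch of that pattern, with assumed state shapes:

import { assert } from 'tsafe';

// Assumed shapes for illustration; the real selector lives in
// features/controlLayers/store/selectors and is not part of this diff.
type Entity = { id: string; autoNegative: boolean };
type CanvasState = { entities: Record<string, Entity | undefined> };

const selectEntityOrThrowSketch = (canvas: CanvasState, id: string, caller: string): Entity => {
  const entity = canvas.entities[id];
  // The caller label turns a generic "entity not found" failure into a message
  // that names the component doing the selecting.
  assert(entity !== undefined, `Entity ${id} not found (selected by ${caller})`);
  return entity;
};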
@@ -1,27 +1,28 @@
-import { IconButton, Tooltip } from '@invoke-ai/ui-library';
+import type { IconButtonProps } from '@invoke-ai/ui-library';
+import { IconButton } from '@invoke-ai/ui-library';
 import { memo } from 'react';
 import { useTranslation } from 'react-i18next';
-import { PiTrashSimpleFill } from 'react-icons/pi';
+import { PiXBold } from 'react-icons/pi';
 
-type Props = {
+type Props = Omit<IconButtonProps, 'aria-label'> & {
   onDelete: () => void;
 };
 
-export const RegionalGuidanceDeletePromptButton = memo(({ onDelete }: Props) => {
+export const RegionalGuidanceDeletePromptButton = memo(({ onDelete, ...rest }: Props) => {
   const { t } = useTranslation();
   return (
-    <Tooltip label={t('controlLayers.deletePrompt')}>
-      <IconButton
-        variant="link"
-        aria-label={t('controlLayers.deletePrompt')}
-        icon={<PiTrashSimpleFill />}
-        onClick={onDelete}
-        flexGrow={0}
-        size="sm"
-        p={0}
-        colorScheme="error"
-      />
-    </Tooltip>
+    <IconButton
+      tooltip={t('common.delete')}
+      variant="link"
+      aria-label={t('common.delete')}
+      icon={<PiXBold />}
+      onClick={onDelete}
+      flexGrow={0}
+      size="sm"
+      p={0}
+      colorScheme="error"
+      {...rest}
+    />
   );
 });
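The delete button now extends IconButtonProps, minus aria-label (which it supplies itself), and spreads the remainder last so callers can attach arbitrary layout props; the new empty-state component below uses exactly this to absolutely position the button. A generic sketch of the pattern, with a plain <button> standing in for the library's IconButton:

import type { ButtonHTMLAttributes } from 'react';

// "Own props plus pass-through" sketch: the component owns onDelete and its
// defaults, and forwards everything else to the underlying element.
type DeleteButtonProps = Omit<ButtonHTMLAttributes<HTMLButtonElement>, 'aria-label'> & {
  onDelete: () => void;
};

export const DeleteButton = ({ onDelete, ...rest }: DeleteButtonProps) => (
  // rest is spread last, so a caller may override the defaults, e.g.
  // <DeleteButton onDelete={fn} style={{ position: 'absolute', top: 0 }} />
  <button type="button" aria-label="Delete" onClick={onDelete} {...rest}>
    ×
  </button>
);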
@@ -6,6 +6,7 @@ import { Weight } from 'features/controlLayers/components/common/Weight';
 import { IPAdapterImagePreview } from 'features/controlLayers/components/IPAdapter/IPAdapterImagePreview';
 import { IPAdapterMethod } from 'features/controlLayers/components/IPAdapter/IPAdapterMethod';
 import { IPAdapterModel } from 'features/controlLayers/components/IPAdapter/IPAdapterModel';
+import { RegionalGuidanceIPAdapterSettingsEmptyState } from 'features/controlLayers/components/RegionalGuidance/RegionalGuidanceIPAdapterSettingsEmptyState';
 import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
 import { usePullBboxIntoRegionalGuidanceReferenceImage } from 'features/controlLayers/hooks/saveCanvasHooks';
 import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
@@ -19,12 +20,12 @@ import {
   rgIPAdapterWeightChanged,
 } from 'features/controlLayers/store/canvasSlice';
 import { selectCanvasSlice, selectRegionalGuidanceReferenceImage } from 'features/controlLayers/store/selectors';
-import type { CLIPVisionModelV2, IPMethodV2 } from 'features/controlLayers/store/types';
+import type { CanvasEntityIdentifier, CLIPVisionModelV2, IPMethodV2 } from 'features/controlLayers/store/types';
 import type { SetRegionalGuidanceReferenceImageDndTargetData } from 'features/dnd/dnd';
 import { setRegionalGuidanceReferenceImageDndTarget } from 'features/dnd/dnd';
 import { memo, useCallback, useMemo } from 'react';
 import { useTranslation } from 'react-i18next';
-import { PiBoundingBoxBold, PiTrashSimpleFill } from 'react-icons/pi';
+import { PiBoundingBoxBold, PiXBold } from 'react-icons/pi';
 import type { ImageDTO, IPAdapterModelConfig } from 'services/api/types';
 import { assert } from 'tsafe';
@@ -32,7 +33,7 @@ type Props = {
   referenceImageId: string;
 };
 
-export const RegionalGuidanceIPAdapterSettings = memo(({ referenceImageId }: Props) => {
+const RegionalGuidanceIPAdapterSettingsContent = memo(({ referenceImageId }: Props) => {
   const entityIdentifier = useEntityIdentifierContext('regional_guidance');
   const { t } = useTranslation();
   const dispatch = useAppDispatch();
@@ -115,7 +116,7 @@ export const RegionalGuidanceIPAdapterSettings = memo(({ referenceImageId }: Pro
           size="sm"
           variant="link"
           alignSelf="stretch"
-          icon={<PiTrashSimpleFill />}
+          icon={<PiXBold />}
           tooltip={t('controlLayers.deleteReferenceImage')}
           aria-label={t('controlLayers.deleteReferenceImage')}
           onClick={onDeleteIPAdapter}
@@ -161,4 +162,31 @@ export const RegionalGuidanceIPAdapterSettings = memo(({ referenceImageId }: Pro
   );
 });
 
+RegionalGuidanceIPAdapterSettingsContent.displayName = 'RegionalGuidanceIPAdapterSettingsContent';
+
+const buildSelectIPAdapterHasImage = (
+  entityIdentifier: CanvasEntityIdentifier<'regional_guidance'>,
+  referenceImageId: string
+) =>
+  createSelector(selectCanvasSlice, (canvas) => {
+    const referenceImage = selectRegionalGuidanceReferenceImage(canvas, entityIdentifier, referenceImageId);
+    return !!referenceImage && referenceImage.ipAdapter.image !== null;
+  });
+
+export const RegionalGuidanceIPAdapterSettings = memo(({ referenceImageId }: Props) => {
+  const entityIdentifier = useEntityIdentifierContext('regional_guidance');
+
+  const selectIPAdapterHasImage = useMemo(
+    () => buildSelectIPAdapterHasImage(entityIdentifier, referenceImageId),
+    [entityIdentifier, referenceImageId]
+  );
+  const hasImage = useAppSelector(selectIPAdapterHasImage);
+
+  if (!hasImage) {
+    return <RegionalGuidanceIPAdapterSettingsEmptyState referenceImageId={referenceImageId} />;
+  }
+
+  return <RegionalGuidanceIPAdapterSettingsContent referenceImageId={referenceImageId} />;
+});
+
 RegionalGuidanceIPAdapterSettings.displayName = 'RegionalGuidanceIPAdapterSettings';
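The new wrapper at the bottom of the file derives a single hasImage boolean and mounts either the empty state or the full settings panel, so the heavy panel never renders without an image. Because the selector closes over two runtime ids, it is produced by a module-level factory and pinned per component instance with useMemo; the RegionalGuidanceSettings hunk below hoists its selector the same way. A self-contained sketch of the factory shape, with hypothetical state types in place of the canvas slice:

import { createSelector } from 'reselect';

// Hypothetical state shapes for illustration only.
type ReferenceImage = { id: string; image: string | null };
type State = { referenceImages: ReferenceImage[] };

// Factory: each referenceImageId gets its own memoized selector instance, so
// two components watching different reference images do not evict each
// other's single-entry reselect cache.
const buildSelectHasImage = (referenceImageId: string) =>
  createSelector(
    (state: State) => state.referenceImages,
    (referenceImages) => {
      const ref = referenceImages.find(({ id }) => id === referenceImageId);
      return !!ref && ref.image !== null;
    }
  );

// In the component, the factory result is pinned with useMemo so the selector
// instance survives re-renders: useMemo(() => buildSelectHasImage(id), [id]).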
@@ -0,0 +1,82 @@
+import { Button, Flex, Text } from '@invoke-ai/ui-library';
+import { useAppDispatch } from 'app/store/storeHooks';
+import { useImageUploadButton } from 'common/hooks/useImageUploadButton';
+import { RegionalGuidanceDeletePromptButton } from 'features/controlLayers/components/RegionalGuidance/RegionalGuidanceDeletePromptButton';
+import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
+import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
+import { rgIPAdapterDeleted } from 'features/controlLayers/store/canvasSlice';
+import type { SetRegionalGuidanceReferenceImageDndTargetData } from 'features/dnd/dnd';
+import { setRegionalGuidanceReferenceImageDndTarget } from 'features/dnd/dnd';
+import { DndDropTarget } from 'features/dnd/DndDropTarget';
+import { setRegionalGuidanceReferenceImage } from 'features/imageActions/actions';
+import { activeTabCanvasRightPanelChanged } from 'features/ui/store/uiSlice';
+import { memo, useCallback, useMemo } from 'react';
+import { Trans, useTranslation } from 'react-i18next';
+import type { ImageDTO } from 'services/api/types';
+
+type Props = {
+  referenceImageId: string;
+};
+
+export const RegionalGuidanceIPAdapterSettingsEmptyState = memo(({ referenceImageId }: Props) => {
+  const { t } = useTranslation();
+  const entityIdentifier = useEntityIdentifierContext('regional_guidance');
+  const dispatch = useAppDispatch();
+  const isBusy = useCanvasIsBusy();
+  const onUpload = useCallback(
+    (imageDTO: ImageDTO) => {
+      setRegionalGuidanceReferenceImage({ imageDTO, entityIdentifier, referenceImageId, dispatch });
+    },
+    [dispatch, entityIdentifier, referenceImageId]
+  );
+  const uploadApi = useImageUploadButton({ onUpload, allowMultiple: false });
+  const onClickGalleryButton = useCallback(() => {
+    dispatch(activeTabCanvasRightPanelChanged('gallery'));
+  }, [dispatch]);
+  const onDeleteIPAdapter = useCallback(() => {
+    dispatch(rgIPAdapterDeleted({ entityIdentifier, referenceImageId }));
+  }, [dispatch, entityIdentifier, referenceImageId]);
+
+  const dndTargetData = useMemo<SetRegionalGuidanceReferenceImageDndTargetData>(
+    () =>
+      setRegionalGuidanceReferenceImageDndTarget.getData({
+        entityIdentifier,
+        referenceImageId,
+      }),
+    [entityIdentifier, referenceImageId]
+  );
+
+  return (
+    <Flex flexDir="column" gap={3} position="relative" w="full" p={4}>
+      <RegionalGuidanceDeletePromptButton onDelete={onDeleteIPAdapter} position="absolute" top={0} insetInlineEnd={0} />
+      <Text textAlign="center" color="base.300">
+        <Trans
+          i18nKey="controlLayers.referenceImageEmptyState"
+          components={{
+            UploadButton: (
+              <Button
+                isDisabled={isBusy}
+                size="sm"
+                variant="link"
+                color="base.300"
+                {...uploadApi.getUploadButtonProps()}
+              />
+            ),
+            GalleryButton: (
+              <Button onClick={onClickGalleryButton} isDisabled={isBusy} size="sm" variant="link" color="base.300" />
+            ),
+          }}
+        />
+      </Text>
+      <input {...uploadApi.getUploadInputProps()} />
+      <DndDropTarget
+        dndTarget={setRegionalGuidanceReferenceImageDndTarget}
+        dndTargetData={dndTargetData}
+        label={t('controlLayers.useImage')}
+        isDisabled={isBusy}
+      />
+    </Flex>
+  );
+});
+
+RegionalGuidanceIPAdapterSettingsEmptyState.displayName = 'RegionalGuidanceIPAdapterSettingsEmptyState';
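The empty state leans on react-i18next's Trans component interpolation: the string behind controlLayers.referenceImageEmptyState presumably contains <UploadButton> and <GalleryButton> tags (an assumption about the locale file, which is not part of this diff) that are swapped for the elements passed via components. The mechanism in miniature, with a hypothetical key and string:

import { Trans } from 'react-i18next';

// Given a hypothetical locale entry such as:
//   "emptyState": "Drag an image here, <UploadButton>upload one</UploadButton>,
//   or pick one from the <GalleryButton>gallery</GalleryButton>"
// react-i18next replaces each named tag with the matching element below and
// renders the tag's inner text as that element's children.
export const EmptyStateText = () => (
  <Trans
    i18nKey="emptyState"
    components={{
      UploadButton: <button type="button" />,
      GalleryButton: <button type="button" />,
    }}
  />
);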
@@ -13,7 +13,11 @@ export const RegionalGuidanceIPAdapters = memo(() => {
   const selectIPAdapterIds = useMemo(
     () =>
       createMemoizedSelector(selectCanvasSlice, (canvas) => {
-        const ipAdapterIds = selectEntityOrThrow(canvas, entityIdentifier).referenceImages.map(({ id }) => id);
+        const ipAdapterIds = selectEntityOrThrow(
+          canvas,
+          entityIdentifier,
+          'RegionalGuidanceIPAdapters'
+        ).referenceImages.map(({ id }) => id);
         if (ipAdapterIds.length === 0) {
           return EMPTY_ARRAY;
         }

@@ -13,7 +13,11 @@ export const RegionalGuidanceMenuItemsAutoNegative = memo(() => {
   const { t } = useTranslation();
   const dispatch = useAppDispatch();
   const selectAutoNegative = useMemo(
-    () => createSelector(selectCanvasSlice, (canvas) => selectEntityOrThrow(canvas, entityIdentifier).autoNegative),
+    () =>
+      createSelector(
+        selectCanvasSlice,
+        (canvas) => selectEntityOrThrow(canvas, entityIdentifier, 'RegionalGuidanceMenuItemsAutoNegative').autoNegative
+      ),
     [entityIdentifier]
   );
   const autoNegative = useAppSelector(selectAutoNegative);

@@ -20,7 +20,10 @@ export const RegionalGuidanceNegativePrompt = memo(() => {
   const entityIdentifier = useEntityIdentifierContext('regional_guidance');
   const selectPrompt = useMemo(
     () =>
-      createSelector(selectCanvasSlice, (canvas) => selectEntityOrThrow(canvas, entityIdentifier).negativePrompt ?? ''),
+      createSelector(
+        selectCanvasSlice,
+        (canvas) => selectEntityOrThrow(canvas, entityIdentifier, 'RegionalGuidanceNegativePrompt').negativePrompt ?? ''
+      ),
     [entityIdentifier]
   );
   const prompt = useAppSelector(selectPrompt);

@@ -20,7 +20,10 @@ export const RegionalGuidancePositivePrompt = memo(() => {
   const entityIdentifier = useEntityIdentifierContext('regional_guidance');
   const selectPrompt = useMemo(
     () =>
-      createSelector(selectCanvasSlice, (canvas) => selectEntityOrThrow(canvas, entityIdentifier).positivePrompt ?? ''),
+      createSelector(
+        selectCanvasSlice,
+        (canvas) => selectEntityOrThrow(canvas, entityIdentifier, 'RegionalGuidancePositivePrompt').positivePrompt ?? ''
+      ),
     [entityIdentifier]
   );
   const prompt = useAppSelector(selectPrompt);
@@ -5,26 +5,25 @@ import { CanvasEntitySettingsWrapper } from 'features/controlLayers/components/c
 import { RegionalGuidanceAddPromptsIPAdapterButtons } from 'features/controlLayers/components/RegionalGuidance/RegionalGuidanceAddPromptsIPAdapterButtons';
 import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
 import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
+import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
 import { memo, useMemo } from 'react';
 
 import { RegionalGuidanceIPAdapters } from './RegionalGuidanceIPAdapters';
 import { RegionalGuidanceNegativePrompt } from './RegionalGuidanceNegativePrompt';
 import { RegionalGuidancePositivePrompt } from './RegionalGuidancePositivePrompt';
 
+const buildSelectFlags = (entityIdentifier: CanvasEntityIdentifier<'regional_guidance'>) =>
+  createMemoizedSelector(selectCanvasSlice, (canvas) => {
+    const entity = selectEntityOrThrow(canvas, entityIdentifier, 'RegionalGuidanceSettings');
+    return {
+      hasPositivePrompt: entity.positivePrompt !== null,
+      hasNegativePrompt: entity.negativePrompt !== null,
+      hasIPAdapters: entity.referenceImages.length > 0,
+    };
+  });
 export const RegionalGuidanceSettings = memo(() => {
   const entityIdentifier = useEntityIdentifierContext('regional_guidance');
-  const selectFlags = useMemo(
-    () =>
-      createMemoizedSelector(selectCanvasSlice, (canvas) => {
-        const entity = selectEntityOrThrow(canvas, entityIdentifier);
-        return {
-          hasPositivePrompt: entity.positivePrompt !== null,
-          hasNegativePrompt: entity.negativePrompt !== null,
-          hasIPAdapters: entity.referenceImages.length > 0,
-        };
-      }),
-    [entityIdentifier]
-  );
+  const selectFlags = useMemo(() => buildSelectFlags(entityIdentifier), [entityIdentifier]);
   const flags = useAppSelector(selectFlags);
 
   return (
@@ -1,17 +1,22 @@
 import { IconButton } from '@invoke-ai/ui-library';
+import { useStore } from '@nanostores/react';
+import { $authToken } from 'app/store/nanostores/authToken';
 import { useAppSelector } from 'app/store/storeHooks';
+import { withResultAsync } from 'common/util/result';
 import { selectSelectedImage } from 'features/controlLayers/store/canvasStagingAreaSlice';
 import { selectAutoAddBoardId } from 'features/gallery/store/gallerySelectors';
 import { toast } from 'features/toast/toast';
 import { memo, useCallback } from 'react';
 import { useTranslation } from 'react-i18next';
 import { PiFloppyDiskBold } from 'react-icons/pi';
-import { useAddImagesToBoardMutation, useChangeImageIsIntermediateMutation } from 'services/api/endpoints/images';
+import { uploadImage } from 'services/api/endpoints/images';
 
+const TOAST_ID = 'SAVE_STAGING_AREA_IMAGE_TO_GALLERY';
+
 export const StagingAreaToolbarSaveSelectedToGalleryButton = memo(() => {
   const autoAddBoardId = useAppSelector(selectAutoAddBoardId);
   const selectedImage = useAppSelector(selectSelectedImage);
-  const [addImageToBoard] = useAddImagesToBoardMutation();
-  const [changeIsImageIntermediate] = useChangeImageIsIntermediateMutation();
+  const authToken = useStore($authToken);
 
   const { t } = useTranslation();
@@ -19,21 +24,49 @@
     if (!selectedImage) {
       return;
     }
-    if (autoAddBoardId !== 'none') {
-      await addImageToBoard({ imageDTOs: [selectedImage.imageDTO], board_id: autoAddBoardId }).unwrap();
-      // The changeIsImageIntermediate request will use the board_id on this specific imageDTO object, so we need to
-      // update it before making the request - else the optimistic board updates will get out of whack.
-      changeIsImageIntermediate({
-        imageDTO: { ...selectedImage.imageDTO, board_id: autoAddBoardId },
-        is_intermediate: false,
-      });
-    } else {
-      changeIsImageIntermediate({
-        imageDTO: selectedImage.imageDTO,
-        is_intermediate: false,
-      });
-    }
-  }, [addImageToBoard, autoAddBoardId, changeIsImageIntermediate, selectedImage]);
+
+    // To save the image to the gallery, we download it and re-upload it. This allows the user to delete the image
+    // from the gallery without borking the canvas, which may need this image to exist.
+    const result = await withResultAsync(async () => {
+      // Download the image
+      const requestOpts = authToken
+        ? {
+            headers: {
+              Authorization: `Bearer ${authToken}`,
+            },
+          }
+        : {};
+      const res = await fetch(selectedImage.imageDTO.image_url, requestOpts);
+      const blob = await res.blob();
+      // Create a new file with the same name, which we will upload
+      const file = new File([blob], `copy_of_${selectedImage.imageDTO.image_name}`, { type: 'image/png' });
+
+      await uploadImage({
+        file,
+        // Image should show up in the Images tab
+        image_category: 'general',
+        is_intermediate: false,
+        // TODO(psyche): Maybe this should just save to the currently-selected board?
+        board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId,
+        // We will do our own toast - opt out of the default handling
+        silent: true,
+      });
+    });
+
+    if (result.isOk()) {
+      toast({
+        id: TOAST_ID,
+        title: t('controlLayers.savedToGalleryOk'),
+        status: 'success',
+      });
+    } else {
+      toast({
+        id: TOAST_ID,
+        title: t('controlLayers.savedToGalleryError'),
+        status: 'error',
+      });
+    }
+  }, [autoAddBoardId, selectedImage, t, authToken]);
 
   return (
     <IconButton
@@ -42,7 +75,7 @@ export const StagingAreaToolbarSaveSelectedToGalleryButton = memo(() => {
       icon={<PiFloppyDiskBold />}
       onClick={saveSelectedImageToGallery}
       colorScheme="invokeBlue"
-      isDisabled={!selectedImage || !selectedImage.imageDTO.is_intermediate}
+      isDisabled={!selectedImage}
     />
   );
 });
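The rewritten handler downloads the staged image and re-uploads it as a fresh gallery asset, so deleting the gallery copy can never invalidate an image the canvas still references, and it routes the whole operation through withResultAsync, branching on result.isOk() for the toasts. The helper lives in common/util/result and is not shown here; a plausible minimal version with a plain ok flag (the diff's helper exposes isOk() as a method, but the idea is the same):

// Sketch in the spirit of common/util/result; the real implementation is not
// part of this diff. It converts a throwing async function into a tagged
// value the caller can branch on without try/catch at the call site.
type Result<T> = { ok: true; value: T } | { ok: false; error: unknown };

export const withResultAsyncSketch = async <T>(fn: () => Promise<T>): Promise<Result<T>> => {
  try {
    return { ok: true, value: await fn() };
  } catch (error) {
    return { ok: false, error };
  }
};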
@@ -4,8 +4,8 @@ import { CanvasSettingsPopover } from 'features/controlLayers/components/Setting
 import { ToolColorPicker } from 'features/controlLayers/components/Tool/ToolFillColorPicker';
 import { ToolSettings } from 'features/controlLayers/components/Tool/ToolSettings';
 import { CanvasToolbarFitBboxToLayersButton } from 'features/controlLayers/components/Toolbar/CanvasToolbarFitBboxToLayersButton';
+import { CanvasToolbarNewSessionMenuButton } from 'features/controlLayers/components/Toolbar/CanvasToolbarNewSessionMenuButton';
 import { CanvasToolbarRedoButton } from 'features/controlLayers/components/Toolbar/CanvasToolbarRedoButton';
-import { CanvasToolbarResetCanvasButton } from 'features/controlLayers/components/Toolbar/CanvasToolbarResetCanvasButton';
 import { CanvasToolbarResetViewButton } from 'features/controlLayers/components/Toolbar/CanvasToolbarResetViewButton';
 import { CanvasToolbarSaveToGalleryButton } from 'features/controlLayers/components/Toolbar/CanvasToolbarSaveToGalleryButton';
 import { CanvasToolbarScale } from 'features/controlLayers/components/Toolbar/CanvasToolbarScale';
@@ -43,7 +43,7 @@ export const CanvasToolbar = memo(() => {
         <CanvasToolbarSaveToGalleryButton />
         <CanvasToolbarUndoButton />
         <CanvasToolbarRedoButton />
-        <CanvasToolbarResetCanvasButton />
+        <CanvasToolbarNewSessionMenuButton />
         <CanvasSettingsPopover />
       </Flex>
     </Flex>
@@ -0,0 +1,25 @@
+import { IconButton, Menu, MenuButton, MenuList } from '@invoke-ai/ui-library';
+import { SessionMenuItems } from 'common/components/SessionMenuItems';
+import { memo } from 'react';
+import { useTranslation } from 'react-i18next';
+import { PiFilePlusBold } from 'react-icons/pi';
+
+export const CanvasToolbarNewSessionMenuButton = memo(() => {
+  const { t } = useTranslation();
+  return (
+    <Menu placement="bottom-end">
+      <MenuButton
+        as={IconButton}
+        aria-label={t('controlLayers.newSession')}
+        icon={<PiFilePlusBold />}
+        variant="link"
+        alignSelf="stretch"
+      />
+      <MenuList>
+        <SessionMenuItems />
+      </MenuList>
+    </Menu>
+  );
+});
+
+CanvasToolbarNewSessionMenuButton.displayName = 'CanvasToolbarNewSessionMenuButton';
Some files were not shown because too many files have changed in this diff.