Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-16 14:17:58 -05:00)

Compare commits: v5.3.1...ryan/fix-d (300 commits)
.github/pull_request_template.md (vendored, 1 line changed)
@@ -19,3 +19,4 @@
- [ ] _The PR has a short but descriptive title, suitable for a changelog_
- [ ] _Tests added / updated (if applicable)_
- [ ] _Documentation added / updated (if applicable)_
- [ ] _Updated `What's New` copy (if doing a release after this PR)_
SECURITY.md (new file, 14 lines)
@@ -0,0 +1,14 @@
# Security Policy

## Supported Versions

Only the latest version of Invoke will receive security updates.
We do not currently maintain multiple versions of the application with updates.

## Reporting a Vulnerability

To report a vulnerability, contact the Invoke team directly at security@invoke.ai

At this time, we do not maintain a formal bug bounty program.

You can also share identified security issues with our team on huntr.com
@@ -99,7 +99,6 @@ their descriptions.
| Scale Latents | Scales latents by a given factor. |
| Segment Anything Processor | Applies segment anything processing to image |
| Show Image | Displays a provided image, and passes it forward in the pipeline. |
| Step Param Easing | Experimental per-step parameter easing for denoising steps |
| String Primitive Collection | A collection of string primitive values |
| String Primitive | A string primitive value |
| Subtract Integers | Subtracts two numbers |
@@ -40,6 +40,8 @@ class AppVersion(BaseModel):

version: str = Field(description="App version")

highlights: Optional[list[str]] = Field(default=None, description="Highlights of release")


class AppDependencyVersions(BaseModel):
"""App depencency Versions Response"""
@@ -63,6 +63,7 @@ class Classification(str, Enum, metaclass=MetaEnum):
- `Prototype`: The invocation is not yet stable and may be removed from the application at any time. Workflows built around this invocation may break, and we are *not* committed to supporting this invocation.
- `Deprecated`: The invocation is deprecated and may be removed in a future version.
- `Internal`: The invocation is not intended for use by end-users. It may be changed or removed at any time, but is exposed for users to play with.
- `Special`: The invocation is a special case and does not fit into any of the other classifications.
"""

Stable = "stable"
@@ -70,6 +71,7 @@ class Classification(str, Enum, metaclass=MetaEnum):
Prototype = "prototype"
Deprecated = "deprecated"
Internal = "internal"
Special = "special"


class UIConfigBase(BaseModel):
@@ -1,98 +1,120 @@
|
||||
from typing import Any, Union
|
||||
from typing import Optional, Union
|
||||
|
||||
import numpy as np
|
||||
import numpy.typing as npt
|
||||
import torch
|
||||
import torchvision.transforms as T
|
||||
from PIL import Image
|
||||
from torchvision.transforms.functional import resize as tv_resize
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
|
||||
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, LatentsField
|
||||
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, Input, InputField, LatentsField
|
||||
from invokeai.app.invocations.primitives import LatentsOutput
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
|
||||
from invokeai.backend.util.devices import TorchDevice
|
||||
|
||||
|
||||
def slerp(
|
||||
t: Union[float, np.ndarray],
|
||||
v0: Union[torch.Tensor, np.ndarray],
|
||||
v1: Union[torch.Tensor, np.ndarray],
|
||||
device: torch.device,
|
||||
DOT_THRESHOLD: float = 0.9995,
|
||||
):
|
||||
"""
|
||||
Spherical linear interpolation
|
||||
Args:
|
||||
t (float/np.ndarray): Float value between 0.0 and 1.0
|
||||
v0 (np.ndarray): Starting vector
|
||||
v1 (np.ndarray): Final vector
|
||||
DOT_THRESHOLD (float): Threshold for considering the two vectors as
|
||||
colineal. Not recommended to alter this.
|
||||
Returns:
|
||||
v2 (np.ndarray): Interpolation vector between v0 and v1
|
||||
"""
|
||||
inputs_are_torch = False
|
||||
if not isinstance(v0, np.ndarray):
|
||||
inputs_are_torch = True
|
||||
v0 = v0.detach().cpu().numpy()
|
||||
if not isinstance(v1, np.ndarray):
|
||||
inputs_are_torch = True
|
||||
v1 = v1.detach().cpu().numpy()
|
||||
|
||||
dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
|
||||
if np.abs(dot) > DOT_THRESHOLD:
|
||||
v2 = (1 - t) * v0 + t * v1
|
||||
else:
|
||||
theta_0 = np.arccos(dot)
|
||||
sin_theta_0 = np.sin(theta_0)
|
||||
theta_t = theta_0 * t
|
||||
sin_theta_t = np.sin(theta_t)
|
||||
s0 = np.sin(theta_0 - theta_t) / sin_theta_0
|
||||
s1 = sin_theta_t / sin_theta_0
|
||||
v2 = s0 * v0 + s1 * v1
|
||||
|
||||
if inputs_are_torch:
|
||||
v2 = torch.from_numpy(v2).to(device)
|
||||
|
||||
return v2
|
||||
|
||||
|
||||
@invocation(
|
||||
"lblend",
|
||||
title="Blend Latents",
|
||||
tags=["latents", "blend"],
|
||||
tags=["latents", "blend", "mask"],
|
||||
category="latents",
|
||||
version="1.0.3",
|
||||
version="1.1.0",
|
||||
)
|
||||
class BlendLatentsInvocation(BaseInvocation):
|
||||
"""Blend two latents using a given alpha. Latents must have same size."""
|
||||
"""Blend two latents using a given alpha. If a mask is provided, the second latents will be masked before blending.
|
||||
Latents must have same size. Masking functionality added by @dwringer."""
|
||||
|
||||
latents_a: LatentsField = InputField(
|
||||
description=FieldDescriptions.latents,
|
||||
input=Input.Connection,
|
||||
)
|
||||
latents_b: LatentsField = InputField(
|
||||
description=FieldDescriptions.latents,
|
||||
input=Input.Connection,
|
||||
)
|
||||
alpha: float = InputField(default=0.5, description=FieldDescriptions.blend_alpha)
|
||||
latents_a: LatentsField = InputField(description=FieldDescriptions.latents, input=Input.Connection)
|
||||
latents_b: LatentsField = InputField(description=FieldDescriptions.latents, input=Input.Connection)
|
||||
mask: Optional[ImageField] = InputField(default=None, description="Mask for blending in latents B")
|
||||
alpha: float = InputField(ge=0, default=0.5, description=FieldDescriptions.blend_alpha)
|
||||
|
||||
def prep_mask_tensor(self, mask_image: Image.Image) -> torch.Tensor:
|
||||
if mask_image.mode != "L":
|
||||
mask_image = mask_image.convert("L")
|
||||
mask_tensor = image_resized_to_grid_as_tensor(mask_image, normalize=False)
|
||||
if mask_tensor.dim() == 3:
|
||||
mask_tensor = mask_tensor.unsqueeze(0)
|
||||
return mask_tensor
|
||||
|
||||
def replace_tensor_from_masked_tensor(
|
||||
self, tensor: torch.Tensor, other_tensor: torch.Tensor, mask_tensor: torch.Tensor
|
||||
):
|
||||
output = tensor.clone()
|
||||
mask_tensor = mask_tensor.expand(output.shape)
|
||||
if output.dtype != torch.float16:
|
||||
output = torch.add(output, mask_tensor * torch.sub(other_tensor, tensor))
|
||||
else:
|
||||
output = torch.add(output, mask_tensor.half() * torch.sub(other_tensor, tensor))
|
||||
return output
|
||||
|
||||
def invoke(self, context: InvocationContext) -> LatentsOutput:
|
||||
latents_a = context.tensors.load(self.latents_a.latents_name)
|
||||
latents_b = context.tensors.load(self.latents_b.latents_name)
|
||||
if self.mask is None:
|
||||
mask_tensor = torch.zeros(latents_a.shape[-2:])
|
||||
else:
|
||||
mask_tensor = self.prep_mask_tensor(context.images.get_pil(self.mask.image_name))
|
||||
mask_tensor = tv_resize(mask_tensor, latents_a.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False)
|
||||
|
||||
latents_b = self.replace_tensor_from_masked_tensor(latents_b, latents_a, mask_tensor)
|
||||
|
||||
if latents_a.shape != latents_b.shape:
|
||||
raise Exception("Latents to blend must be the same size.")
|
||||
raise ValueError("Latents to blend must be the same size.")
|
||||
|
||||
device = TorchDevice.choose_torch_device()
|
||||
|
||||
def slerp(
|
||||
t: Union[float, npt.NDArray[Any]], # FIXME: maybe use np.float32 here?
|
||||
v0: Union[torch.Tensor, npt.NDArray[Any]],
|
||||
v1: Union[torch.Tensor, npt.NDArray[Any]],
|
||||
DOT_THRESHOLD: float = 0.9995,
|
||||
) -> Union[torch.Tensor, npt.NDArray[Any]]:
|
||||
"""
|
||||
Spherical linear interpolation
|
||||
Args:
|
||||
t (float/np.ndarray): Float value between 0.0 and 1.0
|
||||
v0 (np.ndarray): Starting vector
|
||||
v1 (np.ndarray): Final vector
|
||||
DOT_THRESHOLD (float): Threshold for considering the two vectors as
|
||||
colineal. Not recommended to alter this.
|
||||
Returns:
|
||||
v2 (np.ndarray): Interpolation vector between v0 and v1
|
||||
"""
|
||||
inputs_are_torch = False
|
||||
if not isinstance(v0, np.ndarray):
|
||||
inputs_are_torch = True
|
||||
v0 = v0.detach().cpu().numpy()
|
||||
if not isinstance(v1, np.ndarray):
|
||||
inputs_are_torch = True
|
||||
v1 = v1.detach().cpu().numpy()
|
||||
|
||||
dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
|
||||
if np.abs(dot) > DOT_THRESHOLD:
|
||||
v2 = (1 - t) * v0 + t * v1
|
||||
else:
|
||||
theta_0 = np.arccos(dot)
|
||||
sin_theta_0 = np.sin(theta_0)
|
||||
theta_t = theta_0 * t
|
||||
sin_theta_t = np.sin(theta_t)
|
||||
s0 = np.sin(theta_0 - theta_t) / sin_theta_0
|
||||
s1 = sin_theta_t / sin_theta_0
|
||||
v2 = s0 * v0 + s1 * v1
|
||||
|
||||
if inputs_are_torch:
|
||||
v2_torch: torch.Tensor = torch.from_numpy(v2).to(device)
|
||||
return v2_torch
|
||||
else:
|
||||
assert isinstance(v2, np.ndarray)
|
||||
return v2
|
||||
|
||||
# blend
|
||||
bl = slerp(self.alpha, latents_a, latents_b)
|
||||
assert isinstance(bl, torch.Tensor)
|
||||
blended_latents: torch.Tensor = bl # for type checking convenience
|
||||
blended_latents = slerp(self.alpha, latents_a, latents_b, device)
|
||||
|
||||
# https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
|
||||
blended_latents = blended_latents.to("cpu")
|
||||
|
||||
TorchDevice.empty_cache()
|
||||
torch.cuda.empty_cache()
|
||||
|
||||
name = context.tensors.save(tensor=blended_latents)
|
||||
return LatentsOutput.build(latents_name=name, latents=blended_latents, seed=self.latents_a.seed)
|
||||
return LatentsOutput.build(latents_name=name, latents=blended_latents)
|
||||
|
||||
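The Blend Latents hunk above adds masked blending and reworks the slerp (spherical linear interpolation) helper. As a rough, self-contained illustration of the same interpolation math (a sketch in plain NumPy, independent of InvokeAI's invocation classes; the helper name and array sizes are made up for the example):

```python
import numpy as np

def slerp_sketch(t: float, v0: np.ndarray, v1: np.ndarray, dot_threshold: float = 0.9995) -> np.ndarray:
    # Normalized dot product decides whether the vectors are close enough to
    # treat as colinear, in which case plain linear interpolation is used.
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > dot_threshold:
        return (1 - t) * v0 + t * v1
    # Otherwise interpolate the angle between the two vectors.
    theta_0 = np.arccos(dot)
    theta_t = theta_0 * t
    s0 = np.sin(theta_0 - theta_t) / np.sin(theta_0)
    s1 = np.sin(theta_t) / np.sin(theta_0)
    return s0 * v0 + s1 * v1

# Example: blending two random "latents" with alpha = 0.5.
rng = np.random.default_rng(0)
a, b = rng.standard_normal(16), rng.standard_normal(16)
blended = slerp_sketch(0.5, a, b)
```

The DOT_THRESHOLD fallback to ordinary linear interpolation avoids dividing by a near-zero sin(theta_0) when the two vectors are nearly colinear.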
@@ -95,6 +95,7 @@ class CompelInvocation(BaseInvocation):
ti_manager,
),
):
context.util.signal_progress("Building conditioning")
assert isinstance(text_encoder, CLIPTextModel)
assert isinstance(tokenizer, CLIPTokenizer)
compel = Compel(
@@ -191,6 +192,7 @@ class SDXLPromptInvocationBase:
ti_manager,
),
):
context.util.signal_progress("Building conditioning")
assert isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection))
assert isinstance(tokenizer, CLIPTokenizer)
invokeai/app/invocations/composition-nodes.py (new file, 1563 lines)
File diff suppressed because it is too large
@@ -65,6 +65,7 @@ class CreateDenoiseMaskInvocation(BaseInvocation):
img_mask = tv_resize(mask, image_tensor.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False)
masked_image = image_tensor * torch.where(img_mask < 0.5, 0.0, 1.0)
# TODO:
context.util.signal_progress("Running VAE encoder")
masked_latents = ImageToLatentsInvocation.vae_encode(vae_info, self.fp32, self.tiled, masked_image.clone())

masked_latents_name = context.tensors.save(tensor=masked_latents)
@@ -131,6 +131,7 @@ class CreateGradientMaskInvocation(BaseInvocation):
image_tensor = image_tensor.unsqueeze(0)
img_mask = tv_resize(mask, image_tensor.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False)
masked_image = image_tensor * torch.where(img_mask < 0.5, 0.0, 1.0)
context.util.signal_progress("Running VAE encoder")
masked_latents = ImageToLatentsInvocation.vae_encode(
vae_info, self.fp32, self.tiled, masked_image.clone()
)
@@ -41,6 +41,7 @@ class UIType(str, Enum, metaclass=MetaEnum):
# region Model Field Types
MainModel = "MainModelField"
FluxMainModel = "FluxMainModelField"
SD3MainModel = "SD3MainModelField"
SDXLMainModel = "SDXLMainModelField"
SDXLRefinerModel = "SDXLRefinerModelField"
ONNXModel = "ONNXModelField"
@@ -52,6 +53,8 @@ class UIType(str, Enum, metaclass=MetaEnum):
T2IAdapterModel = "T2IAdapterModelField"
T5EncoderModel = "T5EncoderModelField"
CLIPEmbedModel = "CLIPEmbedModelField"
CLIPLEmbedModel = "CLIPLEmbedModelField"
CLIPGEmbedModel = "CLIPGEmbedModelField"
SpandrelImageToImageModel = "SpandrelImageToImageModelField"
# endregion

@@ -131,8 +134,10 @@ class FieldDescriptions:
clip = "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count"
t5_encoder = "T5 tokenizer and text encoder"
clip_embed_model = "CLIP Embed loader"
clip_g_model = "CLIP-G Embed loader"
unet = "UNet (scheduler, LoRAs)"
transformer = "Transformer"
mmditx = "MMDiTX"
vae = "VAE"
cond = "Conditioning tensor"
controlnet_model = "ControlNet model to load"
@@ -140,6 +145,7 @@ class FieldDescriptions:
lora_model = "LoRA model to load"
main_model = "Main model (UNet, VAE, CLIP) to load"
flux_model = "Flux model (Transformer) to load"
sd3_model = "SD3 model (MMDiTX) to load"
sdxl_main_model = "SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load"
sdxl_refiner_model = "SDXL Refiner Main Modde (UNet, VAE, CLIP2) to load"
onnx_main_model = "ONNX Main model (UNet, VAE, CLIP) to load"
@@ -246,6 +252,12 @@ class FluxConditioningField(BaseModel):
conditioning_name: str = Field(description="The name of conditioning tensor")


class SD3ConditioningField(BaseModel):
"""A conditioning tensor primitive value"""

conditioning_name: str = Field(description="The name of conditioning tensor")


class ConditioningField(BaseModel):
"""A conditioning tensor primitive value"""
@@ -56,7 +56,7 @@ from invokeai.backend.util.devices import TorchDevice
title="FLUX Denoise",
tags=["image", "flux"],
category="image",
version="3.2.0",
version="3.2.1",
classification=Classification.Prototype,
)
class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
@@ -81,6 +81,7 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
description=FieldDescriptions.denoising_start,
)
denoising_end: float = InputField(default=1.0, ge=0, le=1, description=FieldDescriptions.denoising_end)
add_noise: bool = InputField(default=True, description="Add noise based on denoising start.")
transformer: TransformerField = InputField(
description=FieldDescriptions.flux_model,
input=Input.Connection,
@@ -207,9 +208,12 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
"to be poor. Consider using a FLUX dev model instead."
)

# Noise the orig_latents by the appropriate amount for the first timestep.
t_0 = timesteps[0]
x = t_0 * noise + (1.0 - t_0) * init_latents
if self.add_noise:
# Noise the orig_latents by the appropriate amount for the first timestep.
t_0 = timesteps[0]
x = t_0 * noise + (1.0 - t_0) * init_latents
else:
x = init_latents
else:
# init_latents are not provided, so we are not doing image-to-image (i.e. we are starting from pure noise).
if self.denoising_start > 1e-5:
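The new `add_noise` flag above only changes how the starting latents `x` are formed for image-to-image. A minimal sketch of that first-timestep blend in isolation (plain PyTorch; the helper name and tensor shapes are illustrative, not part of the codebase):

```python
import torch

def initial_latents_sketch(init_latents: torch.Tensor, noise: torch.Tensor, t_0: float, add_noise: bool) -> torch.Tensor:
    # With add_noise=True (the default), the original latents are noised by the
    # amount implied by the first timestep; with add_noise=False they are used as-is.
    if add_noise:
        return t_0 * noise + (1.0 - t_0) * init_latents
    return init_latents

latents = torch.zeros(1, 16, 64, 64)
noise = torch.randn(1, 16, 64, 64)
x = initial_latents_sketch(latents, noise, t_0=0.6, add_noise=True)
```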
89
invokeai/app/invocations/flux_model_loader.py
Normal file
89
invokeai/app/invocations/flux_model_loader.py
Normal file
@@ -0,0 +1,89 @@
|
||||
from typing import Literal
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import (
|
||||
BaseInvocation,
|
||||
BaseInvocationOutput,
|
||||
Classification,
|
||||
invocation,
|
||||
invocation_output,
|
||||
)
|
||||
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
|
||||
from invokeai.app.invocations.model import CLIPField, ModelIdentifierField, T5EncoderField, TransformerField, VAEField
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.backend.flux.util import max_seq_lengths
|
||||
from invokeai.backend.model_manager.config import (
|
||||
CheckpointConfigBase,
|
||||
SubModelType,
|
||||
)
|
||||
|
||||
|
||||
@invocation_output("flux_model_loader_output")
|
||||
class FluxModelLoaderOutput(BaseInvocationOutput):
|
||||
"""Flux base model loader output"""
|
||||
|
||||
transformer: TransformerField = OutputField(description=FieldDescriptions.transformer, title="Transformer")
|
||||
clip: CLIPField = OutputField(description=FieldDescriptions.clip, title="CLIP")
|
||||
t5_encoder: T5EncoderField = OutputField(description=FieldDescriptions.t5_encoder, title="T5 Encoder")
|
||||
vae: VAEField = OutputField(description=FieldDescriptions.vae, title="VAE")
|
||||
max_seq_len: Literal[256, 512] = OutputField(
|
||||
description="The max sequence length to used for the T5 encoder. (256 for schnell transformer, 512 for dev transformer)",
|
||||
title="Max Seq Length",
|
||||
)
|
||||
|
||||
|
||||
@invocation(
|
||||
"flux_model_loader",
|
||||
title="Flux Main Model",
|
||||
tags=["model", "flux"],
|
||||
category="model",
|
||||
version="1.0.4",
|
||||
classification=Classification.Prototype,
|
||||
)
|
||||
class FluxModelLoaderInvocation(BaseInvocation):
|
||||
"""Loads a flux base model, outputting its submodels."""
|
||||
|
||||
model: ModelIdentifierField = InputField(
|
||||
description=FieldDescriptions.flux_model,
|
||||
ui_type=UIType.FluxMainModel,
|
||||
input=Input.Direct,
|
||||
)
|
||||
|
||||
t5_encoder_model: ModelIdentifierField = InputField(
|
||||
description=FieldDescriptions.t5_encoder, ui_type=UIType.T5EncoderModel, input=Input.Direct, title="T5 Encoder"
|
||||
)
|
||||
|
||||
clip_embed_model: ModelIdentifierField = InputField(
|
||||
description=FieldDescriptions.clip_embed_model,
|
||||
ui_type=UIType.CLIPEmbedModel,
|
||||
input=Input.Direct,
|
||||
title="CLIP Embed",
|
||||
)
|
||||
|
||||
vae_model: ModelIdentifierField = InputField(
|
||||
description=FieldDescriptions.vae_model, ui_type=UIType.FluxVAEModel, title="VAE"
|
||||
)
|
||||
|
||||
def invoke(self, context: InvocationContext) -> FluxModelLoaderOutput:
|
||||
for key in [self.model.key, self.t5_encoder_model.key, self.clip_embed_model.key, self.vae_model.key]:
|
||||
if not context.models.exists(key):
|
||||
raise ValueError(f"Unknown model: {key}")
|
||||
|
||||
transformer = self.model.model_copy(update={"submodel_type": SubModelType.Transformer})
|
||||
vae = self.vae_model.model_copy(update={"submodel_type": SubModelType.VAE})
|
||||
|
||||
tokenizer = self.clip_embed_model.model_copy(update={"submodel_type": SubModelType.Tokenizer})
|
||||
clip_encoder = self.clip_embed_model.model_copy(update={"submodel_type": SubModelType.TextEncoder})
|
||||
|
||||
tokenizer2 = self.t5_encoder_model.model_copy(update={"submodel_type": SubModelType.Tokenizer2})
|
||||
t5_encoder = self.t5_encoder_model.model_copy(update={"submodel_type": SubModelType.TextEncoder2})
|
||||
|
||||
transformer_config = context.models.get_config(transformer)
|
||||
assert isinstance(transformer_config, CheckpointConfigBase)
|
||||
|
||||
return FluxModelLoaderOutput(
|
||||
transformer=TransformerField(transformer=transformer, loras=[]),
|
||||
clip=CLIPField(tokenizer=tokenizer, text_encoder=clip_encoder, loras=[], skipped_layers=0),
|
||||
t5_encoder=T5EncoderField(tokenizer=tokenizer2, text_encoder=t5_encoder),
|
||||
vae=VAEField(vae=vae),
|
||||
max_seq_len=max_seq_lengths[transformer_config.config_path],
|
||||
)
|
||||
@@ -5,7 +5,7 @@ import torch
from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5Tokenizer

from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, UIComponent
from invokeai.app.invocations.model import CLIPField, T5EncoderField
from invokeai.app.invocations.primitives import FluxConditioningOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
@@ -41,7 +41,10 @@ class FluxTextEncoderInvocation(BaseInvocation):
t5_max_seq_len: Literal[256, 512] = InputField(
description="Max sequence length for the T5 encoder. Expected to be 256 for FLUX schnell models and 512 for FLUX dev models."
)
prompt: str = InputField(description="Text prompt to encode.")
prompt: str = InputField(
description="Text prompt to encode.",
ui_component=UIComponent.Textarea,
)

@torch.no_grad()
def invoke(self, context: InvocationContext) -> FluxConditioningOutput:
@@ -71,6 +74,7 @@ class FluxTextEncoderInvocation(BaseInvocation):

t5_encoder = HFEncoder(t5_text_encoder, t5_tokenizer, False, self.t5_max_seq_len)

context.util.signal_progress("Running T5 encoder")
prompt_embeds = t5_encoder(prompt)

assert isinstance(prompt_embeds, torch.Tensor)
@@ -111,6 +115,7 @@ class FluxTextEncoderInvocation(BaseInvocation):

clip_encoder = HFEncoder(clip_text_encoder, clip_tokenizer, True, 77)

context.util.signal_progress("Running CLIP encoder")
pooled_prompt_embeds = clip_encoder(prompt)

assert isinstance(pooled_prompt_embeds, torch.Tensor)
@@ -41,7 +41,8 @@ class FluxVaeDecodeInvocation(BaseInvocation, WithMetadata, WithBoard):
def _vae_decode(self, vae_info: LoadedModel, latents: torch.Tensor) -> Image.Image:
with vae_info as vae:
assert isinstance(vae, AutoEncoder)
latents = latents.to(device=TorchDevice.choose_torch_device(), dtype=TorchDevice.choose_torch_dtype())
vae_dtype = next(iter(vae.parameters())).dtype
latents = latents.to(device=TorchDevice.choose_torch_device(), dtype=vae_dtype)
img = vae.decode(latents)

img = img.clamp(-1, 1)
@@ -53,6 +54,7 @@ class FluxVaeDecodeInvocation(BaseInvocation, WithMetadata, WithBoard):
def invoke(self, context: InvocationContext) -> ImageOutput:
latents = context.tensors.load(self.latents.latents_name)
vae_info = context.models.load(self.vae.vae)
context.util.signal_progress("Running VAE")
image = self._vae_decode(vae_info=vae_info, latents=latents)

TorchDevice.empty_cache()
@@ -44,9 +44,8 @@ class FluxVaeEncodeInvocation(BaseInvocation):
generator = torch.Generator(device=TorchDevice.choose_torch_device()).manual_seed(0)
with vae_info as vae:
assert isinstance(vae, AutoEncoder)
image_tensor = image_tensor.to(
device=TorchDevice.choose_torch_device(), dtype=TorchDevice.choose_torch_dtype()
)
vae_dtype = next(iter(vae.parameters())).dtype
image_tensor = image_tensor.to(device=TorchDevice.choose_torch_device(), dtype=vae_dtype)
latents = vae.encode(image_tensor, sample=True, generator=generator)
return latents
@@ -60,6 +59,7 @@ class FluxVaeEncodeInvocation(BaseInvocation):
if image_tensor.dim() == 3:
image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")

context.util.signal_progress("Running VAE")
latents = self.vae_encode(vae_info=vae_info, image_tensor=image_tensor)

latents = latents.to("cpu")
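Both FLUX VAE hunks above replace the globally chosen dtype with the dtype of the loaded VAE's own parameters. A small sketch of that pattern outside InvokeAI (the Conv2d module is a stand-in for the real AutoEncoder, used only to make the example runnable):

```python
import torch

def cast_to_module_dtype(x: torch.Tensor, module: torch.nn.Module) -> torch.Tensor:
    # Read the dtype from the module's own parameters so the input always
    # matches the loaded weights, whatever the global default dtype is.
    module_dtype = next(iter(module.parameters())).dtype
    return x.to(dtype=module_dtype)

vae = torch.nn.Conv2d(4, 4, 1).half()         # stand-in for a VAE loaded in fp16
latents = torch.randn(1, 4, 8, 8)             # fp32 input
latents = cast_to_module_dtype(latents, vae)  # now fp16, matching the weights
```

Reading the dtype from the weights avoids a mismatch when the VAE was loaded in a different precision than the globally selected inference dtype.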
invokeai/app/invocations/image_panels.py (new file, 59 lines)
@@ -0,0 +1,59 @@
from pydantic import ValidationInfo, field_validator

from invokeai.app.invocations.baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
Classification,
invocation,
invocation_output,
)
from invokeai.app.invocations.fields import InputField, OutputField
from invokeai.app.services.shared.invocation_context import InvocationContext


@invocation_output("image_panel_coordinate_output")
class ImagePanelCoordinateOutput(BaseInvocationOutput):
x_left: int = OutputField(description="The left x-coordinate of the panel.")
y_top: int = OutputField(description="The top y-coordinate of the panel.")
width: int = OutputField(description="The width of the panel.")
height: int = OutputField(description="The height of the panel.")


@invocation(
"image_panel_layout",
title="Image Panel Layout",
tags=["image", "panel", "layout"],
category="image",
version="1.0.0",
classification=Classification.Prototype,
)
class ImagePanelLayoutInvocation(BaseInvocation):
"""Get the coordinates of a single panel in a grid. (If the full image shape cannot be divided evenly into panels,
then the grid may not cover the entire image.)
"""

width: int = InputField(description="The width of the entire grid.")
height: int = InputField(description="The height of the entire grid.")
num_cols: int = InputField(ge=1, default=1, description="The number of columns in the grid.")
num_rows: int = InputField(ge=1, default=1, description="The number of rows in the grid.")
panel_col_idx: int = InputField(ge=0, default=0, description="The column index of the panel to be processed.")
panel_row_idx: int = InputField(ge=0, default=0, description="The row index of the panel to be processed.")

@field_validator("panel_col_idx")
def validate_panel_col_idx(cls, v: int, info: ValidationInfo) -> int:
if v < 0 or v >= info.data["num_cols"]:
raise ValueError(f"panel_col_idx must be between 0 and {info.data['num_cols'] - 1}")
return v

@field_validator("panel_row_idx")
def validate_panel_row_idx(cls, v: int, info: ValidationInfo) -> int:
if v < 0 or v >= info.data["num_rows"]:
raise ValueError(f"panel_row_idx must be between 0 and {info.data['num_rows'] - 1}")
return v

def invoke(self, context: InvocationContext) -> ImagePanelCoordinateOutput:
x_left = self.panel_col_idx * (self.width // self.num_cols)
y_top = self.panel_row_idx * (self.height // self.num_rows)
width = self.width // self.num_cols
height = self.height // self.num_rows
return ImagePanelCoordinateOutput(x_left=x_left, y_top=y_top, width=width, height=height)
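As a quick sanity check of the integer panel layout above (a standalone sketch that mirrors the arithmetic in invoke(), not a call into the invocation class itself):

```python
def panel_coordinates(width: int, height: int, num_cols: int, num_rows: int, col_idx: int, row_idx: int):
    # Same integer-division layout as ImagePanelLayoutInvocation.invoke().
    panel_w = width // num_cols
    panel_h = height // num_rows
    return col_idx * panel_w, row_idx * panel_h, panel_w, panel_h

# Bottom-right panel of a 2x2 grid over a 1024x1024 image: (512, 512, 512, 512).
print(panel_coordinates(1024, 1024, num_cols=2, num_rows=2, col_idx=1, row_idx=1))
```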
@@ -117,6 +117,7 @@ class ImageToLatentsInvocation(BaseInvocation):
if image_tensor.dim() == 3:
image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")

context.util.signal_progress("Running VAE encoder")
latents = self.vae_encode(
vae_info=vae_info, upcast=self.fp32, tiled=self.tiled, image_tensor=image_tensor, tile_size=self.tile_size
)
@@ -60,6 +60,7 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
vae_info = context.models.load(self.vae.vae)
assert isinstance(vae_info.model, (AutoencoderKL, AutoencoderTiny))
with SeamlessExt.static_patch_model(vae_info.model, self.vae.seamless_axes), vae_info as vae:
context.util.signal_progress("Running VAE decoder")
assert isinstance(vae, (AutoencoderKL, AutoencoderTiny))
latents = latents.to(vae.device)
if self.fp32:
@@ -147,6 +147,10 @@ GENERATION_MODES = Literal[
"flux_img2img",
"flux_inpaint",
"flux_outpaint",
"sd3_txt2img",
"sd3_img2img",
"sd3_inpaint",
"sd3_outpaint",
]
@@ -1,5 +1,5 @@
|
||||
import copy
|
||||
from typing import List, Literal, Optional
|
||||
from typing import List, Optional
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
@@ -13,11 +13,9 @@ from invokeai.app.invocations.baseinvocation import (
|
||||
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.app.shared.models import FreeUConfig
|
||||
from invokeai.backend.flux.util import max_seq_lengths
|
||||
from invokeai.backend.model_manager.config import (
|
||||
AnyModelConfig,
|
||||
BaseModelType,
|
||||
CheckpointConfigBase,
|
||||
ModelType,
|
||||
SubModelType,
|
||||
)
|
||||
@@ -139,78 +137,6 @@ class ModelIdentifierInvocation(BaseInvocation):
|
||||
return ModelIdentifierOutput(model=self.model)
|
||||
|
||||
|
||||
@invocation_output("flux_model_loader_output")
|
||||
class FluxModelLoaderOutput(BaseInvocationOutput):
|
||||
"""Flux base model loader output"""
|
||||
|
||||
transformer: TransformerField = OutputField(description=FieldDescriptions.transformer, title="Transformer")
|
||||
clip: CLIPField = OutputField(description=FieldDescriptions.clip, title="CLIP")
|
||||
t5_encoder: T5EncoderField = OutputField(description=FieldDescriptions.t5_encoder, title="T5 Encoder")
|
||||
vae: VAEField = OutputField(description=FieldDescriptions.vae, title="VAE")
|
||||
max_seq_len: Literal[256, 512] = OutputField(
|
||||
description="The max sequence length to used for the T5 encoder. (256 for schnell transformer, 512 for dev transformer)",
|
||||
title="Max Seq Length",
|
||||
)
|
||||
|
||||
|
||||
@invocation(
|
||||
"flux_model_loader",
|
||||
title="Flux Main Model",
|
||||
tags=["model", "flux"],
|
||||
category="model",
|
||||
version="1.0.4",
|
||||
classification=Classification.Prototype,
|
||||
)
|
||||
class FluxModelLoaderInvocation(BaseInvocation):
|
||||
"""Loads a flux base model, outputting its submodels."""
|
||||
|
||||
model: ModelIdentifierField = InputField(
|
||||
description=FieldDescriptions.flux_model,
|
||||
ui_type=UIType.FluxMainModel,
|
||||
input=Input.Direct,
|
||||
)
|
||||
|
||||
t5_encoder_model: ModelIdentifierField = InputField(
|
||||
description=FieldDescriptions.t5_encoder, ui_type=UIType.T5EncoderModel, input=Input.Direct, title="T5 Encoder"
|
||||
)
|
||||
|
||||
clip_embed_model: ModelIdentifierField = InputField(
|
||||
description=FieldDescriptions.clip_embed_model,
|
||||
ui_type=UIType.CLIPEmbedModel,
|
||||
input=Input.Direct,
|
||||
title="CLIP Embed",
|
||||
)
|
||||
|
||||
vae_model: ModelIdentifierField = InputField(
|
||||
description=FieldDescriptions.vae_model, ui_type=UIType.FluxVAEModel, title="VAE"
|
||||
)
|
||||
|
||||
def invoke(self, context: InvocationContext) -> FluxModelLoaderOutput:
|
||||
for key in [self.model.key, self.t5_encoder_model.key, self.clip_embed_model.key, self.vae_model.key]:
|
||||
if not context.models.exists(key):
|
||||
raise ValueError(f"Unknown model: {key}")
|
||||
|
||||
transformer = self.model.model_copy(update={"submodel_type": SubModelType.Transformer})
|
||||
vae = self.vae_model.model_copy(update={"submodel_type": SubModelType.VAE})
|
||||
|
||||
tokenizer = self.clip_embed_model.model_copy(update={"submodel_type": SubModelType.Tokenizer})
|
||||
clip_encoder = self.clip_embed_model.model_copy(update={"submodel_type": SubModelType.TextEncoder})
|
||||
|
||||
tokenizer2 = self.t5_encoder_model.model_copy(update={"submodel_type": SubModelType.Tokenizer2})
|
||||
t5_encoder = self.t5_encoder_model.model_copy(update={"submodel_type": SubModelType.TextEncoder2})
|
||||
|
||||
transformer_config = context.models.get_config(transformer)
|
||||
assert isinstance(transformer_config, CheckpointConfigBase)
|
||||
|
||||
return FluxModelLoaderOutput(
|
||||
transformer=TransformerField(transformer=transformer, loras=[]),
|
||||
clip=CLIPField(tokenizer=tokenizer, text_encoder=clip_encoder, loras=[], skipped_layers=0),
|
||||
t5_encoder=T5EncoderField(tokenizer=tokenizer2, text_encoder=t5_encoder),
|
||||
vae=VAEField(vae=vae),
|
||||
max_seq_len=max_seq_lengths[transformer_config.config_path],
|
||||
)
|
||||
|
||||
|
||||
@invocation(
|
||||
"main_model_loader",
|
||||
title="Main Model",
|
||||
|
||||
@@ -1,43 +1,4 @@
|
||||
import io
|
||||
from typing import Literal, Optional
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
import PIL.Image
|
||||
from easing_functions import (
|
||||
BackEaseIn,
|
||||
BackEaseInOut,
|
||||
BackEaseOut,
|
||||
BounceEaseIn,
|
||||
BounceEaseInOut,
|
||||
BounceEaseOut,
|
||||
CircularEaseIn,
|
||||
CircularEaseInOut,
|
||||
CircularEaseOut,
|
||||
CubicEaseIn,
|
||||
CubicEaseInOut,
|
||||
CubicEaseOut,
|
||||
ElasticEaseIn,
|
||||
ElasticEaseInOut,
|
||||
ElasticEaseOut,
|
||||
ExponentialEaseIn,
|
||||
ExponentialEaseInOut,
|
||||
ExponentialEaseOut,
|
||||
LinearInOut,
|
||||
QuadEaseIn,
|
||||
QuadEaseInOut,
|
||||
QuadEaseOut,
|
||||
QuarticEaseIn,
|
||||
QuarticEaseInOut,
|
||||
QuarticEaseOut,
|
||||
QuinticEaseIn,
|
||||
QuinticEaseInOut,
|
||||
QuinticEaseOut,
|
||||
SineEaseIn,
|
||||
SineEaseInOut,
|
||||
SineEaseOut,
|
||||
)
|
||||
from matplotlib.ticker import MaxNLocator
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
|
||||
from invokeai.app.invocations.fields import InputField
|
||||
@@ -65,191 +26,3 @@ class FloatLinearRangeInvocation(BaseInvocation):
|
||||
def invoke(self, context: InvocationContext) -> FloatCollectionOutput:
|
||||
param_list = list(np.linspace(self.start, self.stop, self.steps))
|
||||
return FloatCollectionOutput(collection=param_list)
|
||||
|
||||
|
||||
EASING_FUNCTIONS_MAP = {
|
||||
"Linear": LinearInOut,
|
||||
"QuadIn": QuadEaseIn,
|
||||
"QuadOut": QuadEaseOut,
|
||||
"QuadInOut": QuadEaseInOut,
|
||||
"CubicIn": CubicEaseIn,
|
||||
"CubicOut": CubicEaseOut,
|
||||
"CubicInOut": CubicEaseInOut,
|
||||
"QuarticIn": QuarticEaseIn,
|
||||
"QuarticOut": QuarticEaseOut,
|
||||
"QuarticInOut": QuarticEaseInOut,
|
||||
"QuinticIn": QuinticEaseIn,
|
||||
"QuinticOut": QuinticEaseOut,
|
||||
"QuinticInOut": QuinticEaseInOut,
|
||||
"SineIn": SineEaseIn,
|
||||
"SineOut": SineEaseOut,
|
||||
"SineInOut": SineEaseInOut,
|
||||
"CircularIn": CircularEaseIn,
|
||||
"CircularOut": CircularEaseOut,
|
||||
"CircularInOut": CircularEaseInOut,
|
||||
"ExponentialIn": ExponentialEaseIn,
|
||||
"ExponentialOut": ExponentialEaseOut,
|
||||
"ExponentialInOut": ExponentialEaseInOut,
|
||||
"ElasticIn": ElasticEaseIn,
|
||||
"ElasticOut": ElasticEaseOut,
|
||||
"ElasticInOut": ElasticEaseInOut,
|
||||
"BackIn": BackEaseIn,
|
||||
"BackOut": BackEaseOut,
|
||||
"BackInOut": BackEaseInOut,
|
||||
"BounceIn": BounceEaseIn,
|
||||
"BounceOut": BounceEaseOut,
|
||||
"BounceInOut": BounceEaseInOut,
|
||||
}
|
||||
|
||||
EASING_FUNCTION_KEYS = Literal[tuple(EASING_FUNCTIONS_MAP.keys())]
|
||||
|
||||
|
||||
# actually I think for now could just use CollectionOutput (which is list[Any]
|
||||
@invocation(
|
||||
"step_param_easing",
|
||||
title="Step Param Easing",
|
||||
tags=["step", "easing"],
|
||||
category="step",
|
||||
version="1.0.2",
|
||||
)
|
||||
class StepParamEasingInvocation(BaseInvocation):
|
||||
"""Experimental per-step parameter easing for denoising steps"""
|
||||
|
||||
easing: EASING_FUNCTION_KEYS = InputField(default="Linear", description="The easing function to use")
|
||||
num_steps: int = InputField(default=20, description="number of denoising steps")
|
||||
start_value: float = InputField(default=0.0, description="easing starting value")
|
||||
end_value: float = InputField(default=1.0, description="easing ending value")
|
||||
start_step_percent: float = InputField(default=0.0, description="fraction of steps at which to start easing")
|
||||
end_step_percent: float = InputField(default=1.0, description="fraction of steps after which to end easing")
|
||||
# if None, then start_value is used prior to easing start
|
||||
pre_start_value: Optional[float] = InputField(default=None, description="value before easing start")
|
||||
# if None, then end value is used prior to easing end
|
||||
post_end_value: Optional[float] = InputField(default=None, description="value after easing end")
|
||||
mirror: bool = InputField(default=False, description="include mirror of easing function")
|
||||
# FIXME: add alt_mirror option (alternative to default or mirror), or remove entirely
|
||||
# alt_mirror: bool = InputField(default=False, description="alternative mirroring by dual easing")
|
||||
show_easing_plot: bool = InputField(default=False, description="show easing plot")
|
||||
|
||||
def invoke(self, context: InvocationContext) -> FloatCollectionOutput:
|
||||
log_diagnostics = False
|
||||
# convert from start_step_percent to nearest step <= (steps * start_step_percent)
|
||||
# start_step = int(np.floor(self.num_steps * self.start_step_percent))
|
||||
start_step = int(np.round(self.num_steps * self.start_step_percent))
|
||||
# convert from end_step_percent to nearest step >= (steps * end_step_percent)
|
||||
# end_step = int(np.ceil((self.num_steps - 1) * self.end_step_percent))
|
||||
end_step = int(np.round((self.num_steps - 1) * self.end_step_percent))
|
||||
|
||||
# end_step = int(np.ceil(self.num_steps * self.end_step_percent))
|
||||
num_easing_steps = end_step - start_step + 1
|
||||
|
||||
# num_presteps = max(start_step - 1, 0)
|
||||
num_presteps = start_step
|
||||
num_poststeps = self.num_steps - (num_presteps + num_easing_steps)
|
||||
prelist = list(num_presteps * [self.pre_start_value])
|
||||
postlist = list(num_poststeps * [self.post_end_value])
|
||||
|
||||
if log_diagnostics:
|
||||
context.logger.debug("start_step: " + str(start_step))
|
||||
context.logger.debug("end_step: " + str(end_step))
|
||||
context.logger.debug("num_easing_steps: " + str(num_easing_steps))
|
||||
context.logger.debug("num_presteps: " + str(num_presteps))
|
||||
context.logger.debug("num_poststeps: " + str(num_poststeps))
|
||||
context.logger.debug("prelist size: " + str(len(prelist)))
|
||||
context.logger.debug("postlist size: " + str(len(postlist)))
|
||||
context.logger.debug("prelist: " + str(prelist))
|
||||
context.logger.debug("postlist: " + str(postlist))
|
||||
|
||||
easing_class = EASING_FUNCTIONS_MAP[self.easing]
|
||||
if log_diagnostics:
|
||||
context.logger.debug("easing class: " + str(easing_class))
|
||||
easing_list = []
|
||||
if self.mirror: # "expected" mirroring
|
||||
# if number of steps is even, squeeze duration down to (number_of_steps)/2
|
||||
# and create reverse copy of list to append
|
||||
# if number of steps is odd, squeeze duration down to ceil(number_of_steps/2)
|
||||
# and create reverse copy of list[1:end-1]
|
||||
# but if even then number_of_steps/2 === ceil(number_of_steps/2), so can just use ceil always
|
||||
|
||||
base_easing_duration = int(np.ceil(num_easing_steps / 2.0))
|
||||
if log_diagnostics:
|
||||
context.logger.debug("base easing duration: " + str(base_easing_duration))
|
||||
even_num_steps = num_easing_steps % 2 == 0 # even number of steps
|
||||
easing_function = easing_class(
|
||||
start=self.start_value,
|
||||
end=self.end_value,
|
||||
duration=base_easing_duration - 1,
|
||||
)
|
||||
base_easing_vals = []
|
||||
for step_index in range(base_easing_duration):
|
||||
easing_val = easing_function.ease(step_index)
|
||||
base_easing_vals.append(easing_val)
|
||||
if log_diagnostics:
|
||||
context.logger.debug("step_index: " + str(step_index) + ", easing_val: " + str(easing_val))
|
||||
if even_num_steps:
|
||||
mirror_easing_vals = list(reversed(base_easing_vals))
|
||||
else:
|
||||
mirror_easing_vals = list(reversed(base_easing_vals[0:-1]))
|
||||
if log_diagnostics:
|
||||
context.logger.debug("base easing vals: " + str(base_easing_vals))
|
||||
context.logger.debug("mirror easing vals: " + str(mirror_easing_vals))
|
||||
easing_list = base_easing_vals + mirror_easing_vals
|
||||
|
||||
# FIXME: add alt_mirror option (alternative to default or mirror), or remove entirely
|
||||
# elif self.alt_mirror: # function mirroring (unintuitive behavior (at least to me))
|
||||
# # half_ease_duration = round(num_easing_steps - 1 / 2)
|
||||
# half_ease_duration = round((num_easing_steps - 1) / 2)
|
||||
# easing_function = easing_class(start=self.start_value,
|
||||
# end=self.end_value,
|
||||
# duration=half_ease_duration,
|
||||
# )
|
||||
#
|
||||
# mirror_function = easing_class(start=self.end_value,
|
||||
# end=self.start_value,
|
||||
# duration=half_ease_duration,
|
||||
# )
|
||||
# for step_index in range(num_easing_steps):
|
||||
# if step_index <= half_ease_duration:
|
||||
# step_val = easing_function.ease(step_index)
|
||||
# else:
|
||||
# step_val = mirror_function.ease(step_index - half_ease_duration)
|
||||
# easing_list.append(step_val)
|
||||
# if log_diagnostics: logger.debug(step_index, step_val)
|
||||
#
|
||||
|
||||
else: # no mirroring (default)
|
||||
easing_function = easing_class(
|
||||
start=self.start_value,
|
||||
end=self.end_value,
|
||||
duration=num_easing_steps - 1,
|
||||
)
|
||||
for step_index in range(num_easing_steps):
|
||||
step_val = easing_function.ease(step_index)
|
||||
easing_list.append(step_val)
|
||||
if log_diagnostics:
|
||||
context.logger.debug("step_index: " + str(step_index) + ", easing_val: " + str(step_val))
|
||||
|
||||
if log_diagnostics:
|
||||
context.logger.debug("prelist size: " + str(len(prelist)))
|
||||
context.logger.debug("easing_list size: " + str(len(easing_list)))
|
||||
context.logger.debug("postlist size: " + str(len(postlist)))
|
||||
|
||||
param_list = prelist + easing_list + postlist
|
||||
|
||||
if self.show_easing_plot:
|
||||
plt.figure()
|
||||
plt.xlabel("Step")
|
||||
plt.ylabel("Param Value")
|
||||
plt.title("Per-Step Values Based On Easing: " + self.easing)
|
||||
plt.bar(range(len(param_list)), param_list)
|
||||
# plt.plot(param_list)
|
||||
ax = plt.gca()
|
||||
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
|
||||
buf = io.BytesIO()
|
||||
plt.savefig(buf, format="png")
|
||||
buf.seek(0)
|
||||
im = PIL.Image.open(buf)
|
||||
im.show()
|
||||
buf.close()
|
||||
|
||||
# output array of size steps, each entry list[i] is param value for step i
|
||||
return FloatCollectionOutput(collection=param_list)
|
||||
|
||||
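The Step Param Easing hunk above builds a per-step parameter list from an easing curve. A minimal sketch of the non-mirrored path, using the same easing_functions constructor and ease() call that the node itself uses (the helper name and step count here are illustrative):

```python
from easing_functions import QuadEaseInOut

def eased_step_values(num_steps: int, start: float, end: float) -> list[float]:
    # One value per denoising step, eased from start to end, mirroring the
    # no-mirror branch of StepParamEasingInvocation above.
    fn = QuadEaseInOut(start=start, end=end, duration=num_steps - 1)
    return [fn.ease(i) for i in range(num_steps)]

# e.g. 10 steps ramping a parameter from 0.0 to 1.0
values = eased_step_values(10, 0.0, 1.0)
```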
@@ -4,7 +4,13 @@ from typing import Optional
|
||||
|
||||
import torch
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
|
||||
from invokeai.app.invocations.baseinvocation import (
|
||||
BaseInvocation,
|
||||
BaseInvocationOutput,
|
||||
Classification,
|
||||
invocation,
|
||||
invocation_output,
|
||||
)
|
||||
from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
|
||||
from invokeai.app.invocations.fields import (
|
||||
BoundingBoxField,
|
||||
@@ -18,6 +24,7 @@ from invokeai.app.invocations.fields import (
|
||||
InputField,
|
||||
LatentsField,
|
||||
OutputField,
|
||||
SD3ConditioningField,
|
||||
TensorField,
|
||||
UIComponent,
|
||||
)
|
||||
@@ -426,6 +433,17 @@ class FluxConditioningOutput(BaseInvocationOutput):
|
||||
return cls(conditioning=FluxConditioningField(conditioning_name=conditioning_name))
|
||||
|
||||
|
||||
@invocation_output("sd3_conditioning_output")
|
||||
class SD3ConditioningOutput(BaseInvocationOutput):
|
||||
"""Base class for nodes that output a single SD3 conditioning tensor"""
|
||||
|
||||
conditioning: SD3ConditioningField = OutputField(description=FieldDescriptions.cond)
|
||||
|
||||
@classmethod
|
||||
def build(cls, conditioning_name: str) -> "SD3ConditioningOutput":
|
||||
return cls(conditioning=SD3ConditioningField(conditioning_name=conditioning_name))
|
||||
|
||||
|
||||
@invocation_output("conditioning_output")
|
||||
class ConditioningOutput(BaseInvocationOutput):
|
||||
"""Base class for nodes that output a single conditioning tensor"""
|
||||
@@ -521,3 +539,23 @@ class BoundingBoxInvocation(BaseInvocation):
|
||||
|
||||
|
||||
# endregion
|
||||
|
||||
|
||||
@invocation(
|
||||
"image_batch",
|
||||
title="Image Batch",
|
||||
tags=["primitives", "image", "batch", "internal"],
|
||||
category="primitives",
|
||||
version="1.0.0",
|
||||
classification=Classification.Special,
|
||||
)
|
||||
class ImageBatchInvocation(BaseInvocation):
|
||||
"""Create a batched generation, where the workflow is executed once for each image in the batch."""
|
||||
|
||||
images: list[ImageField] = InputField(min_length=1, description="The images to batch over", input=Input.Direct)
|
||||
|
||||
def __init__(self):
|
||||
raise NotImplementedError("This class should never be executed or instantiated directly.")
|
||||
|
||||
def invoke(self, context: InvocationContext) -> ImageOutput:
|
||||
raise NotImplementedError("This class should never be executed or instantiated directly.")
|
||||
|
||||
338
invokeai/app/invocations/sd3_denoise.py
Normal file
338
invokeai/app/invocations/sd3_denoise.py
Normal file
@@ -0,0 +1,338 @@
|
||||
from typing import Callable, Optional, Tuple
|
||||
|
||||
import torch
|
||||
import torchvision.transforms as tv_transforms
|
||||
from diffusers.models.transformers.transformer_sd3 import SD3Transformer2DModel
|
||||
from torchvision.transforms.functional import resize as tv_resize
|
||||
from tqdm import tqdm
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
|
||||
from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
|
||||
from invokeai.app.invocations.fields import (
|
||||
DenoiseMaskField,
|
||||
FieldDescriptions,
|
||||
Input,
|
||||
InputField,
|
||||
LatentsField,
|
||||
SD3ConditioningField,
|
||||
WithBoard,
|
||||
WithMetadata,
|
||||
)
|
||||
from invokeai.app.invocations.model import TransformerField
|
||||
from invokeai.app.invocations.primitives import LatentsOutput
|
||||
from invokeai.app.invocations.sd3_text_encoder import SD3_T5_MAX_SEQ_LEN
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.backend.flux.sampling_utils import clip_timestep_schedule_fractional
|
||||
from invokeai.backend.model_manager.config import BaseModelType
|
||||
from invokeai.backend.sd3.extensions.inpaint_extension import InpaintExtension
|
||||
from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState
|
||||
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import SD3ConditioningInfo
from invokeai.backend.util.devices import TorchDevice


@invocation(
    "sd3_denoise",
    title="SD3 Denoise",
    tags=["image", "sd3"],
    category="image",
    version="1.1.0",
    classification=Classification.Prototype,
)
class SD3DenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
    """Run denoising process with a SD3 model."""

    # If latents is provided, this means we are doing image-to-image.
    latents: Optional[LatentsField] = InputField(
        default=None, description=FieldDescriptions.latents, input=Input.Connection
    )
    # denoise_mask is used for image-to-image inpainting. Only the masked region is modified.
    denoise_mask: Optional[DenoiseMaskField] = InputField(
        default=None, description=FieldDescriptions.denoise_mask, input=Input.Connection
    )
    denoising_start: float = InputField(default=0.0, ge=0, le=1, description=FieldDescriptions.denoising_start)
    denoising_end: float = InputField(default=1.0, ge=0, le=1, description=FieldDescriptions.denoising_end)
    transformer: TransformerField = InputField(
        description=FieldDescriptions.sd3_model, input=Input.Connection, title="Transformer"
    )
    positive_conditioning: SD3ConditioningField = InputField(
        description=FieldDescriptions.positive_cond, input=Input.Connection
    )
    negative_conditioning: SD3ConditioningField = InputField(
        description=FieldDescriptions.negative_cond, input=Input.Connection
    )
    cfg_scale: float | list[float] = InputField(default=3.5, description=FieldDescriptions.cfg_scale, title="CFG Scale")
    width: int = InputField(default=1024, multiple_of=16, description="Width of the generated image.")
    height: int = InputField(default=1024, multiple_of=16, description="Height of the generated image.")
    steps: int = InputField(default=10, gt=0, description=FieldDescriptions.steps)
    seed: int = InputField(default=0, description="Randomness seed for reproducibility.")

    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> LatentsOutput:
        latents = self._run_diffusion(context)
        latents = latents.detach().to("cpu")

        name = context.tensors.save(tensor=latents)
        return LatentsOutput.build(latents_name=name, latents=latents, seed=None)

    def _prep_inpaint_mask(self, context: InvocationContext, latents: torch.Tensor) -> torch.Tensor | None:
        """Prepare the inpaint mask.

        - Loads the mask
        - Resizes if necessary
        - Casts to same device/dtype as latents

        Args:
            context (InvocationContext): The invocation context, for loading the inpaint mask.
            latents (torch.Tensor): A latent image tensor. Used to determine the target shape, device, and dtype for the
                inpaint mask.

        Returns:
            torch.Tensor | None: Inpaint mask. Values of 0.0 represent the regions to be fully denoised, and 1.0
                represent the regions to be preserved.
        """
        if self.denoise_mask is None:
            return None
        mask = context.tensors.load(self.denoise_mask.mask_name)

        # The input denoise_mask contains values in [0, 1], where 0.0 represents the regions to be fully denoised, and
        # 1.0 represents the regions to be preserved.
        # We invert the mask so that the regions to be preserved are 0.0 and the regions to be denoised are 1.0.
        mask = 1.0 - mask

        _, _, latent_height, latent_width = latents.shape
        mask = tv_resize(
            img=mask,
            size=[latent_height, latent_width],
            interpolation=tv_transforms.InterpolationMode.BILINEAR,
            antialias=False,
        )

        mask = mask.to(device=latents.device, dtype=latents.dtype)
        return mask

    def _load_text_conditioning(
        self,
        context: InvocationContext,
        conditioning_name: str,
        joint_attention_dim: int,
        dtype: torch.dtype,
        device: torch.device,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # Load the conditioning data.
        cond_data = context.conditioning.load(conditioning_name)
        assert len(cond_data.conditionings) == 1
        sd3_conditioning = cond_data.conditionings[0]
        assert isinstance(sd3_conditioning, SD3ConditioningInfo)
        sd3_conditioning = sd3_conditioning.to(dtype=dtype, device=device)

        t5_embeds = sd3_conditioning.t5_embeds
        if t5_embeds is None:
            t5_embeds = torch.zeros(
                (1, SD3_T5_MAX_SEQ_LEN, joint_attention_dim),
                device=device,
                dtype=dtype,
            )

        clip_prompt_embeds = torch.cat([sd3_conditioning.clip_l_embeds, sd3_conditioning.clip_g_embeds], dim=-1)
        clip_prompt_embeds = torch.nn.functional.pad(
            clip_prompt_embeds, (0, t5_embeds.shape[-1] - clip_prompt_embeds.shape[-1])
        )

        prompt_embeds = torch.cat([clip_prompt_embeds, t5_embeds], dim=-2)
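        # Note on shapes (values are assumptions from the SD3/SD3.5 reference configs, not read from the model):
        # clip_l_embeds is roughly (1, 77, 768) and clip_g_embeds (1, 77, 1280); concatenated and zero-padded on the
        # last dim they become (1, 77, joint_attention_dim). Concatenating with t5_embeds along dim=-2 then yields
        # prompt_embeds of shape (1, 77 + SD3_T5_MAX_SEQ_LEN, joint_attention_dim), which is what the SD3 transformer
        # expects as encoder_hidden_states.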
        pooled_prompt_embeds = torch.cat(
            [sd3_conditioning.clip_l_pooled_embeds, sd3_conditioning.clip_g_pooled_embeds], dim=-1
        )

        return prompt_embeds, pooled_prompt_embeds

    def _get_noise(
        self,
        num_samples: int,
        num_channels_latents: int,
        height: int,
        width: int,
        dtype: torch.dtype,
        device: torch.device,
        seed: int,
    ) -> torch.Tensor:
        # We always generate noise on the same device and dtype then cast to ensure consistency across devices/dtypes.
        rand_device = "cpu"
        rand_dtype = torch.float16

        return torch.randn(
            num_samples,
            num_channels_latents,
            int(height) // LATENT_SCALE_FACTOR,
            int(width) // LATENT_SCALE_FACTOR,
            device=rand_device,
            dtype=rand_dtype,
            generator=torch.Generator(device=rand_device).manual_seed(seed),
        ).to(device=device, dtype=dtype)

    def _prepare_cfg_scale(self, num_timesteps: int) -> list[float]:
        """Prepare the CFG scale list.

        Args:
            num_timesteps (int): The number of timesteps in the scheduler. Could be different from num_steps depending
                on the scheduler used (e.g. higher order schedulers).

        Returns:
            list[float]: The CFG scale to apply at each timestep.
        """
        if isinstance(self.cfg_scale, float):
            cfg_scale = [self.cfg_scale] * num_timesteps
        elif isinstance(self.cfg_scale, list):
            assert len(self.cfg_scale) == num_timesteps
            cfg_scale = self.cfg_scale
        else:
            raise ValueError(f"Invalid CFG scale type: {type(self.cfg_scale)}")

        return cfg_scale

    def _run_diffusion(
        self,
        context: InvocationContext,
    ):
        inference_dtype = TorchDevice.choose_torch_dtype()
        device = TorchDevice.choose_torch_device()

        transformer_info = context.models.load(self.transformer.transformer)

        # Load/process the conditioning data.
        # TODO(ryand): Make CFG optional.
        do_classifier_free_guidance = True
        pos_prompt_embeds, pos_pooled_prompt_embeds = self._load_text_conditioning(
            context=context,
            conditioning_name=self.positive_conditioning.conditioning_name,
            joint_attention_dim=transformer_info.model.config.joint_attention_dim,
            dtype=inference_dtype,
            device=device,
        )
        neg_prompt_embeds, neg_pooled_prompt_embeds = self._load_text_conditioning(
            context=context,
            conditioning_name=self.negative_conditioning.conditioning_name,
            joint_attention_dim=transformer_info.model.config.joint_attention_dim,
            dtype=inference_dtype,
            device=device,
        )
        # TODO(ryand): Support both sequential and batched CFG inference.
        prompt_embeds = torch.cat([neg_prompt_embeds, pos_prompt_embeds], dim=0)
        pooled_prompt_embeds = torch.cat([neg_pooled_prompt_embeds, pos_pooled_prompt_embeds], dim=0)

        # Prepare the timestep schedule.
        # We add an extra step to the end to account for the final timestep of 0.0.
        timesteps: list[float] = torch.linspace(1, 0, self.steps + 1).tolist()
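        # For example, steps=4 yields timesteps [1.0, 0.75, 0.5, 0.25, 0.0]: five boundary values that define four
        # denoising intervals from pure noise (t=1.0) down to the final image (t=0.0).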
        # Clip the timesteps schedule based on denoising_start and denoising_end.
        timesteps = clip_timestep_schedule_fractional(timesteps, self.denoising_start, self.denoising_end)
        total_steps = len(timesteps) - 1

        # Prepare the CFG scale list.
        cfg_scale = self._prepare_cfg_scale(total_steps)

        # Load the input latents, if provided.
        init_latents = context.tensors.load(self.latents.latents_name) if self.latents else None
        if init_latents is not None:
            init_latents = init_latents.to(device=device, dtype=inference_dtype)

        # Generate initial latent noise.
        num_channels_latents = transformer_info.model.config.in_channels
        assert isinstance(num_channels_latents, int)
        noise = self._get_noise(
            num_samples=1,
            num_channels_latents=num_channels_latents,
            height=self.height,
            width=self.width,
            dtype=inference_dtype,
            device=device,
            seed=self.seed,
        )

        # Prepare input latent image.
        if init_latents is not None:
            # Noise the init_latents by the appropriate amount for the first timestep.
            t_0 = timesteps[0]
            latents = t_0 * noise + (1.0 - t_0) * init_latents
        else:
            # init_latents are not provided, so we are not doing image-to-image (i.e. we are starting from pure noise).
            if self.denoising_start > 1e-5:
                raise ValueError("denoising_start should be 0 when initial latents are not provided.")
            latents = noise

        # If len(timesteps) == 1, then short-circuit. We are just noising the input latents, but not taking any
        # denoising steps.
        if len(timesteps) <= 1:
            return latents

        # Prepare inpaint extension.
        inpaint_mask = self._prep_inpaint_mask(context, latents)
        inpaint_extension: InpaintExtension | None = None
        if inpaint_mask is not None:
            assert init_latents is not None
            inpaint_extension = InpaintExtension(
                init_latents=init_latents,
                inpaint_mask=inpaint_mask,
                noise=noise,
            )

        step_callback = self._build_step_callback(context)

        step_callback(
            PipelineIntermediateState(
                step=0,
                order=1,
                total_steps=total_steps,
                timestep=int(timesteps[0]),
                latents=latents,
            ),
        )

        with transformer_info.model_on_device() as (cached_weights, transformer):
            assert isinstance(transformer, SD3Transformer2DModel)

            # 6. Denoising loop
            for step_idx, (t_curr, t_prev) in tqdm(list(enumerate(zip(timesteps[:-1], timesteps[1:], strict=True)))):
                # Expand the latents if we are doing CFG.
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                # Expand the timestep to match the latent model input.
                # Multiply by 1000 to match the default FlowMatchEulerDiscreteScheduler num_train_timesteps.
                timestep = torch.tensor([t_curr * 1000], device=device).expand(latent_model_input.shape[0])

                noise_pred = transformer(
                    hidden_states=latent_model_input,
                    timestep=timestep,
                    encoder_hidden_states=prompt_embeds,
                    pooled_projections=pooled_prompt_embeds,
                    joint_attention_kwargs=None,
                    return_dict=False,
                )[0]

                # Apply CFG.
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + cfg_scale[step_idx] * (noise_pred_cond - noise_pred_uncond)
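                    # i.e. classifier-free guidance: pred = uncond + scale * (cond - uncond). A scale of 1.0 reduces
                    # to the conditional prediction; larger scales push the result further from the unconditional one.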

                # Compute the previous noisy sample x_t -> x_t-1.
                latents_dtype = latents.dtype
                latents = latents.to(dtype=torch.float32)
                latents = latents + (t_prev - t_curr) * noise_pred
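                # The update above is an explicit Euler step of the rectified-flow ODE: x(t_prev) = x(t_curr) + (t_prev - t_curr) * v.
                # Since t_prev < t_curr, each step moves the latents from noise (t=1) toward the image (t=0).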
                latents = latents.to(dtype=latents_dtype)

                if inpaint_extension is not None:
                    latents = inpaint_extension.merge_intermediate_latents_with_init_latents(latents, t_prev)

                step_callback(
                    PipelineIntermediateState(
                        step=step_idx + 1,
                        order=1,
                        total_steps=total_steps,
                        timestep=int(t_curr),
                        latents=latents,
                    ),
                )

        return latents

    def _build_step_callback(self, context: InvocationContext) -> Callable[[PipelineIntermediateState], None]:
        def step_callback(state: PipelineIntermediateState) -> None:
            context.util.sd_step_callback(state, BaseModelType.StableDiffusion3)

        return step_callback
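For image-to-image, the loop above starts from a blend of noise and the encoded input: latents = t_0 * noise + (1 - t_0) * init_latents, where t_0 is the first timestep after clipping. A minimal sketch of how denoising_start controls that blend, assuming the clipped schedule simply starts at roughly 1 - denoising_start (the exact mapping is owned by clip_timestep_schedule_fractional and may differ):

import torch

def initial_latents(init_latents: torch.Tensor, noise: torch.Tensor, denoising_start: float) -> torch.Tensor:
    # Illustration only, not a helper from the codebase: assume t_0 is approximately 1 - denoising_start.
    t_0 = 1.0 - denoising_start
    return t_0 * noise + (1.0 - t_0) * init_latents

# denoising_start=0.0 keeps nothing of the input (pure noise); denoising_start=0.8 keeps most of it.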
invokeai/app/invocations/sd3_image_to_latents.py (new file, 65 lines)
@@ -0,0 +1,65 @@
import einops
import torch
from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL

from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
from invokeai.app.invocations.fields import (
    FieldDescriptions,
    ImageField,
    Input,
    InputField,
    WithBoard,
    WithMetadata,
)
from invokeai.app.invocations.model import VAEField
from invokeai.app.invocations.primitives import LatentsOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.load.load_base import LoadedModel
from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor


@invocation(
    "sd3_i2l",
    title="SD3 Image to Latents",
    tags=["image", "latents", "vae", "i2l", "sd3"],
    category="image",
    version="1.0.0",
    classification=Classification.Prototype,
)
class SD3ImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):
    """Generates latents from an image."""

    image: ImageField = InputField(description="The image to encode")
    vae: VAEField = InputField(description=FieldDescriptions.vae, input=Input.Connection)

    @staticmethod
    def vae_encode(vae_info: LoadedModel, image_tensor: torch.Tensor) -> torch.Tensor:
        with vae_info as vae:
            assert isinstance(vae, AutoencoderKL)

            vae.disable_tiling()

            image_tensor = image_tensor.to(device=vae.device, dtype=vae.dtype)
            with torch.inference_mode():
                image_tensor_dist = vae.encode(image_tensor).latent_dist
                # TODO: Use seed to make sampling reproducible.
                latents: torch.Tensor = image_tensor_dist.sample().to(dtype=vae.dtype)

            latents = vae.config.scaling_factor * latents

        return latents

    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> LatentsOutput:
        image = context.images.get_pil(self.image.image_name)

        image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
        if image_tensor.dim() == 3:
            image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")

        vae_info = context.models.load(self.vae.vae)
        latents = self.vae_encode(vae_info=vae_info, image_tensor=image_tensor)

        latents = latents.to("cpu")
        name = context.tensors.save(tensor=latents)
        return LatentsOutput.build(latents_name=name, latents=latents, seed=None)
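The scaling_factor multiplication here is the counterpart of the division done in SD3 Latents to Image below: encoding scales the raw VAE latents to roughly unit variance for the diffusion model, decoding scales them back. A minimal round-trip sketch, assuming a loaded AutoencoderKL (illustrative only, not code from the repo):

import torch
from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL

def vae_round_trip(vae: AutoencoderKL, image_tensor: torch.Tensor) -> torch.Tensor:
    # Encode exactly as sd3_i2l stores latents, then undo the scaling exactly as sd3_l2i does before decoding.
    latents = vae.config.scaling_factor * vae.encode(image_tensor).latent_dist.sample()
    return vae.decode(latents / vae.config.scaling_factor, return_dict=False)[0]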
invokeai/app/invocations/sd3_latents_to_image.py (new file, 74 lines)
@@ -0,0 +1,74 @@
from contextlib import nullcontext

import torch
from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL
from einops import rearrange
from PIL import Image

from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.fields import (
    FieldDescriptions,
    Input,
    InputField,
    LatentsField,
    WithBoard,
    WithMetadata,
)
from invokeai.app.invocations.model import VAEField
from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.stable_diffusion.extensions.seamless import SeamlessExt
from invokeai.backend.util.devices import TorchDevice


@invocation(
    "sd3_l2i",
    title="SD3 Latents to Image",
    tags=["latents", "image", "vae", "l2i", "sd3"],
    category="latents",
    version="1.3.0",
)
class SD3LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
    """Generates an image from latents."""

    latents: LatentsField = InputField(
        description=FieldDescriptions.latents,
        input=Input.Connection,
    )
    vae: VAEField = InputField(
        description=FieldDescriptions.vae,
        input=Input.Connection,
    )

    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> ImageOutput:
        latents = context.tensors.load(self.latents.latents_name)

        vae_info = context.models.load(self.vae.vae)
        assert isinstance(vae_info.model, (AutoencoderKL))
        with SeamlessExt.static_patch_model(vae_info.model, self.vae.seamless_axes), vae_info as vae:
            context.util.signal_progress("Running VAE")
            assert isinstance(vae, (AutoencoderKL))
            latents = latents.to(vae.device)

            vae.disable_tiling()

            tiling_context = nullcontext()

            # clear memory as vae decode can request a lot
            TorchDevice.empty_cache()

            with torch.inference_mode(), tiling_context:
                # copied from diffusers pipeline
                latents = latents / vae.config.scaling_factor
                img = vae.decode(latents, return_dict=False)[0]

            img = img.clamp(-1, 1)
            img = rearrange(img[0], "c h w -> h w c")  # noqa: F821
            img_pil = Image.fromarray((127.5 * (img + 1.0)).byte().cpu().numpy())

        TorchDevice.empty_cache()

        image_dto = context.images.save(image=img_pil)

        return ImageOutput.build(image_dto)
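The final conversion maps the decoder's [-1, 1] output onto 8-bit pixels: 127.5 * (x + 1) sends -1 to 0 and 1 to 255, with .byte() truncating intermediate values (0.0 becomes 127). A self-contained check of that mapping:

import torch

x = torch.tensor([-1.0, 0.0, 1.0])
print((127.5 * (x + 1.0)).byte())  # tensor([  0, 127, 255], dtype=torch.uint8)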
invokeai/app/invocations/sd3_model_loader.py (new file, 108 lines)
@@ -0,0 +1,108 @@
from typing import Optional

from invokeai.app.invocations.baseinvocation import (
    BaseInvocation,
    BaseInvocationOutput,
    Classification,
    invocation,
    invocation_output,
)
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
from invokeai.app.invocations.model import CLIPField, ModelIdentifierField, T5EncoderField, TransformerField, VAEField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.config import SubModelType


@invocation_output("sd3_model_loader_output")
class Sd3ModelLoaderOutput(BaseInvocationOutput):
    """SD3 base model loader output."""

    transformer: TransformerField = OutputField(description=FieldDescriptions.transformer, title="Transformer")
    clip_l: CLIPField = OutputField(description=FieldDescriptions.clip, title="CLIP L")
    clip_g: CLIPField = OutputField(description=FieldDescriptions.clip, title="CLIP G")
    t5_encoder: T5EncoderField = OutputField(description=FieldDescriptions.t5_encoder, title="T5 Encoder")
    vae: VAEField = OutputField(description=FieldDescriptions.vae, title="VAE")


@invocation(
    "sd3_model_loader",
    title="SD3 Main Model",
    tags=["model", "sd3"],
    category="model",
    version="1.0.0",
    classification=Classification.Prototype,
)
class Sd3ModelLoaderInvocation(BaseInvocation):
    """Loads a SD3 base model, outputting its submodels."""

    model: ModelIdentifierField = InputField(
        description=FieldDescriptions.sd3_model,
        ui_type=UIType.SD3MainModel,
        input=Input.Direct,
    )

    t5_encoder_model: Optional[ModelIdentifierField] = InputField(
        description=FieldDescriptions.t5_encoder,
        ui_type=UIType.T5EncoderModel,
        input=Input.Direct,
        title="T5 Encoder",
        default=None,
    )

    clip_l_model: Optional[ModelIdentifierField] = InputField(
        description=FieldDescriptions.clip_embed_model,
        ui_type=UIType.CLIPLEmbedModel,
        input=Input.Direct,
        title="CLIP L Encoder",
        default=None,
    )

    clip_g_model: Optional[ModelIdentifierField] = InputField(
        description=FieldDescriptions.clip_g_model,
        ui_type=UIType.CLIPGEmbedModel,
        input=Input.Direct,
        title="CLIP G Encoder",
        default=None,
    )

    vae_model: Optional[ModelIdentifierField] = InputField(
        description=FieldDescriptions.vae_model, ui_type=UIType.VAEModel, title="VAE", default=None
    )

    def invoke(self, context: InvocationContext) -> Sd3ModelLoaderOutput:
        transformer = self.model.model_copy(update={"submodel_type": SubModelType.Transformer})
        vae = (
            self.vae_model.model_copy(update={"submodel_type": SubModelType.VAE})
            if self.vae_model
            else self.model.model_copy(update={"submodel_type": SubModelType.VAE})
        )
        tokenizer_l = self.model.model_copy(update={"submodel_type": SubModelType.Tokenizer})
        clip_encoder_l = (
            self.clip_l_model.model_copy(update={"submodel_type": SubModelType.TextEncoder})
            if self.clip_l_model
            else self.model.model_copy(update={"submodel_type": SubModelType.TextEncoder})
        )
        tokenizer_g = self.model.model_copy(update={"submodel_type": SubModelType.Tokenizer2})
        clip_encoder_g = (
            self.clip_g_model.model_copy(update={"submodel_type": SubModelType.TextEncoder2})
            if self.clip_g_model
            else self.model.model_copy(update={"submodel_type": SubModelType.TextEncoder2})
        )
        tokenizer_t5 = (
            self.t5_encoder_model.model_copy(update={"submodel_type": SubModelType.Tokenizer3})
            if self.t5_encoder_model
            else self.model.model_copy(update={"submodel_type": SubModelType.Tokenizer3})
        )
        t5_encoder = (
            self.t5_encoder_model.model_copy(update={"submodel_type": SubModelType.TextEncoder3})
            if self.t5_encoder_model
            else self.model.model_copy(update={"submodel_type": SubModelType.TextEncoder3})
        )

        return Sd3ModelLoaderOutput(
            transformer=TransformerField(transformer=transformer, loras=[]),
            clip_l=CLIPField(tokenizer=tokenizer_l, text_encoder=clip_encoder_l, loras=[], skipped_layers=0),
            clip_g=CLIPField(tokenizer=tokenizer_g, text_encoder=clip_encoder_g, loras=[], skipped_layers=0),
            t5_encoder=T5EncoderField(tokenizer=tokenizer_t5, text_encoder=t5_encoder),
            vae=VAEField(vae=vae),
        )
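Every optional input above overrides one submodel of the main SD3 checkpoint, and the loader falls back to the checkpoint's own submodel when the input is left unset. A minimal sketch of that fallback pattern; the pick_submodel helper is hypothetical and only makes the rule explicit:

from typing import Optional

from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.backend.model_manager.config import SubModelType

def pick_submodel(
    override: Optional[ModelIdentifierField], main: ModelIdentifierField, submodel: SubModelType
) -> ModelIdentifierField:
    # Use the explicitly selected model if one was provided, otherwise the main model's bundled submodel.
    chosen = override if override is not None else main
    return chosen.model_copy(update={"submodel_type": submodel})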
invokeai/app/invocations/sd3_text_encoder.py (new file, 201 lines)
@@ -0,0 +1,201 @@
from contextlib import ExitStack
from typing import Iterator, Tuple

import torch
from transformers import (
    CLIPTextModel,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    T5EncoderModel,
    T5Tokenizer,
    T5TokenizerFast,
)

from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField
from invokeai.app.invocations.model import CLIPField, T5EncoderField
from invokeai.app.invocations.primitives import SD3ConditioningOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.lora.conversions.flux_lora_constants import FLUX_LORA_CLIP_PREFIX
from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
from invokeai.backend.lora.lora_patcher import LoRAPatcher
from invokeai.backend.model_manager.config import ModelFormat
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData, SD3ConditioningInfo

# The SD3 T5 Max Sequence Length set based on the default in diffusers.
SD3_T5_MAX_SEQ_LEN = 256


@invocation(
    "sd3_text_encoder",
    title="SD3 Text Encoding",
    tags=["prompt", "conditioning", "sd3"],
    category="conditioning",
    version="1.0.0",
    classification=Classification.Prototype,
)
class Sd3TextEncoderInvocation(BaseInvocation):
    """Encodes and preps a prompt for a SD3 image."""

    clip_l: CLIPField = InputField(
        title="CLIP L",
        description=FieldDescriptions.clip,
        input=Input.Connection,
    )
    clip_g: CLIPField = InputField(
        title="CLIP G",
        description=FieldDescriptions.clip,
        input=Input.Connection,
    )

    # The SD3 models were trained with text encoder dropout, so the T5 encoder can be omitted to save time/memory.
    t5_encoder: T5EncoderField | None = InputField(
        title="T5Encoder",
        default=None,
        description=FieldDescriptions.t5_encoder,
        input=Input.Connection,
    )
    prompt: str = InputField(description="Text prompt to encode.")

    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> SD3ConditioningOutput:
        # Note: The text encoding models are run in separate functions to ensure that all model references are locally
        # scoped. This ensures that earlier models can be freed and gc'd before loading later models (if necessary).
        clip_l_embeddings, clip_l_pooled_embeddings = self._clip_encode(context, self.clip_l)
        clip_g_embeddings, clip_g_pooled_embeddings = self._clip_encode(context, self.clip_g)

        t5_embeddings: torch.Tensor | None = None
        if self.t5_encoder is not None:
            t5_embeddings = self._t5_encode(context, SD3_T5_MAX_SEQ_LEN)

        conditioning_data = ConditioningFieldData(
            conditionings=[
                SD3ConditioningInfo(
                    clip_l_embeds=clip_l_embeddings,
                    clip_l_pooled_embeds=clip_l_pooled_embeddings,
                    clip_g_embeds=clip_g_embeddings,
                    clip_g_pooled_embeds=clip_g_pooled_embeddings,
                    t5_embeds=t5_embeddings,
                )
            ]
        )

        conditioning_name = context.conditioning.save(conditioning_data)
        return SD3ConditioningOutput.build(conditioning_name)

    def _t5_encode(self, context: InvocationContext, max_seq_len: int) -> torch.Tensor:
        assert self.t5_encoder is not None
        t5_tokenizer_info = context.models.load(self.t5_encoder.tokenizer)
        t5_text_encoder_info = context.models.load(self.t5_encoder.text_encoder)

        prompt = [self.prompt]

        with (
            t5_text_encoder_info as t5_text_encoder,
            t5_tokenizer_info as t5_tokenizer,
        ):
            context.util.signal_progress("Running T5 encoder")
            assert isinstance(t5_text_encoder, T5EncoderModel)
            assert isinstance(t5_tokenizer, (T5Tokenizer, T5TokenizerFast))

            text_inputs = t5_tokenizer(
                prompt,
                padding="max_length",
                max_length=max_seq_len,
                truncation=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids
            untruncated_ids = t5_tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
            assert isinstance(text_input_ids, torch.Tensor)
            assert isinstance(untruncated_ids, torch.Tensor)
            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                text_input_ids, untruncated_ids
            ):
                removed_text = t5_tokenizer.batch_decode(untruncated_ids[:, max_seq_len - 1 : -1])
                context.logger.warning(
                    "The following part of your input was truncated because `max_sequence_length` is set to "
                    f" {max_seq_len} tokens: {removed_text}"
                )

            prompt_embeds = t5_text_encoder(text_input_ids.to(t5_text_encoder.device))[0]

        assert isinstance(prompt_embeds, torch.Tensor)
        return prompt_embeds

    def _clip_encode(
        self, context: InvocationContext, clip_model: CLIPField, tokenizer_max_length: int = 77
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        clip_tokenizer_info = context.models.load(clip_model.tokenizer)
        clip_text_encoder_info = context.models.load(clip_model.text_encoder)

        prompt = [self.prompt]

        with (
            clip_text_encoder_info.model_on_device() as (cached_weights, clip_text_encoder),
            clip_tokenizer_info as clip_tokenizer,
            ExitStack() as exit_stack,
        ):
            context.util.signal_progress("Running CLIP encoder")
            assert isinstance(clip_text_encoder, (CLIPTextModel, CLIPTextModelWithProjection))
            assert isinstance(clip_tokenizer, CLIPTokenizer)

            clip_text_encoder_config = clip_text_encoder_info.config
            assert clip_text_encoder_config is not None

            # Apply LoRA models to the CLIP encoder.
            # Note: We apply the LoRA after the transformer has been moved to its target device for faster patching.
            if clip_text_encoder_config.format in [ModelFormat.Diffusers]:
                # The model is non-quantized, so we can apply the LoRA weights directly into the model.
                exit_stack.enter_context(
                    LoRAPatcher.apply_lora_patches(
                        model=clip_text_encoder,
                        patches=self._clip_lora_iterator(context, clip_model),
                        prefix=FLUX_LORA_CLIP_PREFIX,
                        cached_weights=cached_weights,
                    )
                )
            else:
                # There are currently no supported CLIP quantized models. Add support here if needed.
                raise ValueError(f"Unsupported model format: {clip_text_encoder_config.format}")

            clip_text_encoder = clip_text_encoder.eval().requires_grad_(False)

            text_inputs = clip_tokenizer(
                prompt,
                padding="max_length",
                max_length=tokenizer_max_length,
                truncation=True,
                return_tensors="pt",
            )

            text_input_ids = text_inputs.input_ids
            untruncated_ids = clip_tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
            assert isinstance(text_input_ids, torch.Tensor)
            assert isinstance(untruncated_ids, torch.Tensor)
            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                text_input_ids, untruncated_ids
            ):
                removed_text = clip_tokenizer.batch_decode(untruncated_ids[:, tokenizer_max_length - 1 : -1])
                context.logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {tokenizer_max_length} tokens: {removed_text}"
                )
            prompt_embeds = clip_text_encoder(
                input_ids=text_input_ids.to(clip_text_encoder.device), output_hidden_states=True
            )
            pooled_prompt_embeds = prompt_embeds[0]
            prompt_embeds = prompt_embeds.hidden_states[-2]

            return prompt_embeds, pooled_prompt_embeds

    def _clip_lora_iterator(
        self, context: InvocationContext, clip_model: CLIPField
    ) -> Iterator[Tuple[LoRAModelRaw, float]]:
        for lora in clip_model.loras:
            lora_info = context.models.load(lora.lora)
            assert isinstance(lora_info.model, LoRAModelRaw)
            yield (lora_info.model, lora.weight)
            del lora_info
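Both _t5_encode and _clip_encode detect prompt truncation the same way: tokenize once padded/truncated to the fixed maximum length and once padded only to the longest sequence, then warn if the two disagree. A small self-contained illustration of that check with made-up token ids (torch.equal returns False for tensors of different shapes, which is what flags the overflow):

import torch

truncated = torch.tensor([[49406, 320, 1125, 539, 49407]])          # padding="max_length", max_length=5
untruncated = torch.tensor([[49406, 320, 1125, 539, 1237, 49407]])  # padding="longest"
was_truncated = untruncated.shape[-1] >= truncated.shape[-1] and not torch.equal(truncated, untruncated)
print(was_truncated)  # True -> the overflowing ids would be decoded and logged as a warning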
@@ -5,7 +5,7 @@ from typing import Literal
import numpy as np
import torch
from PIL import Image
-from pydantic import BaseModel, Field, model_validator
+from pydantic import BaseModel, Field
from transformers import AutoModelForMaskGeneration, AutoProcessor
from transformers.models.sam import SamModel
from transformers.models.sam.processing_sam import SamProcessor

@@ -77,19 +77,14 @@ class SegmentAnythingInvocation(BaseInvocation):
        default="all",
    )

-    @model_validator(mode="after")
-    def check_point_lists_or_bounding_box(self):
-        if self.point_lists is None and self.bounding_boxes is None:
-            raise ValueError("Either point_lists or bounding_box must be provided.")
-        elif self.point_lists is not None and self.bounding_boxes is not None:
-            raise ValueError("Only one of point_lists or bounding_box can be provided.")
-        return self
-
    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> MaskOutput:
        # The models expect a 3-channel RGB image.
        image_pil = context.images.get_pil(self.image.image_name, mode="RGB")

+        if self.point_lists is not None and self.bounding_boxes is not None:
+            raise ValueError("Only one of point_lists or bounding_box can be provided.")
+
        if (not self.bounding_boxes or len(self.bounding_boxes) == 0) and (
            not self.point_lists or len(self.point_lists) == 0
        ):

@@ -86,7 +86,7 @@ class ModelLoadService(ModelLoadServiceBase):

        def torch_load_file(checkpoint: Path) -> AnyModel:
            scan_result = scan_file_path(checkpoint)
-            if scan_result.infected_files != 0:
+            if scan_result.infected_files != 0 or scan_result.scan_err:
                raise Exception("The model at {checkpoint} is potentially infected by malware. Aborting load.")
            result = torch_load(checkpoint, map_location="cpu")
            return result

@@ -15,6 +15,7 @@ from invokeai.app.util.model_exclude_null import BaseModelExcludeNull
from invokeai.backend.model_manager.config import (
    AnyModelConfig,
    BaseModelType,
+    ClipVariantType,
    ControlAdapterDefaultSettings,
    MainModelDefaultSettings,
    ModelFormat,

@@ -85,7 +86,7 @@ class ModelRecordChanges(BaseModelExcludeNull):

    # Checkpoint-specific changes
    # TODO(MM2): Should we expose these? Feels footgun-y...
-    variant: Optional[ModelVariantType] = Field(description="The variant of the model.", default=None)
+    variant: Optional[ModelVariantType | ClipVariantType] = Field(description="The variant of the model.", default=None)
    prediction_type: Optional[SchedulerPredictionType] = Field(
        description="The prediction type of the model.", default=None
    )

@@ -16,6 +16,7 @@ from pydantic import (
from pydantic_core import to_jsonable_python

from invokeai.app.invocations.baseinvocation import BaseInvocation
+from invokeai.app.invocations.fields import ImageField
from invokeai.app.services.shared.graph import Graph, GraphExecutionState, NodeNotFoundError
from invokeai.app.services.workflow_records.workflow_records_common import (
    WorkflowWithoutID,

@@ -51,11 +52,7 @@ class SessionQueueItemNotFoundError(ValueError):

# region Batch

-BatchDataType = Union[
-    StrictStr,
-    float,
-    int,
-]
+BatchDataType = Union[StrictStr, float, int, ImageField]


class NodeFieldValue(BaseModel):

@@ -160,6 +160,10 @@ class LoggerInterface(InvocationContextInterface):


class ImagesInterface(InvocationContextInterface):
+    def __init__(self, services: InvocationServices, data: InvocationContextData, util: "UtilInterface") -> None:
+        super().__init__(services, data)
+        self._util = util
+
    def save(
        self,
        image: Image,

@@ -186,6 +190,8 @@ class ImagesInterface(InvocationContextInterface):
            The saved image DTO.
        """

+        self._util.signal_progress("Saving image")
+
        # If `metadata` is provided directly, use that. Else, use the metadata provided by `WithMetadata`, falling back to None.
        metadata_ = None
        if metadata:

@@ -336,6 +342,10 @@ class ConditioningInterface(InvocationContextInterface):
class ModelsInterface(InvocationContextInterface):
    """Common API for loading, downloading and managing models."""

+    def __init__(self, services: InvocationServices, data: InvocationContextData, util: "UtilInterface") -> None:
+        super().__init__(services, data)
+        self._util = util
+
    def exists(self, identifier: Union[str, "ModelIdentifierField"]) -> bool:
        """Check if a model exists.

@@ -368,11 +378,15 @@ class ModelsInterface(InvocationContextInterface):

        if isinstance(identifier, str):
            model = self._services.model_manager.store.get_model(identifier)
-            return self._services.model_manager.load.load_model(model, submodel_type)
        else:
-            _submodel_type = submodel_type or identifier.submodel_type
+            submodel_type = submodel_type or identifier.submodel_type
            model = self._services.model_manager.store.get_model(identifier.key)
-            return self._services.model_manager.load.load_model(model, _submodel_type)
+
+        message = f"Loading model {model.name}"
+        if submodel_type:
+            message += f" ({submodel_type.value})"
+        self._util.signal_progress(message)
+        return self._services.model_manager.load.load_model(model, submodel_type)

    def load_by_attrs(
        self, name: str, base: BaseModelType, type: ModelType, submodel_type: Optional[SubModelType] = None

@@ -397,6 +411,10 @@ class ModelsInterface(InvocationContextInterface):
        if len(configs) > 1:
            raise ValueError(f"More than one model found with name {name}, base {base}, and type {type}")

+        message = f"Loading model {name}"
+        if submodel_type:
+            message += f" ({submodel_type.value})"
+        self._util.signal_progress(message)
        return self._services.model_manager.load.load_model(configs[0], submodel_type)

    def get_config(self, identifier: Union[str, "ModelIdentifierField"]) -> AnyModelConfig:

@@ -467,6 +485,7 @@ class ModelsInterface(InvocationContextInterface):
        Returns:
            Path to the downloaded model
        """
+        self._util.signal_progress(f"Downloading model {source}")
        return self._services.model_manager.install.download_and_cache_model(source=source)

    def load_local_model(

@@ -489,6 +508,8 @@ class ModelsInterface(InvocationContextInterface):
        Returns:
            A LoadedModelWithoutConfig object.
        """
+
+        self._util.signal_progress(f"Loading model {model_path.name}")
        return self._services.model_manager.load.load_model_from_path(model_path=model_path, loader=loader)

    def load_remote_model(

@@ -514,6 +535,8 @@ class ModelsInterface(InvocationContextInterface):
            A LoadedModelWithoutConfig object.
        """
        model_path = self._services.model_manager.install.download_and_cache_model(source=str(source))
+
+        self._util.signal_progress(f"Loading model {source}")
        return self._services.model_manager.load.load_model_from_path(model_path=model_path, loader=loader)


@@ -707,12 +730,12 @@ def build_invocation_context(
    """

    logger = LoggerInterface(services=services, data=data)
-    images = ImagesInterface(services=services, data=data)
    tensors = TensorsInterface(services=services, data=data)
-    models = ModelsInterface(services=services, data=data)
    config = ConfigInterface(services=services, data=data)
    util = UtilInterface(services=services, data=data, is_canceled=is_canceled)
    conditioning = ConditioningInterface(services=services, data=data)
+    models = ModelsInterface(services=services, data=data, util=util)
+    images = ImagesInterface(services=services, data=data, util=util)
    boards = BoardsInterface(services=services, data=data)

    ctx = InvocationContext(
@@ -0,0 +1,382 @@
|
||||
{
|
||||
"name": "SD3.5 Text to Image",
|
||||
"author": "InvokeAI",
|
||||
"description": "Sample text to image workflow for Stable Diffusion 3.5",
|
||||
"version": "1.0.0",
|
||||
"contact": "invoke@invoke.ai",
|
||||
"tags": "text2image, SD3.5, default",
|
||||
"notes": "",
|
||||
"exposedFields": [
|
||||
{
|
||||
"nodeId": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
|
||||
"fieldName": "model"
|
||||
},
|
||||
{
|
||||
"nodeId": "e17d34e7-6ed1-493c-9a85-4fcd291cb084",
|
||||
"fieldName": "prompt"
|
||||
}
|
||||
],
|
||||
"meta": {
|
||||
"version": "3.0.0",
|
||||
"category": "default"
|
||||
},
|
||||
"id": "e3a51d6b-8208-4d6d-b187-fcfe8b32934c",
|
||||
"nodes": [
|
||||
{
|
||||
"id": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
|
||||
"type": "invocation",
|
||||
"data": {
|
||||
"id": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
|
||||
"type": "sd3_model_loader",
|
||||
"version": "1.0.0",
|
||||
"label": "",
|
||||
"notes": "",
|
||||
"isOpen": true,
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"nodePack": "invokeai",
|
||||
"inputs": {
|
||||
"model": {
|
||||
"name": "model",
|
||||
"label": "",
|
||||
"value": {
|
||||
"key": "f7b20be9-92a8-4cfb-bca4-6c3b5535c10b",
|
||||
"hash": "placeholder",
|
||||
"name": "stable-diffusion-3.5-medium",
|
||||
"base": "sd-3",
|
||||
"type": "main"
|
||||
}
|
||||
},
|
||||
"t5_encoder_model": {
|
||||
"name": "t5_encoder_model",
|
||||
"label": ""
|
||||
},
|
||||
"clip_l_model": {
|
||||
"name": "clip_l_model",
|
||||
"label": ""
|
||||
},
|
||||
"clip_g_model": {
|
||||
"name": "clip_g_model",
|
||||
"label": ""
|
||||
},
|
||||
"vae_model": {
|
||||
"name": "vae_model",
|
||||
"label": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
"position": {
|
||||
"x": -55.58689609637031,
|
||||
"y": -111.53602444662268
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "f7e394ac-6394-4096-abcb-de0d346506b3",
|
||||
"type": "invocation",
|
||||
"data": {
|
||||
"id": "f7e394ac-6394-4096-abcb-de0d346506b3",
|
||||
"type": "rand_int",
|
||||
"version": "1.0.1",
|
||||
"label": "",
|
||||
"notes": "",
|
||||
"isOpen": true,
|
||||
"isIntermediate": true,
|
||||
"useCache": false,
|
||||
"nodePack": "invokeai",
|
||||
"inputs": {
|
||||
"low": {
|
||||
"name": "low",
|
||||
"label": "",
|
||||
"value": 0
|
||||
},
|
||||
"high": {
|
||||
"name": "high",
|
||||
"label": "",
|
||||
"value": 2147483647
|
||||
}
|
||||
}
|
||||
},
|
||||
"position": {
|
||||
"x": 470.45870147220353,
|
||||
"y": 350.3141781644303
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "9eb72af0-dd9e-4ec5-ad87-d65e3c01f48b",
|
||||
"type": "invocation",
|
||||
"data": {
|
||||
"id": "9eb72af0-dd9e-4ec5-ad87-d65e3c01f48b",
|
||||
"type": "sd3_l2i",
|
||||
"version": "1.3.0",
|
||||
"label": "",
|
||||
"notes": "",
|
||||
"isOpen": true,
|
||||
"isIntermediate": false,
|
||||
"useCache": true,
|
||||
"nodePack": "invokeai",
|
||||
"inputs": {
|
||||
"board": {
|
||||
"name": "board",
|
||||
"label": ""
|
||||
},
|
||||
"metadata": {
|
||||
"name": "metadata",
|
||||
"label": ""
|
||||
},
|
||||
"latents": {
|
||||
"name": "latents",
|
||||
"label": ""
|
||||
},
|
||||
"vae": {
|
||||
"name": "vae",
|
||||
"label": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
"position": {
|
||||
"x": 1192.3097009334897,
|
||||
"y": -366.0994675072209
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "3b4f7f27-cfc0-4373-a009-99c5290d0cd6",
|
||||
"type": "invocation",
|
||||
"data": {
|
||||
"id": "3b4f7f27-cfc0-4373-a009-99c5290d0cd6",
|
||||
"type": "sd3_text_encoder",
|
||||
"version": "1.0.0",
|
||||
"label": "",
|
||||
"notes": "",
|
||||
"isOpen": true,
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"nodePack": "invokeai",
|
||||
"inputs": {
|
||||
"clip_l": {
|
||||
"name": "clip_l",
|
||||
"label": ""
|
||||
},
|
||||
"clip_g": {
|
||||
"name": "clip_g",
|
||||
"label": ""
|
||||
},
|
||||
"t5_encoder": {
|
||||
"name": "t5_encoder",
|
||||
"label": ""
|
||||
},
|
||||
"prompt": {
|
||||
"name": "prompt",
|
||||
"label": "",
|
||||
"value": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
"position": {
|
||||
"x": 408.16054647924784,
|
||||
"y": 65.06415352118786
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "e17d34e7-6ed1-493c-9a85-4fcd291cb084",
|
||||
"type": "invocation",
|
||||
"data": {
|
||||
"id": "e17d34e7-6ed1-493c-9a85-4fcd291cb084",
|
||||
"type": "sd3_text_encoder",
|
||||
"version": "1.0.0",
|
||||
"label": "",
|
||||
"notes": "",
|
||||
"isOpen": true,
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"nodePack": "invokeai",
|
||||
"inputs": {
|
||||
"clip_l": {
|
||||
"name": "clip_l",
|
||||
"label": ""
|
||||
},
|
||||
"clip_g": {
|
||||
"name": "clip_g",
|
||||
"label": ""
|
||||
},
|
||||
"t5_encoder": {
|
||||
"name": "t5_encoder",
|
||||
"label": ""
|
||||
},
|
||||
"prompt": {
|
||||
"name": "prompt",
|
||||
"label": "",
|
||||
"value": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
"position": {
|
||||
"x": 378.9283412440941,
|
||||
"y": -302.65777497352553
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "c7539f7b-7ac5-49b9-93eb-87ede611409f",
|
||||
"type": "invocation",
|
||||
"data": {
|
||||
"id": "c7539f7b-7ac5-49b9-93eb-87ede611409f",
|
||||
"type": "sd3_denoise",
|
||||
"version": "1.0.0",
|
||||
"label": "",
|
||||
"notes": "",
|
||||
"isOpen": true,
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"nodePack": "invokeai",
|
||||
"inputs": {
|
||||
"board": {
|
||||
"name": "board",
|
||||
"label": ""
|
||||
},
|
||||
"metadata": {
|
||||
"name": "metadata",
|
||||
"label": ""
|
||||
},
|
||||
"transformer": {
|
||||
"name": "transformer",
|
||||
"label": ""
|
||||
},
|
||||
"positive_conditioning": {
|
||||
"name": "positive_conditioning",
|
||||
"label": ""
|
||||
},
|
||||
"negative_conditioning": {
|
||||
"name": "negative_conditioning",
|
||||
"label": ""
|
||||
},
|
||||
"cfg_scale": {
|
||||
"name": "cfg_scale",
|
||||
"label": "",
|
||||
"value": 3.5
|
||||
},
|
||||
"width": {
|
||||
"name": "width",
|
||||
"label": "",
|
||||
"value": 1024
|
||||
},
|
||||
"height": {
|
||||
"name": "height",
|
||||
"label": "",
|
||||
"value": 1024
|
||||
},
|
||||
"steps": {
|
||||
"name": "steps",
|
||||
"label": "",
|
||||
"value": 30
|
||||
},
|
||||
"seed": {
|
||||
"name": "seed",
|
||||
"label": "",
|
||||
"value": 0
|
||||
}
|
||||
}
|
||||
},
|
||||
"position": {
|
||||
"x": 813.7814762740603,
|
||||
"y": -142.20529727605867
|
||||
}
|
||||
}
|
||||
],
|
||||
"edges": [
|
||||
{
|
||||
"id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4cvae-9eb72af0-dd9e-4ec5-ad87-d65e3c01f48bvae",
|
||||
"type": "default",
|
||||
"source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
|
||||
"target": "9eb72af0-dd9e-4ec5-ad87-d65e3c01f48b",
|
||||
"sourceHandle": "vae",
|
||||
"targetHandle": "vae"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4ct5_encoder-3b4f7f27-cfc0-4373-a009-99c5290d0cd6t5_encoder",
|
||||
"type": "default",
|
||||
"source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
|
||||
"target": "3b4f7f27-cfc0-4373-a009-99c5290d0cd6",
|
||||
"sourceHandle": "t5_encoder",
|
||||
"targetHandle": "t5_encoder"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4ct5_encoder-e17d34e7-6ed1-493c-9a85-4fcd291cb084t5_encoder",
|
||||
"type": "default",
|
||||
"source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
|
||||
"target": "e17d34e7-6ed1-493c-9a85-4fcd291cb084",
|
||||
"sourceHandle": "t5_encoder",
|
||||
"targetHandle": "t5_encoder"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4cclip_g-3b4f7f27-cfc0-4373-a009-99c5290d0cd6clip_g",
|
||||
"type": "default",
|
||||
"source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
|
||||
"target": "3b4f7f27-cfc0-4373-a009-99c5290d0cd6",
|
||||
"sourceHandle": "clip_g",
|
||||
"targetHandle": "clip_g"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4cclip_g-e17d34e7-6ed1-493c-9a85-4fcd291cb084clip_g",
|
||||
"type": "default",
|
||||
"source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
|
||||
"target": "e17d34e7-6ed1-493c-9a85-4fcd291cb084",
|
||||
"sourceHandle": "clip_g",
|
||||
"targetHandle": "clip_g"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4cclip_l-3b4f7f27-cfc0-4373-a009-99c5290d0cd6clip_l",
|
||||
"type": "default",
|
||||
"source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
|
||||
"target": "3b4f7f27-cfc0-4373-a009-99c5290d0cd6",
|
||||
"sourceHandle": "clip_l",
|
||||
"targetHandle": "clip_l"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4cclip_l-e17d34e7-6ed1-493c-9a85-4fcd291cb084clip_l",
|
||||
"type": "default",
|
||||
"source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
|
||||
"target": "e17d34e7-6ed1-493c-9a85-4fcd291cb084",
|
||||
"sourceHandle": "clip_l",
|
||||
"targetHandle": "clip_l"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-3f22f668-0e02-4fde-a2bb-c339586ceb4ctransformer-c7539f7b-7ac5-49b9-93eb-87ede611409ftransformer",
|
||||
"type": "default",
|
||||
"source": "3f22f668-0e02-4fde-a2bb-c339586ceb4c",
|
||||
"target": "c7539f7b-7ac5-49b9-93eb-87ede611409f",
|
||||
"sourceHandle": "transformer",
|
||||
"targetHandle": "transformer"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-f7e394ac-6394-4096-abcb-de0d346506b3value-c7539f7b-7ac5-49b9-93eb-87ede611409fseed",
|
||||
"type": "default",
|
||||
"source": "f7e394ac-6394-4096-abcb-de0d346506b3",
|
||||
"target": "c7539f7b-7ac5-49b9-93eb-87ede611409f",
|
||||
"sourceHandle": "value",
|
||||
"targetHandle": "seed"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-c7539f7b-7ac5-49b9-93eb-87ede611409flatents-9eb72af0-dd9e-4ec5-ad87-d65e3c01f48blatents",
|
||||
"type": "default",
|
||||
"source": "c7539f7b-7ac5-49b9-93eb-87ede611409f",
|
||||
"target": "9eb72af0-dd9e-4ec5-ad87-d65e3c01f48b",
|
||||
"sourceHandle": "latents",
|
||||
"targetHandle": "latents"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-e17d34e7-6ed1-493c-9a85-4fcd291cb084conditioning-c7539f7b-7ac5-49b9-93eb-87ede611409fpositive_conditioning",
|
||||
"type": "default",
|
||||
"source": "e17d34e7-6ed1-493c-9a85-4fcd291cb084",
|
||||
"target": "c7539f7b-7ac5-49b9-93eb-87ede611409f",
|
||||
"sourceHandle": "conditioning",
|
||||
"targetHandle": "positive_conditioning"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-3b4f7f27-cfc0-4373-a009-99c5290d0cd6conditioning-c7539f7b-7ac5-49b9-93eb-87ede611409fnegative_conditioning",
|
||||
"type": "default",
|
||||
"source": "3b4f7f27-cfc0-4373-a009-99c5290d0cd6",
|
||||
"target": "c7539f7b-7ac5-49b9-93eb-87ede611409f",
|
||||
"sourceHandle": "conditioning",
|
||||
"targetHandle": "negative_conditioning"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -34,6 +34,25 @@ SD1_5_LATENT_RGB_FACTORS = [
    [-0.1307, -0.1874, -0.7445],  # L4
]

+SD3_5_LATENT_RGB_FACTORS = [
+    [-0.05240681, 0.03251581, 0.0749016],
+    [-0.0580572, 0.00759826, 0.05729818],
+    [0.16144888, 0.01270368, -0.03768577],
+    [0.14418615, 0.08460266, 0.15941818],
+    [0.04894035, 0.0056485, -0.06686988],
+    [0.05187166, 0.19222395, 0.06261094],
+    [0.1539433, 0.04818359, 0.07103094],
+    [-0.08601796, 0.09013458, 0.10893912],
+    [-0.12398469, -0.06766567, 0.0033688],
+    [-0.0439737, 0.07825329, 0.02258823],
+    [0.03101129, 0.06382551, 0.07753657],
+    [-0.01315361, 0.08554491, -0.08772475],
+    [0.06464487, 0.05914605, 0.13262741],
+    [-0.07863674, -0.02261737, -0.12761454],
+    [-0.09923835, -0.08010759, -0.06264447],
+    [-0.03392309, -0.0804029, -0.06078822],
+]
+
FLUX_LATENT_RGB_FACTORS = [
    [-0.0412, 0.0149, 0.0521],
    [0.0056, 0.0291, 0.0768],

@@ -110,6 +129,9 @@ def stable_diffusion_step_callback(
        sdxl_latent_rgb_factors = torch.tensor(SDXL_LATENT_RGB_FACTORS, dtype=sample.dtype, device=sample.device)
        sdxl_smooth_matrix = torch.tensor(SDXL_SMOOTH_MATRIX, dtype=sample.dtype, device=sample.device)
        image = sample_to_lowres_estimated_image(sample, sdxl_latent_rgb_factors, sdxl_smooth_matrix)
+    elif base_model == BaseModelType.StableDiffusion3:
+        sd3_latent_rgb_factors = torch.tensor(SD3_5_LATENT_RGB_FACTORS, dtype=sample.dtype, device=sample.device)
+        image = sample_to_lowres_estimated_image(sample, sd3_latent_rgb_factors)
    else:
        v1_5_latent_rgb_factors = torch.tensor(SD1_5_LATENT_RGB_FACTORS, dtype=sample.dtype, device=sample.device)
        image = sample_to_lowres_estimated_image(sample, v1_5_latent_rgb_factors)
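The 16x3 SD3_5_LATENT_RGB_FACTORS table lets progress previews approximate an RGB image straight from SD3's 16-channel latents without running the VAE: sample_to_lowres_estimated_image projects each latent pixel through this matrix. A minimal sketch of that projection (illustrative; the real helper also rescales the result into displayable 0-255 values):

import torch

def latents_to_rgb_preview(latents: torch.Tensor, rgb_factors: list[list[float]]) -> torch.Tensor:
    # latents: (16, H, W) for SD3.5; rgb_factors: 16 rows of [r, g, b] weights.
    weights = torch.tensor(rgb_factors, dtype=latents.dtype, device=latents.device)  # (16, 3)
    return torch.einsum("chw,cr->rhw", latents, weights)  # (3, H, W), still unnormalized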

@@ -41,10 +41,12 @@ def infer_xlabs_ip_adapter_params_from_state_dict(state_dict: dict[str, torch.Te
    hidden_dim = state_dict["double_blocks.0.processor.ip_adapter_double_stream_k_proj.weight"].shape[0]
    context_dim = state_dict["double_blocks.0.processor.ip_adapter_double_stream_k_proj.weight"].shape[1]
    clip_embeddings_dim = state_dict["ip_adapter_proj_model.proj.weight"].shape[1]
+    clip_extra_context_tokens = state_dict["ip_adapter_proj_model.proj.weight"].shape[0] // context_dim

    return XlabsIpAdapterParams(
        num_double_blocks=num_double_blocks,
        context_dim=context_dim,
        hidden_dim=hidden_dim,
        clip_embeddings_dim=clip_embeddings_dim,
+        clip_extra_context_tokens=clip_extra_context_tokens,
    )

@@ -31,13 +31,16 @@ class XlabsIpAdapterParams:
    hidden_dim: int

    clip_embeddings_dim: int
+    clip_extra_context_tokens: int


class XlabsIpAdapterFlux(torch.nn.Module):
    def __init__(self, params: XlabsIpAdapterParams):
        super().__init__()
        self.image_proj = ImageProjModel(
-            cross_attention_dim=params.context_dim, clip_embeddings_dim=params.clip_embeddings_dim
+            cross_attention_dim=params.context_dim,
+            clip_embeddings_dim=params.clip_embeddings_dim,
+            clip_extra_context_tokens=params.clip_extra_context_tokens,
        )
        self.ip_adapter_double_blocks = IPAdapterDoubleBlocks(
            num_double_blocks=params.num_double_blocks, context_dim=params.context_dim, hidden_dim=params.hidden_dim
BIN invokeai/backend/image_util/assets/CIELab_to_UPLab.icc (new binary file, not shown)
invokeai/backend/image_util/composition.py (new file, 1020 lines; diff suppressed because it is too large)
@@ -17,9 +17,23 @@ class DepthAnythingPipeline(RawModel):
        self._pipeline = pipeline

    def generate_depth(self, image: Image.Image) -> Image.Image:
-        depth_map = self._pipeline(image)["depth"]
-        assert isinstance(depth_map, Image.Image)
-        return depth_map
+        pipeline_result = self._pipeline(image)
+        predicted_depth = pipeline_result["predicted_depth"]
+        assert isinstance(predicted_depth, torch.Tensor)
+
+        # Convert to PIL Image.
+        # Note: The pipeline already returns a PIL Image (pipeline_result["depth"]), but it contains artifacts as
+        # described here: https://github.com/invoke-ai/InvokeAI/issues/7358.
+        # We implement custom post-processing logic to avoid the artifacts.
+        prediction = torch.nn.functional.interpolate(
+            predicted_depth.unsqueeze(1), size=image.size[::-1], mode="bilinear", align_corners=False
+        )
+        prediction = prediction / prediction.max()
+        output = prediction.squeeze().cpu().numpy()
+        output = (output * 255).clip(0, 255)
+        formatted = output.astype("uint8")
+        depth = Image.fromarray(formatted)
+        return depth

    def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None):
        if device is not None and device.type not in {"cpu", "cuda"}:
@@ -45,8 +45,9 @@ def lora_model_from_flux_diffusers_state_dict(state_dict: Dict[str, torch.Tensor
    # Constants for FLUX.1
    num_double_layers = 19
    num_single_layers = 38
-    # inner_dim = 3072
-    # mlp_ratio = 4.0
+    hidden_size = 3072
+    mlp_ratio = 4.0
+    mlp_hidden_dim = int(hidden_size * mlp_ratio)

    layers: dict[str, AnyLoRALayer] = {}

@@ -62,30 +63,43 @@ def lora_model_from_flux_diffusers_state_dict(state_dict: Dict[str, torch.Tensor
            layers[dst_key] = LoRALayer.from_state_dict_values(values=value)
        assert len(src_layer_dict) == 0

-    def add_qkv_lora_layer_if_present(src_keys: list[str], dst_qkv_key: str) -> None:
+    def add_qkv_lora_layer_if_present(
+        src_keys: list[str],
+        src_weight_shapes: list[tuple[int, int]],
+        dst_qkv_key: str,
+        allow_missing_keys: bool = False,
+    ) -> None:
        """Handle the Q, K, V matrices for a transformer block. We need special handling because the diffusers format
        stores them in separate matrices, whereas the BFL format used internally by InvokeAI concatenates them.
        """
-        # We expect that either all src keys are present or none of them are. Verify this.
-        keys_present = [key in grouped_state_dict for key in src_keys]
-        assert all(keys_present) or not any(keys_present)
-
        # If none of the keys are present, return early.
+        keys_present = [key in grouped_state_dict for key in src_keys]
        if not any(keys_present):
            return

-        src_layer_dicts = [grouped_state_dict.pop(key) for key in src_keys]
        sub_layers: list[LoRALayer] = []
-        for src_layer_dict in src_layer_dicts:
-            values = {
-                "lora_down.weight": src_layer_dict.pop("lora_A.weight"),
-                "lora_up.weight": src_layer_dict.pop("lora_B.weight"),
-            }
-            if alpha is not None:
-                values["alpha"] = torch.tensor(alpha)
-            sub_layers.append(LoRALayer.from_state_dict_values(values=values))
-            assert len(src_layer_dict) == 0
-        layers[dst_qkv_key] = ConcatenatedLoRALayer(lora_layers=sub_layers, concat_axis=0)
+        for src_key, src_weight_shape in zip(src_keys, src_weight_shapes, strict=True):
+            src_layer_dict = grouped_state_dict.pop(src_key, None)
+            if src_layer_dict is not None:
+                values = {
+                    "lora_down.weight": src_layer_dict.pop("lora_A.weight"),
+                    "lora_up.weight": src_layer_dict.pop("lora_B.weight"),
+                }
+                if alpha is not None:
+                    values["alpha"] = torch.tensor(alpha)
+                assert values["lora_down.weight"].shape[1] == src_weight_shape[1]
+                assert values["lora_up.weight"].shape[0] == src_weight_shape[0]
+                sub_layers.append(LoRALayer.from_state_dict_values(values=values))
+                assert len(src_layer_dict) == 0
+            else:
+                if not allow_missing_keys:
+                    raise ValueError(f"Missing LoRA layer: '{src_key}'.")
+                values = {
+                    "lora_up.weight": torch.zeros((src_weight_shape[0], 1)),
+                    "lora_down.weight": torch.zeros((1, src_weight_shape[1])),
+                }
+                sub_layers.append(LoRALayer.from_state_dict_values(values=values))
+        layers[dst_qkv_key] = ConcatenatedLoRALayer(lora_layers=sub_layers)

    # time_text_embed.timestep_embedder -> time_in.
    add_lora_layer_if_present("time_text_embed.timestep_embedder.linear_1", "time_in.in_layer")

@@ -118,6 +132,7 @@ def lora_model_from_flux_diffusers_state_dict(state_dict: Dict[str, torch.Tensor
                f"transformer_blocks.{i}.attn.to_k",
                f"transformer_blocks.{i}.attn.to_v",
            ],
+            [(hidden_size, hidden_size), (hidden_size, hidden_size), (hidden_size, hidden_size)],
            f"double_blocks.{i}.img_attn.qkv",
        )
        add_qkv_lora_layer_if_present(

@@ -126,6 +141,7 @@ def lora_model_from_flux_diffusers_state_dict(state_dict: Dict[str, torch.Tensor
                f"transformer_blocks.{i}.attn.add_k_proj",
                f"transformer_blocks.{i}.attn.add_v_proj",
            ],
+            [(hidden_size, hidden_size), (hidden_size, hidden_size), (hidden_size, hidden_size)],
            f"double_blocks.{i}.txt_attn.qkv",
        )

@@ -175,7 +191,14 @@ def lora_model_from_flux_diffusers_state_dict(state_dict: Dict[str, torch.Tensor
                f"single_transformer_blocks.{i}.attn.to_v",
                f"single_transformer_blocks.{i}.proj_mlp",
            ],
+            [
+                (hidden_size, hidden_size),
+                (hidden_size, hidden_size),
+                (hidden_size, hidden_size),
+                (mlp_hidden_dim, hidden_size),
+            ],
            f"single_blocks.{i}.linear1",
+            allow_missing_keys=True,
        )

    # Output projections.
|
||||
|
||||
@@ -53,6 +53,7 @@ class BaseModelType(str, Enum):
|
||||
Any = "any"
|
||||
StableDiffusion1 = "sd-1"
|
||||
StableDiffusion2 = "sd-2"
|
||||
StableDiffusion3 = "sd-3"
|
||||
StableDiffusionXL = "sdxl"
|
||||
StableDiffusionXLRefiner = "sdxl-refiner"
|
||||
Flux = "flux"
|
||||
@@ -83,8 +84,10 @@ class SubModelType(str, Enum):
|
||||
Transformer = "transformer"
|
||||
TextEncoder = "text_encoder"
|
||||
TextEncoder2 = "text_encoder_2"
|
||||
TextEncoder3 = "text_encoder_3"
|
||||
Tokenizer = "tokenizer"
|
||||
Tokenizer2 = "tokenizer_2"
|
||||
Tokenizer3 = "tokenizer_3"
|
||||
VAE = "vae"
|
||||
VAEDecoder = "vae_decoder"
|
||||
VAEEncoder = "vae_encoder"
|
||||
@@ -92,6 +95,13 @@ class SubModelType(str, Enum):
|
||||
SafetyChecker = "safety_checker"
|
||||
|
||||
|
||||
class ClipVariantType(str, Enum):
|
||||
"""Variant type."""
|
||||
|
||||
L = "large"
|
||||
G = "gigantic"
|
||||
|
||||
|
||||
class ModelVariantType(str, Enum):
|
||||
"""Variant type."""
|
||||
|
||||
@@ -147,6 +157,17 @@ class ModelSourceType(str, Enum):
|
||||
DEFAULTS_PRECISION = Literal["fp16", "fp32"]
|
||||
|
||||
|
||||
AnyVariant: TypeAlias = Union[ModelVariantType, ClipVariantType, None]
|
||||
|
||||
|
||||
class SubmodelDefinition(BaseModel):
|
||||
path_or_prefix: str
|
||||
model_type: ModelType
|
||||
variant: AnyVariant = None
|
||||
|
||||
model_config = ConfigDict(protected_namespaces=())
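For orientation, a SubmodelDefinition records where a loadable submodel lives, what kind of model it is, and optionally its variant. A hypothetical instance for an SD3 CLIP-G text encoder might look like this (the path is illustrative, not taken from the diff):

example_submodel = SubmodelDefinition(
    path_or_prefix="/models/sd3/text_encoder_2",
    model_type=ModelType.CLIPEmbed,
    variant=ClipVariantType.G,
)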
|
||||
|
||||
|
||||
class MainModelDefaultSettings(BaseModel):
|
||||
vae: str | None = Field(default=None, description="Default VAE for this model (model key)")
|
||||
vae_precision: DEFAULTS_PRECISION | None = Field(default=None, description="Default VAE precision for this model")
|
||||
@@ -193,6 +214,9 @@ class ModelConfigBase(BaseModel):
|
||||
schema["required"].extend(["key", "type", "format"])
|
||||
|
||||
model_config = ConfigDict(validate_assignment=True, json_schema_extra=json_schema_extra)
|
||||
submodels: Optional[Dict[SubModelType, SubmodelDefinition]] = Field(
|
||||
description="Loadable submodels in this model", default=None
|
||||
)
|
||||
|
||||
|
||||
class CheckpointConfigBase(ModelConfigBase):
|
||||
@@ -335,7 +359,7 @@ class MainConfigBase(ModelConfigBase):
|
||||
default_settings: Optional[MainModelDefaultSettings] = Field(
|
||||
description="Default settings for this model", default=None
|
||||
)
|
||||
variant: ModelVariantType = ModelVariantType.Normal
|
||||
variant: AnyVariant = ModelVariantType.Normal
|
||||
|
||||
|
||||
class MainCheckpointConfig(CheckpointConfigBase, MainConfigBase):
|
||||
@@ -419,12 +443,33 @@ class CLIPEmbedDiffusersConfig(DiffusersConfigBase):
|
||||
|
||||
type: Literal[ModelType.CLIPEmbed] = ModelType.CLIPEmbed
|
||||
format: Literal[ModelFormat.Diffusers] = ModelFormat.Diffusers
|
||||
variant: ClipVariantType = ClipVariantType.L
|
||||
|
||||
@staticmethod
|
||||
def get_tag() -> Tag:
|
||||
return Tag(f"{ModelType.CLIPEmbed.value}.{ModelFormat.Diffusers.value}")
|
||||
|
||||
|
||||
class CLIPGEmbedDiffusersConfig(CLIPEmbedDiffusersConfig):
|
||||
"""Model config for CLIP-G Embeddings."""
|
||||
|
||||
variant: ClipVariantType = ClipVariantType.G
|
||||
|
||||
@staticmethod
|
||||
def get_tag() -> Tag:
|
||||
return Tag(f"{ModelType.CLIPEmbed.value}.{ModelFormat.Diffusers.value}.{ClipVariantType.G}")
|
||||
|
||||
|
||||
class CLIPLEmbedDiffusersConfig(CLIPEmbedDiffusersConfig):
|
||||
"""Model config for CLIP-L Embeddings."""
|
||||
|
||||
variant: ClipVariantType = ClipVariantType.L
|
||||
|
||||
@staticmethod
|
||||
def get_tag() -> Tag:
|
||||
return Tag(f"{ModelType.CLIPEmbed.value}.{ModelFormat.Diffusers.value}.{ClipVariantType.L}")
|
||||
|
||||
|
||||
class CLIPVisionDiffusersConfig(DiffusersConfigBase):
|
||||
"""Model config for CLIPVision."""
|
||||
|
||||
@@ -501,6 +546,8 @@ AnyModelConfig = Annotated[
|
||||
Annotated[SpandrelImageToImageConfig, SpandrelImageToImageConfig.get_tag()],
|
||||
Annotated[CLIPVisionDiffusersConfig, CLIPVisionDiffusersConfig.get_tag()],
|
||||
Annotated[CLIPEmbedDiffusersConfig, CLIPEmbedDiffusersConfig.get_tag()],
|
||||
Annotated[CLIPLEmbedDiffusersConfig, CLIPLEmbedDiffusersConfig.get_tag()],
|
||||
Annotated[CLIPGEmbedDiffusersConfig, CLIPGEmbedDiffusersConfig.get_tag()],
|
||||
],
|
||||
Discriminator(get_model_discriminator_value),
|
||||
]
|
||||
|
||||
@@ -35,6 +35,7 @@ class ModelLoader(ModelLoaderBase):
|
||||
self._logger = logger
|
||||
self._ram_cache = ram_cache
|
||||
self._torch_dtype = TorchDevice.choose_torch_dtype()
|
||||
self._torch_device = TorchDevice.choose_torch_device()
|
||||
|
||||
def load_model(self, model_config: AnyModelConfig, submodel_type: Optional[SubModelType] = None) -> LoadedModel:
|
||||
"""
|
||||
|
||||
@@ -84,7 +84,15 @@ class FluxVAELoader(ModelLoader):
|
||||
model = AutoEncoder(ae_params[config.config_path])
|
||||
sd = load_file(model_path)
|
||||
model.load_state_dict(sd, assign=True)
|
||||
model.to(dtype=self._torch_dtype)
|
||||
# VAE is broken in float16, which mps defaults to
|
||||
if self._torch_dtype == torch.float16:
|
||||
try:
|
||||
vae_dtype = torch.tensor([1.0], dtype=torch.bfloat16, device=self._torch_device).dtype
|
||||
except TypeError:
|
||||
vae_dtype = torch.float32
|
||||
else:
|
||||
vae_dtype = self._torch_dtype
|
||||
model.to(vae_dtype)
|
||||
|
||||
return model
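The float16 special case above exists because the FLUX VAE produces broken output in float16 (the dtype MPS defaults to); the loader probes whether the device can allocate a bfloat16 tensor and falls back to float32 otherwise. A standalone sketch of the same dtype selection (the helper name and arguments are assumptions):

import torch

def pick_vae_dtype(preferred: torch.dtype, device: torch.device) -> torch.dtype:
    # Avoid float16 for the FLUX VAE: prefer bfloat16 when the device supports it, else float32.
    if preferred != torch.float16:
        return preferred
    try:
        # Allocating a tiny bfloat16 tensor raises TypeError on devices without bf16 support.
        return torch.tensor([1.0], dtype=torch.bfloat16, device=device).dtype
    except TypeError:
        return torch.float32

print(pick_vae_dtype(torch.float16, torch.device("cpu")))  # torch.bfloat16 (CPU supports bf16)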
|
||||
|
||||
@@ -128,9 +136,9 @@ class BnbQuantizedLlmInt8bCheckpointModel(ModelLoader):
|
||||
"The bnb modules are not available. Please install bitsandbytes if available on your platform."
|
||||
)
|
||||
match submodel_type:
|
||||
case SubModelType.Tokenizer2:
|
||||
case SubModelType.Tokenizer2 | SubModelType.Tokenizer3:
|
||||
return T5Tokenizer.from_pretrained(Path(config.path) / "tokenizer_2", max_length=512)
|
||||
case SubModelType.TextEncoder2:
|
||||
case SubModelType.TextEncoder2 | SubModelType.TextEncoder3:
|
||||
te2_model_path = Path(config.path) / "text_encoder_2"
|
||||
model_config = AutoConfig.from_pretrained(te2_model_path)
|
||||
with accelerate.init_empty_weights():
|
||||
@@ -172,9 +180,9 @@ class T5EncoderCheckpointModel(ModelLoader):
|
||||
raise ValueError("Only T5EncoderConfig models are currently supported here.")
|
||||
|
||||
match submodel_type:
|
||||
case SubModelType.Tokenizer2:
|
||||
case SubModelType.Tokenizer2 | SubModelType.Tokenizer3:
|
||||
return T5Tokenizer.from_pretrained(Path(config.path) / "tokenizer_2", max_length=512)
|
||||
case SubModelType.TextEncoder2:
|
||||
case SubModelType.TextEncoder2 | SubModelType.TextEncoder3:
|
||||
return T5EncoderModel.from_pretrained(Path(config.path) / "text_encoder_2", torch_dtype="auto")
|
||||
|
||||
raise ValueError(
|
||||
|
||||
@@ -42,6 +42,7 @@ VARIANT_TO_IN_CHANNEL_MAP = {
|
||||
@ModelLoaderRegistry.register(
|
||||
base=BaseModelType.StableDiffusionXLRefiner, type=ModelType.Main, format=ModelFormat.Diffusers
|
||||
)
|
||||
@ModelLoaderRegistry.register(base=BaseModelType.StableDiffusion3, type=ModelType.Main, format=ModelFormat.Diffusers)
|
||||
@ModelLoaderRegistry.register(base=BaseModelType.StableDiffusion1, type=ModelType.Main, format=ModelFormat.Checkpoint)
|
||||
@ModelLoaderRegistry.register(base=BaseModelType.StableDiffusion2, type=ModelType.Main, format=ModelFormat.Checkpoint)
|
||||
@ModelLoaderRegistry.register(base=BaseModelType.StableDiffusionXL, type=ModelType.Main, format=ModelFormat.Checkpoint)
|
||||
@@ -51,13 +52,6 @@ VARIANT_TO_IN_CHANNEL_MAP = {
|
||||
class StableDiffusionDiffusersModel(GenericDiffusersLoader):
|
||||
"""Class to load main models."""
|
||||
|
||||
model_base_to_model_type = {
|
||||
BaseModelType.StableDiffusion1: "FrozenCLIPEmbedder",
|
||||
BaseModelType.StableDiffusion2: "FrozenOpenCLIPEmbedder",
|
||||
BaseModelType.StableDiffusionXL: "SDXL",
|
||||
BaseModelType.StableDiffusionXLRefiner: "SDXL-Refiner",
|
||||
}
|
||||
|
||||
def _load_model(
|
||||
self,
|
||||
config: AnyModelConfig,
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import json
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Literal, Optional, Union
|
||||
from typing import Any, Callable, Dict, Literal, Optional, Union
|
||||
|
||||
import safetensors.torch
|
||||
import spandrel
|
||||
@@ -22,6 +22,7 @@ from invokeai.backend.lora.conversions.flux_kohya_lora_conversion_utils import i
|
||||
from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS, ModelHash
|
||||
from invokeai.backend.model_manager.config import (
|
||||
AnyModelConfig,
|
||||
AnyVariant,
|
||||
BaseModelType,
|
||||
ControlAdapterDefaultSettings,
|
||||
InvalidModelConfigException,
|
||||
@@ -33,8 +34,15 @@ from invokeai.backend.model_manager.config import (
|
||||
ModelType,
|
||||
ModelVariantType,
|
||||
SchedulerPredictionType,
|
||||
SubmodelDefinition,
|
||||
SubModelType,
|
||||
)
|
||||
from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import ConfigLoader
|
||||
from invokeai.backend.model_manager.util.model_util import (
|
||||
get_clip_variant_type,
|
||||
lora_token_vector_length,
|
||||
read_checkpoint_meta,
|
||||
)
|
||||
from invokeai.backend.model_manager.util.model_util import lora_token_vector_length, read_checkpoint_meta
|
||||
from invokeai.backend.quantization.gguf.ggml_tensor import GGMLTensor
|
||||
from invokeai.backend.quantization.gguf.loaders import gguf_sd_loader
|
||||
from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel
|
||||
@@ -112,6 +120,7 @@ class ModelProbe(object):
|
||||
"StableDiffusionXLPipeline": ModelType.Main,
|
||||
"StableDiffusionXLImg2ImgPipeline": ModelType.Main,
|
||||
"StableDiffusionXLInpaintPipeline": ModelType.Main,
|
||||
"StableDiffusion3Pipeline": ModelType.Main,
|
||||
"LatentConsistencyModelPipeline": ModelType.Main,
|
||||
"AutoencoderKL": ModelType.VAE,
|
||||
"AutoencoderTiny": ModelType.VAE,
|
||||
@@ -122,8 +131,12 @@ class ModelProbe(object):
|
||||
"CLIPTextModel": ModelType.CLIPEmbed,
|
||||
"T5EncoderModel": ModelType.T5Encoder,
|
||||
"FluxControlNetModel": ModelType.ControlNet,
|
||||
"SD3Transformer2DModel": ModelType.Main,
|
||||
"CLIPTextModelWithProjection": ModelType.CLIPEmbed,
|
||||
}
|
||||
|
||||
TYPE2VARIANT: Dict[ModelType, Callable[[str], Optional[AnyVariant]]] = {ModelType.CLIPEmbed: get_clip_variant_type}
|
||||
|
||||
@classmethod
|
||||
def register_probe(
|
||||
cls, format: Literal["diffusers", "checkpoint", "onnx"], model_type: ModelType, probe_class: type[ProbeBase]
|
||||
@@ -170,7 +183,10 @@ class ModelProbe(object):
|
||||
fields["path"] = model_path.as_posix()
|
||||
fields["type"] = fields.get("type") or model_type
|
||||
fields["base"] = fields.get("base") or probe.get_base_type()
|
||||
fields["variant"] = fields.get("variant") or probe.get_variant_type()
|
||||
variant_func = cls.TYPE2VARIANT.get(fields["type"], None)
|
||||
fields["variant"] = (
|
||||
fields.get("variant") or (variant_func and variant_func(model_path.as_posix())) or probe.get_variant_type()
|
||||
)
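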
|
||||
fields["prediction_type"] = fields.get("prediction_type") or probe.get_scheduler_prediction_type()
|
||||
fields["image_encoder_model_id"] = fields.get("image_encoder_model_id") or probe.get_image_encoder_model_id()
|
||||
fields["name"] = fields.get("name") or cls.get_model_name(model_path)
|
||||
@@ -217,6 +233,10 @@ class ModelProbe(object):
|
||||
and fields["prediction_type"] == SchedulerPredictionType.VPrediction
|
||||
)
|
||||
|
||||
get_submodels = getattr(probe, "get_submodels", None)
|
||||
if fields["base"] == BaseModelType.StableDiffusion3 and callable(get_submodels):
|
||||
fields["submodels"] = get_submodels()
|
||||
|
||||
model_info = ModelConfigFactory.make_config(fields) # , key=fields.get("key", None))
|
||||
return model_info
|
||||
|
||||
@@ -449,7 +469,7 @@ class ModelProbe(object):
|
||||
"""
|
||||
# scan model
|
||||
scan_result = scan_file_path(checkpoint)
|
||||
if scan_result.infected_files != 0:
|
||||
if scan_result.infected_files != 0 or scan_result.scan_err:
|
||||
raise Exception("The model {model_name} is potentially infected by malware. Aborting import.")
|
||||
|
||||
|
||||
@@ -465,6 +485,7 @@ MODEL_NAME_TO_PREPROCESSOR = {
|
||||
"lineart anime": "lineart_anime_image_processor",
|
||||
"lineart_anime": "lineart_anime_image_processor",
|
||||
"lineart": "lineart_image_processor",
|
||||
"soft": "hed_image_processor",
|
||||
"softedge": "hed_image_processor",
|
||||
"hed": "hed_image_processor",
|
||||
"shuffle": "content_shuffle_image_processor",
|
||||
@@ -747,18 +768,33 @@ class FolderProbeBase(ProbeBase):
|
||||
|
||||
class PipelineFolderProbe(FolderProbeBase):
|
||||
def get_base_type(self) -> BaseModelType:
|
||||
with open(self.model_path / "unet" / "config.json", "r") as file:
|
||||
unet_conf = json.load(file)
|
||||
if unet_conf["cross_attention_dim"] == 768:
|
||||
return BaseModelType.StableDiffusion1
|
||||
elif unet_conf["cross_attention_dim"] == 1024:
|
||||
return BaseModelType.StableDiffusion2
|
||||
elif unet_conf["cross_attention_dim"] == 1280:
|
||||
return BaseModelType.StableDiffusionXLRefiner
|
||||
elif unet_conf["cross_attention_dim"] == 2048:
|
||||
return BaseModelType.StableDiffusionXL
|
||||
else:
|
||||
raise InvalidModelConfigException(f"Unknown base model for {self.model_path}")
|
||||
# Handle pipelines with a UNet (i.e SD 1.x, SD2, SDXL).
|
||||
config_path = self.model_path / "unet" / "config.json"
|
||||
if config_path.exists():
|
||||
with open(config_path) as file:
|
||||
unet_conf = json.load(file)
|
||||
if unet_conf["cross_attention_dim"] == 768:
|
||||
return BaseModelType.StableDiffusion1
|
||||
elif unet_conf["cross_attention_dim"] == 1024:
|
||||
return BaseModelType.StableDiffusion2
|
||||
elif unet_conf["cross_attention_dim"] == 1280:
|
||||
return BaseModelType.StableDiffusionXLRefiner
|
||||
elif unet_conf["cross_attention_dim"] == 2048:
|
||||
return BaseModelType.StableDiffusionXL
|
||||
else:
|
||||
raise InvalidModelConfigException(f"Unknown base model for {self.model_path}")
|
||||
|
||||
# Handle pipelines with a transformer (i.e. SD3).
|
||||
config_path = self.model_path / "transformer" / "config.json"
|
||||
if config_path.exists():
|
||||
with open(config_path) as file:
|
||||
transformer_conf = json.load(file)
|
||||
if transformer_conf["_class_name"] == "SD3Transformer2DModel":
|
||||
return BaseModelType.StableDiffusion3
|
||||
else:
|
||||
raise InvalidModelConfigException(f"Unknown base model for {self.model_path}")
|
||||
|
||||
raise InvalidModelConfigException(f"Unknown base model for {self.model_path}")
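The reworked probe first looks for a UNet config and maps its cross_attention_dim onto SD 1.x / SD 2.x / SDXL-Refiner / SDXL, and only then falls back to a transformer config to recognise SD3. A compact restatement of that decision table as a standalone sketch (helper name and string tags are illustrative):

import json
from pathlib import Path

CROSS_ATTENTION_DIM_TO_BASE = {768: "sd-1", 1024: "sd-2", 1280: "sdxl-refiner", 2048: "sdxl"}

def guess_base_type(model_dir: Path) -> str:
    unet_config = model_dir / "unet" / "config.json"
    if unet_config.exists():
        dim = json.loads(unet_config.read_text())["cross_attention_dim"]
        if dim in CROSS_ATTENTION_DIM_TO_BASE:
            return CROSS_ATTENTION_DIM_TO_BASE[dim]
        raise ValueError(f"Unknown base model for {model_dir}")
    transformer_config = model_dir / "transformer" / "config.json"
    if transformer_config.exists():
        if json.loads(transformer_config.read_text())["_class_name"] == "SD3Transformer2DModel":
            return "sd-3"
        raise ValueError(f"Unknown base model for {model_dir}")
    raise ValueError(f"Unknown base model for {model_dir}")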
|
||||
|
||||
def get_scheduler_prediction_type(self) -> SchedulerPredictionType:
|
||||
with open(self.model_path / "scheduler" / "scheduler_config.json", "r") as file:
|
||||
@@ -770,6 +806,23 @@ class PipelineFolderProbe(FolderProbeBase):
|
||||
else:
|
||||
raise InvalidModelConfigException("Unknown scheduler prediction type: {scheduler_conf['prediction_type']}")
|
||||
|
||||
def get_submodels(self) -> Dict[SubModelType, SubmodelDefinition]:
|
||||
config = ConfigLoader.load_config(self.model_path, config_name="model_index.json")
|
||||
submodels: Dict[SubModelType, SubmodelDefinition] = {}
|
||||
for key, value in config.items():
|
||||
if key.startswith("_") or not (isinstance(value, list) and len(value) == 2):
|
||||
continue
|
||||
model_loader = str(value[1])
|
||||
if model_type := ModelProbe.CLASS2TYPE.get(model_loader):
|
||||
variant_func = ModelProbe.TYPE2VARIANT.get(model_type, None)
|
||||
submodels[SubModelType(key)] = SubmodelDefinition(
|
||||
path_or_prefix=(self.model_path / key).resolve().as_posix(),
|
||||
model_type=model_type,
|
||||
variant=variant_func and variant_func((self.model_path / key).as_posix()),
|
||||
)
|
||||
|
||||
return submodels
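For context, a diffusers model_index.json maps each submodel folder to a [library, class] pair; the method above uses the class name to infer the submodel type and then the per-type variant resolver. A minimal sketch of that parsing against a hypothetical index (the index contents and the string stand-ins for ModelType values are illustrative):

import json

model_index = json.loads("""
{
  "_class_name": "StableDiffusion3Pipeline",
  "text_encoder": ["transformers", "CLIPTextModelWithProjection"],
  "transformer": ["diffusers", "SD3Transformer2DModel"],
  "vae": ["diffusers", "AutoencoderKL"]
}
""")

CLASS2TYPE = {  # trimmed subset of ModelProbe.CLASS2TYPE
    "CLIPTextModelWithProjection": "clip_embed",
    "SD3Transformer2DModel": "main",
    "AutoencoderKL": "vae",
}

submodels = {
    key: CLASS2TYPE[value[1]]
    for key, value in model_index.items()
    if not key.startswith("_") and isinstance(value, list) and len(value) == 2 and value[1] in CLASS2TYPE
}
print(submodels)  # {'text_encoder': 'clip_embed', 'transformer': 'main', 'vae': 'vae'}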
|
||||
|
||||
def get_variant_type(self) -> ModelVariantType:
|
||||
# This only works for pipelines! Any kind of
|
||||
# exception results in our returning the
|
||||
|
||||
@@ -140,6 +140,22 @@ flux_dev = StarterModel(
|
||||
type=ModelType.Main,
|
||||
dependencies=[t5_base_encoder, flux_vae, clip_l_encoder],
|
||||
)
|
||||
sd35_medium = StarterModel(
|
||||
name="SD3.5 Medium",
|
||||
base=BaseModelType.StableDiffusion3,
|
||||
source="stabilityai/stable-diffusion-3.5-medium",
|
||||
description="Medium SD3.5 Model: ~15GB",
|
||||
type=ModelType.Main,
|
||||
dependencies=[],
|
||||
)
|
||||
sd35_large = StarterModel(
|
||||
name="SD3.5 Large",
|
||||
base=BaseModelType.StableDiffusion3,
|
||||
source="stabilityai/stable-diffusion-3.5-large",
|
||||
description="Large SD3.5 Model: ~19G",
|
||||
type=ModelType.Main,
|
||||
dependencies=[],
|
||||
)
|
||||
cyberrealistic_sd1 = StarterModel(
|
||||
name="CyberRealistic v4.1",
|
||||
base=BaseModelType.StableDiffusion1,
|
||||
@@ -282,13 +298,12 @@ ip_adapter_sdxl = StarterModel(
|
||||
previous_names=["IP Adapter SDXL"],
|
||||
)
|
||||
ip_adapter_flux = StarterModel(
|
||||
name="Standard Reference (XLabs FLUX IP-Adapter)",
|
||||
name="Standard Reference (XLabs FLUX IP-Adapter v2)",
|
||||
base=BaseModelType.Flux,
|
||||
source="https://huggingface.co/XLabs-AI/flux-ip-adapter/resolve/main/flux-ip-adapter.safetensors",
|
||||
source="https://huggingface.co/XLabs-AI/flux-ip-adapter-v2/resolve/main/ip_adapter.safetensors",
|
||||
description="References images with a more generalized/looser degree of precision.",
|
||||
type=ModelType.IPAdapter,
|
||||
dependencies=[clip_vit_l_image_encoder],
|
||||
previous_names=["XLabs FLUX IP-Adapter"],
|
||||
)
|
||||
# endregion
|
||||
# region ControlNet
|
||||
@@ -570,6 +585,8 @@ STARTER_MODELS: list[StarterModel] = [
|
||||
flux_dev_quantized,
|
||||
flux_schnell,
|
||||
flux_dev,
|
||||
sd35_medium,
|
||||
sd35_large,
|
||||
cyberrealistic_sd1,
|
||||
rev_animated_sd1,
|
||||
dreamshaper_8_sd1,
|
||||
|
||||
@@ -8,6 +8,7 @@ import safetensors
|
||||
import torch
|
||||
from picklescan.scanner import scan_file_path
|
||||
|
||||
from invokeai.backend.model_manager.config import ClipVariantType
|
||||
from invokeai.backend.quantization.gguf.loaders import gguf_sd_loader
|
||||
|
||||
|
||||
@@ -43,7 +44,7 @@ def _fast_safetensors_reader(path: str) -> Dict[str, torch.Tensor]:
|
||||
return checkpoint
|
||||
|
||||
|
||||
def read_checkpoint_meta(path: Union[str, Path], scan: bool = False) -> Dict[str, torch.Tensor]:
|
||||
def read_checkpoint_meta(path: Union[str, Path], scan: bool = True) -> Dict[str, torch.Tensor]:
|
||||
if str(path).endswith(".safetensors"):
|
||||
try:
|
||||
path_str = path.as_posix() if isinstance(path, Path) else path
|
||||
@@ -54,7 +55,7 @@ def read_checkpoint_meta(path: Union[str, Path], scan: bool = False) -> Dict[str
|
||||
else:
|
||||
if scan:
|
||||
scan_result = scan_file_path(path)
|
||||
if scan_result.infected_files != 0:
|
||||
if scan_result.infected_files != 0 or scan_result.scan_err:
|
||||
raise Exception(f'The model file "{path}" is potentially infected by malware. Aborting import.')
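Both scan call sites now also treat a scanner error as a failure, not just a non-zero infected-file count. A minimal standalone sketch of that gating around picklescan (the helper name is an assumption):

from picklescan.scanner import scan_file_path

def assert_checkpoint_safe(path: str) -> None:
    # Refuse to load a pickle-based checkpoint if the scan finds malware or cannot complete.
    result = scan_file_path(path)
    if result.infected_files != 0 or result.scan_err:
        raise RuntimeError(f'The model file "{path}" failed the safety scan. Aborting import.')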
|
||||
if str(path).endswith(".gguf"):
|
||||
# The GGUF reader used here uses numpy memmap, so these tensors are not loaded into memory during this function
|
||||
@@ -165,3 +166,25 @@ def convert_bundle_to_flux_transformer_checkpoint(
|
||||
del transformer_state_dict[k]
|
||||
|
||||
return original_state_dict
|
||||
|
||||
|
||||
def get_clip_variant_type(location: str) -> Optional[ClipVariantType]:
    try:
        path = Path(location)
        config_path = path / "config.json"
        if not config_path.exists():
            config_path = path / "text_encoder" / "config.json"
        if not config_path.exists():
            return ClipVariantType.L
        with open(config_path) as file:
            clip_conf = json.load(file)
        hidden_size = clip_conf.get("hidden_size", -1)
        match hidden_size:
            case 1280:
                return ClipVariantType.G
            case 768:
                return ClipVariantType.L
            case _:
                return ClipVariantType.L
    except Exception:
        return ClipVariantType.L
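The mapping at the heart of get_clip_variant_type: CLIP-G text encoders use a 1280-wide hidden state and CLIP-L a 768-wide one, with anything unrecognised defaulting to L. A tiny standalone check of that mapping (the helper name is illustrative):

def variant_from_hidden_size(hidden_size: int) -> str:
    return "gigantic" if hidden_size == 1280 else "large"

assert variant_from_hidden_size(1280) == "gigantic"
assert variant_from_hidden_size(768) == "large"
assert variant_from_hidden_size(-1) == "large"  # missing/unknown hidden_size defaults to CLIP-L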
|
||||
|
||||
@@ -85,6 +85,7 @@ def _filter_by_variant(files: List[Path], variant: ModelRepoVariant) -> Set[Path
|
||||
"""Select the proper variant files from a list of HuggingFace repo_id paths."""
|
||||
result: set[Path] = set()
|
||||
subfolder_weights: dict[Path, list[SubfolderCandidate]] = {}
|
||||
safetensors_detected = False
|
||||
for path in files:
|
||||
if path.suffix in [".onnx", ".pb", ".onnx_data"]:
|
||||
if variant == ModelRepoVariant.ONNX:
|
||||
@@ -119,19 +120,27 @@ def _filter_by_variant(files: List[Path], variant: ModelRepoVariant) -> Set[Path
|
||||
# We prefer safetensors over other file formats and an exact variant match. We'll score each file based on
|
||||
# variant and format and select the best one.
|
||||
|
||||
if safetensors_detected and path.suffix == ".bin":
|
||||
continue
|
||||
|
||||
parent = path.parent
|
||||
score = 0
|
||||
|
||||
if path.suffix == ".safetensors":
|
||||
safetensors_detected = True
|
||||
if parent in subfolder_weights:
|
||||
subfolder_weights[parent] = [sfc for sfc in subfolder_weights[parent] if sfc.path.suffix != ".bin"]
|
||||
score += 1
|
||||
|
||||
candidate_variant_label = path.suffixes[0] if len(path.suffixes) == 2 else None
|
||||
|
||||
# Some special handling is needed here if there is not an exact match and if we cannot infer the variant
|
||||
# from the file name. In this case, we only give this file a point if the requested variant is FP32 or DEFAULT.
|
||||
if candidate_variant_label == f".{variant}" or (
|
||||
not candidate_variant_label and variant in [ModelRepoVariant.FP32, ModelRepoVariant.Default]
|
||||
):
|
||||
if (
|
||||
variant is not ModelRepoVariant.Default
|
||||
and candidate_variant_label
|
||||
and candidate_variant_label.startswith(f".{variant.value}")
|
||||
) or (not candidate_variant_label and variant in [ModelRepoVariant.FP32, ModelRepoVariant.Default]):
|
||||
score += 1
|
||||
|
||||
if parent not in subfolder_weights:
|
||||
@@ -146,7 +155,7 @@ def _filter_by_variant(files: List[Path], variant: ModelRepoVariant) -> Set[Path
|
||||
# Check if at least one of the files has the explicit fp16 variant.
|
||||
at_least_one_fp16 = False
|
||||
for candidate in candidate_list:
|
||||
if len(candidate.path.suffixes) == 2 and candidate.path.suffixes[0] == ".fp16":
|
||||
if len(candidate.path.suffixes) == 2 and candidate.path.suffixes[0].startswith(".fp16"):
|
||||
at_least_one_fp16 = True
|
||||
break
|
||||
|
||||
@@ -162,7 +171,16 @@ def _filter_by_variant(files: List[Path], variant: ModelRepoVariant) -> Set[Path
|
||||
# candidate.
|
||||
highest_score_candidate = max(candidate_list, key=lambda candidate: candidate.score)
|
||||
if highest_score_candidate:
|
||||
result.add(highest_score_candidate.path)
|
||||
pattern = r"^(.*?)-\d+-of-\d+(\.\w+)$"
|
||||
match = re.match(pattern, highest_score_candidate.path.as_posix())
|
||||
if match:
|
||||
for candidate in candidate_list:
|
||||
if candidate.path.as_posix().startswith(match.group(1)) and candidate.path.as_posix().endswith(
|
||||
match.group(2)
|
||||
):
|
||||
result.add(candidate.path)
|
||||
else:
|
||||
result.add(highest_score_candidate.path)
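The regex above recognises sharded weight files of the form &lt;prefix&gt;-&lt;i&gt;-of-&lt;n&gt;.&lt;ext&gt;; when the best-scoring file is one shard, every sibling shard sharing its prefix and extension is pulled in as well, since a single shard is not loadable on its own. A standalone sketch with hypothetical file names:

import re
from pathlib import Path

SHARD_PATTERN = r"^(.*?)-\d+-of-\d+(\.\w+)$"

candidates = [
    Path("transformer/diffusion_pytorch_model-00001-of-00002.safetensors"),
    Path("transformer/diffusion_pytorch_model-00002-of-00002.safetensors"),
    Path("transformer/config.json"),
]

best = candidates[0]
match = re.match(SHARD_PATTERN, best.as_posix())
if match:
    # Pull in every sibling shard that shares the prefix and extension of the best candidate.
    shards = [
        p for p in candidates
        if p.as_posix().startswith(match.group(1)) and p.as_posix().endswith(match.group(2))
    ]
    print([p.name for p in shards])  # both shard files; config.json is excluded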
|
||||
|
||||
# If one of the architecture-related variants was specified and no files matched other than
|
||||
# config and text files then we return an empty list
|
||||
|
||||
New file: invokeai/backend/sd3/__init__.py (0 lines)
New file: invokeai/backend/sd3/extensions/__init__.py (0 lines)
New file: invokeai/backend/sd3/extensions/inpaint_extension.py (58 lines)
@@ -0,0 +1,58 @@
|
||||
import torch
|
||||
|
||||
|
||||
class InpaintExtension:
|
||||
"""A class for managing inpainting with SD3."""
|
||||
|
||||
def __init__(self, init_latents: torch.Tensor, inpaint_mask: torch.Tensor, noise: torch.Tensor):
|
||||
"""Initialize InpaintExtension.
|
||||
|
||||
Args:
|
||||
init_latents (torch.Tensor): The initial latents (i.e. un-noised at timestep 0).
|
||||
inpaint_mask (torch.Tensor): A mask specifying which elements to inpaint. Range [0, 1]. Values of 1 will be
|
||||
re-generated. Values of 0 will remain unchanged. Values between 0 and 1 can be used to blend the
|
||||
inpainted region with the background.
|
||||
noise (torch.Tensor): The noise tensor used to noise the init_latents.
|
||||
"""
|
||||
assert init_latents.dim() == inpaint_mask.dim() == noise.dim() == 4
|
||||
assert init_latents.shape[-2:] == inpaint_mask.shape[-2:] == noise.shape[-2:]
|
||||
|
||||
self._init_latents = init_latents
|
||||
self._inpaint_mask = inpaint_mask
|
||||
self._noise = noise
|
||||
|
||||
def _apply_mask_gradient_adjustment(self, t_prev: float) -> torch.Tensor:
|
||||
"""Applies inpaint mask gradient adjustment and returns the inpaint mask to be used at the current timestep."""
|
||||
# As we progress through the denoising process, we promote gradient regions of the mask to have a full weight of
|
||||
# 1.0. This helps to produce more coherent seams around the inpainted region. We experimented with a (small)
|
||||
# number of promotion strategies (e.g. gradual promotion based on timestep), but found that a simple cutoff
|
||||
# threshold worked well.
|
||||
# We use a small epsilon to avoid any potential issues with floating point precision.
|
||||
eps = 1e-4
|
||||
mask_gradient_t_cutoff = 0.5
|
||||
if t_prev > mask_gradient_t_cutoff:
|
||||
# Early in the denoising process, use the inpaint mask as-is.
|
||||
return self._inpaint_mask
|
||||
else:
|
||||
# After the cut-off, promote all non-zero mask values to 1.0.
|
||||
mask = self._inpaint_mask.where(self._inpaint_mask <= (0.0 + eps), 1.0)
|
||||
|
||||
return mask
|
||||
|
||||
def merge_intermediate_latents_with_init_latents(
|
||||
self, intermediate_latents: torch.Tensor, t_prev: float
|
||||
) -> torch.Tensor:
|
||||
"""Merge the intermediate latents with the initial latents for the current timestep using the inpaint mask. I.e.
|
||||
update the intermediate latents to keep the regions that are not being inpainted on the correct noise
|
||||
trajectory.
|
||||
|
||||
This function should be called after each denoising step.
|
||||
"""
|
||||
|
||||
mask = self._apply_mask_gradient_adjustment(t_prev)
|
||||
|
||||
# Noise the init latents for the current timestep.
|
||||
noised_init_latents = self._noise * t_prev + (1.0 - t_prev) * self._init_latents
|
||||
|
||||
# Merge the intermediate latents with the noised_init_latents using the inpaint_mask.
|
||||
return intermediate_latents * mask + noised_init_latents * (1.0 - mask)
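The merge relies on the rectified-flow noising being linear in t: at time t_prev the background should sit exactly at noise * t_prev + (1 - t_prev) * init, so after each step the un-inpainted regions are projected back onto that trajectory while the masked regions keep the denoiser's output. A tiny standalone check of the blend and of the mask promotion above (shapes and values are illustrative):

import torch

init = torch.zeros(1, 4, 2, 2)                 # un-noised latents
noise = torch.ones(1, 4, 2, 2)                 # noise used to noise them
intermediate = torch.full((1, 4, 2, 2), 5.0)   # whatever the denoiser produced
inpaint_mask = torch.zeros(1, 1, 2, 2)         # 0 = keep background, 1 = re-generate
t_prev = 0.25

# Past the 0.5 cutoff, soft mask values are promoted to 1.0 (here the mask is all zeros, so unchanged).
mask = inpaint_mask.where(inpaint_mask <= 1e-4, 1.0)

noised_init = noise * t_prev + (1.0 - t_prev) * init
merged = intermediate * mask + noised_init * (1.0 - mask)

# With a fully-zero mask the denoiser output is discarded and the background follows the
# linear noise trajectory exactly.
assert torch.allclose(merged, torch.full_like(merged, t_prev))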
|
||||
@@ -49,9 +49,32 @@ class FLUXConditioningInfo:
|
||||
return self
|
||||
|
||||
|
||||
@dataclass
|
||||
class SD3ConditioningInfo:
|
||||
clip_l_pooled_embeds: torch.Tensor
|
||||
clip_l_embeds: torch.Tensor
|
||||
clip_g_pooled_embeds: torch.Tensor
|
||||
clip_g_embeds: torch.Tensor
|
||||
t5_embeds: torch.Tensor | None
|
||||
|
||||
def to(self, device: torch.device | None = None, dtype: torch.dtype | None = None):
|
||||
self.clip_l_pooled_embeds = self.clip_l_pooled_embeds.to(device=device, dtype=dtype)
|
||||
self.clip_l_embeds = self.clip_l_embeds.to(device=device, dtype=dtype)
|
||||
self.clip_g_pooled_embeds = self.clip_g_pooled_embeds.to(device=device, dtype=dtype)
|
||||
self.clip_g_embeds = self.clip_g_embeds.to(device=device, dtype=dtype)
|
||||
if self.t5_embeds is not None:
|
||||
self.t5_embeds = self.t5_embeds.to(device=device, dtype=dtype)
|
||||
return self
|
||||
|
||||
|
||||
@dataclass
|
||||
class ConditioningFieldData:
|
||||
conditionings: List[BasicConditioningInfo] | List[SDXLConditioningInfo] | List[FLUXConditioningInfo]
|
||||
conditionings: (
|
||||
List[BasicConditioningInfo]
|
||||
| List[SDXLConditioningInfo]
|
||||
| List[FLUXConditioningInfo]
|
||||
| List[SD3ConditioningInfo]
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
|
||||
@@ -9,6 +9,7 @@ const config: KnipConfig = {
|
||||
'src/services/api/schema.ts',
|
||||
'src/features/nodes/types/v1/**',
|
||||
'src/features/nodes/types/v2/**',
|
||||
'src/features/parameters/types/parameterSchemas.ts',
|
||||
// TODO(psyche): maybe we can clean up these utils after canvas v2 release
|
||||
'src/features/controlLayers/konva/util.ts',
|
||||
// TODO(psyche): restore HRF functionality?
|
||||
|
||||
@@ -52,13 +52,13 @@
|
||||
}
|
||||
},
|
||||
"dependencies": {
|
||||
"@atlaskit/pragmatic-drag-and-drop": "^1.4.0",
|
||||
"@atlaskit/pragmatic-drag-and-drop-auto-scroll": "^1.4.0",
|
||||
"@atlaskit/pragmatic-drag-and-drop-hitbox": "^1.0.3",
|
||||
"@dagrejs/dagre": "^1.1.4",
|
||||
"@dagrejs/graphlib": "^2.2.4",
|
||||
"@dnd-kit/core": "^6.1.0",
|
||||
"@dnd-kit/sortable": "^8.0.0",
|
||||
"@dnd-kit/utilities": "^3.2.2",
|
||||
"@fontsource-variable/inter": "^5.1.0",
|
||||
"@invoke-ai/ui-library": "^0.0.43",
|
||||
"@invoke-ai/ui-library": "^0.0.44",
|
||||
"@nanostores/react": "^0.7.3",
|
||||
"@reduxjs/toolkit": "2.2.3",
|
||||
"@roarr/browser-log-writer": "^1.3.0",
|
||||
|
||||
invokeai/frontend/web/pnpm-lock.yaml (generated, 167 lines changed)
@@ -5,27 +5,27 @@ settings:
|
||||
excludeLinksFromLockfile: false
|
||||
|
||||
dependencies:
|
||||
'@atlaskit/pragmatic-drag-and-drop':
|
||||
specifier: ^1.4.0
|
||||
version: 1.4.0
|
||||
'@atlaskit/pragmatic-drag-and-drop-auto-scroll':
|
||||
specifier: ^1.4.0
|
||||
version: 1.4.0
|
||||
'@atlaskit/pragmatic-drag-and-drop-hitbox':
|
||||
specifier: ^1.0.3
|
||||
version: 1.0.3
|
||||
'@dagrejs/dagre':
|
||||
specifier: ^1.1.4
|
||||
version: 1.1.4
|
||||
'@dagrejs/graphlib':
|
||||
specifier: ^2.2.4
|
||||
version: 2.2.4
|
||||
'@dnd-kit/core':
|
||||
specifier: ^6.1.0
|
||||
version: 6.1.0(react-dom@18.3.1)(react@18.3.1)
|
||||
'@dnd-kit/sortable':
|
||||
specifier: ^8.0.0
|
||||
version: 8.0.0(@dnd-kit/core@6.1.0)(react@18.3.1)
|
||||
'@dnd-kit/utilities':
|
||||
specifier: ^3.2.2
|
||||
version: 3.2.2(react@18.3.1)
|
||||
'@fontsource-variable/inter':
|
||||
specifier: ^5.1.0
|
||||
version: 5.1.0
|
||||
'@invoke-ai/ui-library':
|
||||
specifier: ^0.0.43
|
||||
version: 0.0.43(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.1.0)(@types/react@18.3.11)(i18next@23.15.1)(react-dom@18.3.1)(react@18.3.1)
|
||||
specifier: ^0.0.44
|
||||
version: 0.0.44(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.1.0)(@types/react@18.3.11)(i18next@23.15.1)(react-dom@18.3.1)(react@18.3.1)
|
||||
'@nanostores/react':
|
||||
specifier: ^0.7.3
|
||||
version: 0.7.3(nanostores@0.11.3)(react@18.3.1)
|
||||
@@ -319,6 +319,28 @@ packages:
|
||||
'@jridgewell/trace-mapping': 0.3.25
|
||||
dev: true
|
||||
|
||||
/@atlaskit/pragmatic-drag-and-drop-auto-scroll@1.4.0:
|
||||
resolution: {integrity: sha512-5GoikoTSW13UX76F9TDeWB8x3jbbGlp/Y+3aRkHe1MOBMkrWkwNpJ42MIVhhX/6NSeaZiPumP0KbGJVs2tOWSQ==}
|
||||
dependencies:
|
||||
'@atlaskit/pragmatic-drag-and-drop': 1.4.0
|
||||
'@babel/runtime': 7.25.7
|
||||
dev: false
|
||||
|
||||
/@atlaskit/pragmatic-drag-and-drop-hitbox@1.0.3:
|
||||
resolution: {integrity: sha512-/Sbu/HqN2VGLYBhnsG7SbRNg98XKkbF6L7XDdBi+izRybfaK1FeMfodPpm/xnBHPJzwYMdkE0qtLyv6afhgMUA==}
|
||||
dependencies:
|
||||
'@atlaskit/pragmatic-drag-and-drop': 1.4.0
|
||||
'@babel/runtime': 7.25.7
|
||||
dev: false
|
||||
|
||||
/@atlaskit/pragmatic-drag-and-drop@1.4.0:
|
||||
resolution: {integrity: sha512-qRY3PTJIcxfl/QB8Gwswz+BRvlmgAC5pB+J2hL6dkIxgqAgVwOhAamMUKsrOcFU/axG2Q7RbNs1xfoLKDuhoPg==}
|
||||
dependencies:
|
||||
'@babel/runtime': 7.25.7
|
||||
bind-event-listener: 3.0.0
|
||||
raf-schd: 4.0.3
|
||||
dev: false
|
||||
|
||||
/@babel/code-frame@7.25.7:
|
||||
resolution: {integrity: sha512-0xZJFNE5XMpENsgfHYTw8FbX4kv53mFLn2i3XPoq69LyhYSCBJtitaHx9QnsVTrsogI4Z3+HtEfZ2/GFPOtf5g==}
|
||||
engines: {node: '>=6.9.0'}
|
||||
@@ -493,8 +515,8 @@ packages:
|
||||
resolution: {integrity: sha512-MV6D4VLRIHr4PkW4zMyqfrNS1mPlCTiCXwvYGtDFQYr+xHFfonhAuf9WjsSc0nyp2m0OdkSLnzmVKkZFLo25Tg==}
|
||||
dev: false
|
||||
|
||||
/@chakra-ui/anatomy@2.3.4:
|
||||
resolution: {integrity: sha512-fFIYN7L276gw0Q7/ikMMlZxP7mvnjRaWJ7f3Jsf9VtDOi6eAYIBRrhQe6+SZ0PGmoOkRaBc7gSE5oeIbgFFyrw==}
|
||||
/@chakra-ui/anatomy@2.3.5:
|
||||
resolution: {integrity: sha512-3im33cUOxCbISjaBlINE2u8BOwJSCdzpjCX0H+0JxK2xz26UaVA5xeI3NYHUoxDnr/QIrgfrllGxS0szYwOcyg==}
|
||||
dev: false
|
||||
|
||||
/@chakra-ui/breakpoint-utils@2.0.8:
|
||||
@@ -551,12 +573,12 @@ packages:
|
||||
react: 18.3.1
|
||||
dev: false
|
||||
|
||||
/@chakra-ui/hooks@2.4.2(react@18.3.1):
|
||||
resolution: {integrity: sha512-LRKiVE1oA7afT5tbbSKAy7Uas2xFHE6IkrQdbhWCHmkHBUtPvjQQDgwtnd4IRZPmoEfNGwoJ/MQpwOM/NRTTwA==}
|
||||
/@chakra-ui/hooks@2.4.3(react@18.3.1):
|
||||
resolution: {integrity: sha512-Sr2zsoTZw3p7HbrUy4aLpTIkE2XXUelAUgg3NGwMzrmx75bE0qVyiuuTFOuyEzGxYVV2Fe8QtcKKilm6RwzTGg==}
|
||||
peerDependencies:
|
||||
react: '>=18'
|
||||
dependencies:
|
||||
'@chakra-ui/utils': 2.2.2(react@18.3.1)
|
||||
'@chakra-ui/utils': 2.2.3(react@18.3.1)
|
||||
'@zag-js/element-size': 0.31.1
|
||||
copy-to-clipboard: 3.3.3
|
||||
framesync: 6.1.2
|
||||
@@ -574,13 +596,13 @@ packages:
|
||||
react: 18.3.1
|
||||
dev: false
|
||||
|
||||
/@chakra-ui/icons@2.2.4(@chakra-ui/react@2.10.2)(react@18.3.1):
|
||||
/@chakra-ui/icons@2.2.4(@chakra-ui/react@2.10.4)(react@18.3.1):
|
||||
resolution: {integrity: sha512-l5QdBgwrAg3Sc2BRqtNkJpfuLw/pWRDwwT58J6c4PqQT6wzXxyNa8Q0PForu1ltB5qEiFb1kxr/F/HO1EwNa6g==}
|
||||
peerDependencies:
|
||||
'@chakra-ui/react': '>=2.0.0'
|
||||
react: '>=18'
|
||||
dependencies:
|
||||
'@chakra-ui/react': 2.10.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1)
|
||||
'@chakra-ui/react': 2.10.4(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1)
|
||||
react: 18.3.1
|
||||
dev: false
|
||||
|
||||
@@ -803,8 +825,8 @@ packages:
|
||||
react: 18.3.1
|
||||
dev: false
|
||||
|
||||
/@chakra-ui/react@2.10.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1):
|
||||
resolution: {integrity: sha512-TfIHTqTlxTHYJZBtpiR5EZasPUrLYKJxdbHkdOJb5G1OQ+2c5kKl5XA7c2pMtsEptzb7KxAAIB62t3hxdfWp1w==}
|
||||
/@chakra-ui/react@2.10.4(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1):
|
||||
resolution: {integrity: sha512-XyRWnuZ1Uw7Mlj5pKUGO5/WhnIHP/EOrpy6lGZC1yWlkd0eIfIpYMZ1ALTZx4KPEdbBaes48dgiMT2ROCqLhkA==}
|
||||
peerDependencies:
|
||||
'@emotion/react': '>=11'
|
||||
'@emotion/styled': '>=11'
|
||||
@@ -812,10 +834,10 @@ packages:
|
||||
react: '>=18'
|
||||
react-dom: '>=18'
|
||||
dependencies:
|
||||
'@chakra-ui/hooks': 2.4.2(react@18.3.1)
|
||||
'@chakra-ui/styled-system': 2.11.2(react@18.3.1)
|
||||
'@chakra-ui/theme': 3.4.6(@chakra-ui/styled-system@2.11.2)(react@18.3.1)
|
||||
'@chakra-ui/utils': 2.2.2(react@18.3.1)
|
||||
'@chakra-ui/hooks': 2.4.3(react@18.3.1)
|
||||
'@chakra-ui/styled-system': 2.12.1(react@18.3.1)
|
||||
'@chakra-ui/theme': 3.4.7(@chakra-ui/styled-system@2.12.1)(react@18.3.1)
|
||||
'@chakra-ui/utils': 2.2.3(react@18.3.1)
|
||||
'@emotion/react': 11.13.3(@types/react@18.3.11)(react@18.3.1)
|
||||
'@emotion/styled': 11.13.0(@emotion/react@11.13.3)(@types/react@18.3.11)(react@18.3.1)
|
||||
'@popperjs/core': 2.11.8
|
||||
@@ -846,10 +868,10 @@ packages:
|
||||
react: 18.3.1
|
||||
dev: false
|
||||
|
||||
/@chakra-ui/styled-system@2.11.2(react@18.3.1):
|
||||
resolution: {integrity: sha512-y++z2Uop+hjfZX9mbH88F1ikazPv32asD2er56zMJBemUAzweXnHTpiCQbluEDSUDhqmghVZAdb+5L4XLbsRxA==}
|
||||
/@chakra-ui/styled-system@2.12.1(react@18.3.1):
|
||||
resolution: {integrity: sha512-DQph1nDiCPtgze7nDe0a36530ByXb5VpPosKGyWMvKocVeZJcDtYG6XM0+V5a0wKuFBXsViBBRIFUTiUesJAcg==}
|
||||
dependencies:
|
||||
'@chakra-ui/utils': 2.2.2(react@18.3.1)
|
||||
'@chakra-ui/utils': 2.2.3(react@18.3.1)
|
||||
csstype: 3.1.3
|
||||
transitivePeerDependencies:
|
||||
- react
|
||||
@@ -893,14 +915,14 @@ packages:
|
||||
color2k: 2.0.3
|
||||
dev: false
|
||||
|
||||
/@chakra-ui/theme-tools@2.2.6(@chakra-ui/styled-system@2.11.2)(react@18.3.1):
|
||||
resolution: {integrity: sha512-3UhKPyzKbV3l/bg1iQN9PBvffYp+EBOoYMUaeTUdieQRPFzo2jbYR0lNCxqv8h5aGM/k54nCHU2M/GStyi9F2A==}
|
||||
/@chakra-ui/theme-tools@2.2.7(@chakra-ui/styled-system@2.12.1)(react@18.3.1):
|
||||
resolution: {integrity: sha512-K/VJd0QcnKik7m+qZTkggqNLep6+MPUu8IP5TUpHsnSM5R/RVjsJIR7gO8IZVAIMIGLLTIhGshHxeMekqv6LcQ==}
|
||||
peerDependencies:
|
||||
'@chakra-ui/styled-system': '>=2.0.0'
|
||||
dependencies:
|
||||
'@chakra-ui/anatomy': 2.3.4
|
||||
'@chakra-ui/styled-system': 2.11.2(react@18.3.1)
|
||||
'@chakra-ui/utils': 2.2.2(react@18.3.1)
|
||||
'@chakra-ui/anatomy': 2.3.5
|
||||
'@chakra-ui/styled-system': 2.12.1(react@18.3.1)
|
||||
'@chakra-ui/utils': 2.2.3(react@18.3.1)
|
||||
color2k: 2.0.3
|
||||
transitivePeerDependencies:
|
||||
- react
|
||||
@@ -926,15 +948,15 @@ packages:
|
||||
'@chakra-ui/theme-tools': 2.1.2(@chakra-ui/styled-system@2.9.2)
|
||||
dev: false
|
||||
|
||||
/@chakra-ui/theme@3.4.6(@chakra-ui/styled-system@2.11.2)(react@18.3.1):
|
||||
resolution: {integrity: sha512-ZwFBLfiMC3URwaO31ONXoKH9k0TX0OW3UjdPF3EQkQpYyrk/fm36GkkzajjtdpWEd7rzDLRsQjPmvwNaSoNDtg==}
|
||||
/@chakra-ui/theme@3.4.7(@chakra-ui/styled-system@2.12.1)(react@18.3.1):
|
||||
resolution: {integrity: sha512-pfewthgZTFNUYeUwGvhPQO/FTIyf375cFV1AT8N1y0aJiw4KDe7YTGm7p0aFy4AwAjH2ydMgeEx/lua4tx8qyQ==}
|
||||
peerDependencies:
|
||||
'@chakra-ui/styled-system': '>=2.8.0'
|
||||
dependencies:
|
||||
'@chakra-ui/anatomy': 2.3.4
|
||||
'@chakra-ui/styled-system': 2.11.2(react@18.3.1)
|
||||
'@chakra-ui/theme-tools': 2.2.6(@chakra-ui/styled-system@2.11.2)(react@18.3.1)
|
||||
'@chakra-ui/utils': 2.2.2(react@18.3.1)
|
||||
'@chakra-ui/anatomy': 2.3.5
|
||||
'@chakra-ui/styled-system': 2.12.1(react@18.3.1)
|
||||
'@chakra-ui/theme-tools': 2.2.7(@chakra-ui/styled-system@2.12.1)(react@18.3.1)
|
||||
'@chakra-ui/utils': 2.2.3(react@18.3.1)
|
||||
transitivePeerDependencies:
|
||||
- react
|
||||
dev: false
|
||||
@@ -959,8 +981,8 @@ packages:
|
||||
lodash.mergewith: 4.6.2
|
||||
dev: false
|
||||
|
||||
/@chakra-ui/utils@2.2.2(react@18.3.1):
|
||||
resolution: {integrity: sha512-jUPLT0JzRMWxpdzH6c+t0YMJYrvc5CLericgITV3zDSXblkfx3DsYXqU11DJTSGZI9dUKzM1Wd0Wswn4eJwvFQ==}
|
||||
/@chakra-ui/utils@2.2.3(react@18.3.1):
|
||||
resolution: {integrity: sha512-cldoCQuexZ6e07/9hWHKD4l1QXXlM1Nax9tuQOBvVf/EgwNZt3nZu8zZRDFlhAOKCTQDkmpLTTu+eXXjChNQOw==}
|
||||
peerDependencies:
|
||||
react: '>=16.8.0'
|
||||
dependencies:
|
||||
@@ -980,49 +1002,6 @@ packages:
|
||||
engines: {node: '>17.0.0'}
|
||||
dev: false
|
||||
|
||||
/@dnd-kit/accessibility@3.1.0(react@18.3.1):
|
||||
resolution: {integrity: sha512-ea7IkhKvlJUv9iSHJOnxinBcoOI3ppGnnL+VDJ75O45Nss6HtZd8IdN8touXPDtASfeI2T2LImb8VOZcL47wjQ==}
|
||||
peerDependencies:
|
||||
react: '>=16.8.0'
|
||||
dependencies:
|
||||
react: 18.3.1
|
||||
tslib: 2.7.0
|
||||
dev: false
|
||||
|
||||
/@dnd-kit/core@6.1.0(react-dom@18.3.1)(react@18.3.1):
|
||||
resolution: {integrity: sha512-J3cQBClB4TVxwGo3KEjssGEXNJqGVWx17aRTZ1ob0FliR5IjYgTxl5YJbKTzA6IzrtelotH19v6y7uoIRUZPSg==}
|
||||
peerDependencies:
|
||||
react: '>=16.8.0'
|
||||
react-dom: '>=16.8.0'
|
||||
dependencies:
|
||||
'@dnd-kit/accessibility': 3.1.0(react@18.3.1)
|
||||
'@dnd-kit/utilities': 3.2.2(react@18.3.1)
|
||||
react: 18.3.1
|
||||
react-dom: 18.3.1(react@18.3.1)
|
||||
tslib: 2.7.0
|
||||
dev: false
|
||||
|
||||
/@dnd-kit/sortable@8.0.0(@dnd-kit/core@6.1.0)(react@18.3.1):
|
||||
resolution: {integrity: sha512-U3jk5ebVXe1Lr7c2wU7SBZjcWdQP+j7peHJfCspnA81enlu88Mgd7CC8Q+pub9ubP7eKVETzJW+IBAhsqbSu/g==}
|
||||
peerDependencies:
|
||||
'@dnd-kit/core': ^6.1.0
|
||||
react: '>=16.8.0'
|
||||
dependencies:
|
||||
'@dnd-kit/core': 6.1.0(react-dom@18.3.1)(react@18.3.1)
|
||||
'@dnd-kit/utilities': 3.2.2(react@18.3.1)
|
||||
react: 18.3.1
|
||||
tslib: 2.7.0
|
||||
dev: false
|
||||
|
||||
/@dnd-kit/utilities@3.2.2(react@18.3.1):
|
||||
resolution: {integrity: sha512-+MKAJEOfaBe5SmV6t34p80MMKhjvUz0vRrvVJbPT0WElzaOJ/1xs+D+KDv+tD/NE5ujfrChEcshd4fLn0wpiqg==}
|
||||
peerDependencies:
|
||||
react: '>=16.8.0'
|
||||
dependencies:
|
||||
react: 18.3.1
|
||||
tslib: 2.7.0
|
||||
dev: false
|
||||
|
||||
/@emotion/babel-plugin@11.12.0:
|
||||
resolution: {integrity: sha512-y2WQb+oP8Jqvvclh8Q55gLUyb7UFvgv7eJfsj7td5TToBrIUtPay2kMrZi4xjq9qw2vD0ZR5fSho0yqoFgX7Rw==}
|
||||
dependencies:
|
||||
@@ -1696,20 +1675,20 @@ packages:
|
||||
prettier: 3.3.3
|
||||
dev: true
|
||||
|
||||
/@invoke-ai/ui-library@0.0.43(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.1.0)(@types/react@18.3.11)(i18next@23.15.1)(react-dom@18.3.1)(react@18.3.1):
|
||||
resolution: {integrity: sha512-t3fPYyks07ue3dEBPJuTHbeDLnDckDCOrtvc07mMDbLOnlPEZ0StaeiNGH+oO8qLzAuMAlSTdswgHfzTc2MmPw==}
|
||||
/@invoke-ai/ui-library@0.0.44(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.1.0)(@types/react@18.3.11)(i18next@23.15.1)(react-dom@18.3.1)(react@18.3.1):
|
||||
resolution: {integrity: sha512-PDseHmdr8oi8cmrpx3UwIYHn4NduAJX2R0pM0pyM54xrCMPMgYiCbC/eOs8Gt4fBc2ziiPZ9UGoW4evnE3YJsg==}
|
||||
peerDependencies:
|
||||
'@fontsource-variable/inter': ^5.0.16
|
||||
react: ^18.2.0
|
||||
react-dom: ^18.2.0
|
||||
dependencies:
|
||||
'@chakra-ui/anatomy': 2.3.4
|
||||
'@chakra-ui/icons': 2.2.4(@chakra-ui/react@2.10.2)(react@18.3.1)
|
||||
'@chakra-ui/anatomy': 2.2.2
|
||||
'@chakra-ui/icons': 2.2.4(@chakra-ui/react@2.10.4)(react@18.3.1)
|
||||
'@chakra-ui/layout': 2.3.1(@chakra-ui/system@2.6.2)(react@18.3.1)
|
||||
'@chakra-ui/portal': 2.1.0(react-dom@18.3.1)(react@18.3.1)
|
||||
'@chakra-ui/react': 2.10.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1)
|
||||
'@chakra-ui/styled-system': 2.11.2(react@18.3.1)
|
||||
'@chakra-ui/theme-tools': 2.2.6(@chakra-ui/styled-system@2.11.2)(react@18.3.1)
|
||||
'@chakra-ui/react': 2.10.4(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1)
|
||||
'@chakra-ui/styled-system': 2.9.2
|
||||
'@chakra-ui/theme-tools': 2.1.2(@chakra-ui/styled-system@2.9.2)
|
||||
'@emotion/react': 11.13.3(@types/react@18.3.11)(react@18.3.1)
|
||||
'@emotion/styled': 11.13.0(@emotion/react@11.13.3)(@types/react@18.3.11)(react@18.3.1)
|
||||
'@fontsource-variable/inter': 5.1.0
|
||||
@@ -4313,6 +4292,10 @@ packages:
|
||||
open: 8.4.2
|
||||
dev: true
|
||||
|
||||
/bind-event-listener@3.0.0:
|
||||
resolution: {integrity: sha512-PJvH288AWQhKs2v9zyfYdPzlPqf5bXbGMmhmUIY9x4dAUGIWgomO771oBQNwJnMQSnUIXhKu6sgzpBRXTlvb8Q==}
|
||||
dev: false
|
||||
|
||||
/bl@4.1.0:
|
||||
resolution: {integrity: sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==}
|
||||
dependencies:
|
||||
@@ -7557,6 +7540,10 @@ packages:
|
||||
resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==}
|
||||
dev: true
|
||||
|
||||
/raf-schd@4.0.3:
|
||||
resolution: {integrity: sha512-tQkJl2GRWh83ui2DiPTJz9wEiMN20syf+5oKfB03yYP7ioZcJwsIK8FjrtLwH1m7C7e+Tt2yYBlrOpdT+dyeIQ==}
|
||||
dev: false
|
||||
|
||||
/raf-throttle@2.0.6:
|
||||
resolution: {integrity: sha512-C7W6hy78A+vMmk5a/B6C5szjBHrUzWJkVyakjKCK59Uy2CcA7KhO1JUvvH32IXYFIcyJ3FMKP3ZzCc2/71I6Vg==}
|
||||
dev: false
|
||||
|
||||
Binary image file not shown (new file, 895 KiB).
@@ -768,7 +768,8 @@
|
||||
"deletedPrivateBoardsCannotbeRestored": "Gelöschte Boards können nicht wiederhergestellt werden. Wenn Sie „Nur Board löschen“ wählen, werden die Bilder in einen privaten, nicht kategorisierten Status für den Ersteller des Bildes versetzt.",
|
||||
"assetsWithCount_one": "{{count}} in der Sammlung",
|
||||
"assetsWithCount_other": "{{count}} in der Sammlung",
|
||||
"deletedBoardsCannotbeRestored": "Gelöschte Ordner können nicht wiederhergestellt werden. Die Auswahl von \"Nur Ordner löschen\" verschiebt Bilder in einen unkategorisierten Zustand."
|
||||
"deletedBoardsCannotbeRestored": "Gelöschte Ordner können nicht wiederhergestellt werden. Die Auswahl von \"Nur Ordner löschen\" verschiebt Bilder in einen unkategorisierten Zustand.",
|
||||
"updateBoardError": "Fehler beim Aktualisieren des Ordners"
|
||||
},
|
||||
"queue": {
|
||||
"status": "Status",
|
||||
@@ -871,7 +872,9 @@
|
||||
"recallParameter": "{{label}} Abrufen",
|
||||
"parsingFailed": "Parsing Fehlgeschlagen",
|
||||
"canvasV2Metadata": "Leinwand",
|
||||
"guidance": "Führung"
|
||||
"guidance": "Führung",
|
||||
"seamlessXAxis": "Nahtlose X Achse",
|
||||
"seamlessYAxis": "Nahtlose Y Achse"
|
||||
},
|
||||
"popovers": {
|
||||
"noiseUseCPU": {
|
||||
@@ -1392,7 +1395,13 @@
|
||||
"pullBboxIntoLayerOk": "Bbox in die Ebene gezogen",
|
||||
"saveBboxToGallery": "Bbox in Galerie speichern",
|
||||
"tool": {
|
||||
"bbox": "Bbox"
|
||||
"bbox": "Bbox",
|
||||
"brush": "Pinsel",
|
||||
"eraser": "Radiergummi",
|
||||
"colorPicker": "Farbwähler",
|
||||
"view": "Ansicht",
|
||||
"rectangle": "Rechteck",
|
||||
"move": "Verschieben"
|
||||
},
|
||||
"transform": {
|
||||
"fitToBbox": "An Bbox anpassen",
|
||||
@@ -1434,7 +1443,6 @@
|
||||
"deleteReferenceImage": "Referenzbild löschen",
|
||||
"referenceImage": "Referenzbild",
|
||||
"opacity": "Opazität",
|
||||
"resetCanvas": "Leinwand zurücksetzen",
|
||||
"removeBookmark": "Lesezeichen entfernen",
|
||||
"rasterLayer": "Raster-Ebene",
|
||||
"rasterLayers_withCount_visible": "Raster-Ebenen ({{count}})",
|
||||
@@ -1511,7 +1519,30 @@
|
||||
"layer_one": "Ebene",
|
||||
"layer_other": "Ebenen",
|
||||
"layer_withCount_one": "Ebene ({{count}})",
|
||||
"layer_withCount_other": "Ebenen ({{count}})"
|
||||
"layer_withCount_other": "Ebenen ({{count}})",
|
||||
"fill": {
|
||||
"fillStyle": "Füllstil",
|
||||
"diagonal": "Diagonal",
|
||||
"vertical": "Vertikal",
|
||||
"fillColor": "Füllfarbe",
|
||||
"grid": "Raster",
|
||||
"solid": "Solide",
|
||||
"crosshatch": "Kreuzschraffur",
|
||||
"horizontal": "Horizontal"
|
||||
},
|
||||
"filter": {
|
||||
"apply": "Anwenden",
|
||||
"reset": "Zurücksetzen",
|
||||
"cancel": "Abbrechen",
|
||||
"spandrel_filter": {
|
||||
"label": "Bild-zu-Bild Modell",
|
||||
"description": "Ein Bild-zu-Bild Modell auf der ausgewählten Ebene ausführen.",
|
||||
"model": "Modell"
|
||||
},
|
||||
"filters": "Filter",
|
||||
"filterType": "Filtertyp",
|
||||
"filter": "Filter"
|
||||
}
|
||||
},
|
||||
"upsell": {
|
||||
"shareAccess": "Zugang teilen",
|
||||
|
||||
@@ -122,6 +122,7 @@
|
||||
"goTo": "Go to",
|
||||
"hotkeysLabel": "Hotkeys",
|
||||
"loadingImage": "Loading Image",
|
||||
"loadingModel": "Loading Model",
|
||||
"imageFailedToLoad": "Unable to Load Image",
|
||||
"img2img": "Image To Image",
|
||||
"inpaint": "inpaint",
|
||||
@@ -174,7 +175,8 @@
|
||||
"placeholderSelectAModel": "Select a model",
|
||||
"reset": "Reset",
|
||||
"none": "None",
|
||||
"new": "New"
|
||||
"new": "New",
|
||||
"generating": "Generating"
|
||||
},
|
||||
"hrf": {
|
||||
"hrf": "High Resolution Fix",
|
||||
@@ -261,7 +263,8 @@
|
||||
"iterations_one": "Iteration",
|
||||
"iterations_other": "Iterations",
|
||||
"generations_one": "Generation",
|
||||
"generations_other": "Generations"
|
||||
"generations_other": "Generations",
|
||||
"batchSize": "Batch Size"
|
||||
},
|
||||
"invocationCache": {
|
||||
"invocationCache": "Invocation Cache",
|
||||
@@ -704,6 +707,8 @@
|
||||
"baseModel": "Base Model",
|
||||
"cancel": "Cancel",
|
||||
"clipEmbed": "CLIP Embed",
|
||||
"clipLEmbed": "CLIP-L Embed",
|
||||
"clipGEmbed": "CLIP-G Embed",
|
||||
"config": "Config",
|
||||
"convert": "Convert",
|
||||
"convertingModelBegin": "Converting Model. Please wait.",
|
||||
@@ -973,6 +978,8 @@
|
||||
"zoomOutNodes": "Zoom Out",
|
||||
"betaDesc": "This invocation is in beta. Until it is stable, it may have breaking changes during app updates. We plan to support this invocation long-term.",
|
||||
"prototypeDesc": "This invocation is a prototype. It may have breaking changes during app updates and may be removed at any time.",
|
||||
"internalDesc": "This invocation is used internally by Invoke. It may have breaking changes during app updates and may be removed at any time.",
|
||||
"specialDesc": "This invocation some special handling in the app. For example, Batch nodes are used to queue multiple graphs from a single workflow.",
|
||||
"imageAccessError": "Unable to find image {{image_name}}, resetting to default",
|
||||
"boardAccessError": "Unable to find board {{board_id}}, resetting to default",
|
||||
"modelAccessError": "Unable to find model {{key}}, resetting to default",
|
||||
@@ -997,6 +1004,7 @@
|
||||
"controlNetControlMode": "Control Mode",
|
||||
"copyImage": "Copy Image",
|
||||
"denoisingStrength": "Denoising Strength",
|
||||
"disabledNoRasterContent": "Disabled (No Raster Content)",
|
||||
"downloadImage": "Download Image",
|
||||
"general": "General",
|
||||
"guidance": "Guidance",
|
||||
@@ -1010,8 +1018,11 @@
|
||||
"addingImagesTo": "Adding images to",
|
||||
"invoke": "Invoke",
|
||||
"missingFieldTemplate": "Missing field template",
|
||||
"missingInputForField": "{{nodeLabel}} -> {{fieldLabel}} missing input",
|
||||
"missingInputForField": "{{nodeLabel}} -> {{fieldLabel}}: missing input",
|
||||
"missingNodeTemplate": "Missing node template",
|
||||
"collectionEmpty": "{{nodeLabel}} -> {{fieldLabel}} empty collection",
|
||||
"collectionTooFewItems": "{{nodeLabel}} -> {{fieldLabel}}: too few items, minimum {{minItems}}",
|
||||
"collectionTooManyItems": "{{nodeLabel}} -> {{fieldLabel}}: too many items, maximum {{maxItems}}",
|
||||
"noModelSelected": "No model selected",
|
||||
"noT5EncoderModelSelected": "No T5 Encoder model selected for FLUX generation",
|
||||
"noFLUXVAEModelSelected": "No VAE model selected for FLUX generation",
|
||||
@@ -1020,10 +1031,11 @@
|
||||
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), bbox height is {{height}}",
|
||||
"fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), scaled bbox width is {{width}}",
|
||||
"fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), scaled bbox height is {{height}}",
|
||||
"canvasIsFiltering": "Canvas is filtering",
|
||||
"canvasIsTransforming": "Canvas is transforming",
|
||||
"canvasIsRasterizing": "Canvas is rasterizing",
|
||||
"canvasIsCompositing": "Canvas is compositing",
|
||||
"canvasIsFiltering": "Canvas is busy (filtering)",
|
||||
"canvasIsTransforming": "Canvas is busy (transforming)",
|
||||
"canvasIsRasterizing": "Canvas is busy (rasterizing)",
|
||||
"canvasIsCompositing": "Canvas is busy (compositing)",
|
||||
"canvasIsSelectingObject": "Canvas is busy (selecting object)",
|
||||
"noPrompts": "No prompts generated",
|
||||
"noNodesInGraph": "No nodes in graph",
|
||||
"systemDisconnected": "System disconnected",
|
||||
@@ -1136,6 +1148,7 @@
|
||||
"resetWebUI": "Reset Web UI",
|
||||
"resetWebUIDesc1": "Resetting the web UI only resets the browser's local cache of your images and remembered settings. It does not delete any images from disk.",
|
||||
"resetWebUIDesc2": "If images aren't showing up in the gallery or something else isn't working, please try resetting before submitting an issue on GitHub.",
|
||||
"showDetailedInvocationProgress": "Show Progress Details",
|
||||
"showProgressInViewer": "Show Progress Images in Viewer",
|
||||
"ui": "User Interface",
|
||||
"clearIntermediatesDisabled": "Queue must be empty to clear intermediates",
|
||||
@@ -1306,8 +1319,9 @@
|
||||
"controlNetBeginEnd": {
|
||||
"heading": "Begin / End Step Percentage",
|
||||
"paragraphs": [
|
||||
"The part of the of the denoising process that will have the Control Adapter applied.",
|
||||
"Generally, Control Adapters applied at the start of the process guide composition, and Control Adapters applied at the end guide details."
|
||||
"This setting determines which portion of the denoising (generation) process incorporates the guidance from this layer.",
|
||||
"• Start Step (%): Specifies when to begin applying the guidance from this layer during the generation process.",
|
||||
"• End Step (%): Specifies when to stop applying this layer's guidance and revert general guidance from the model and other settings."
|
||||
]
|
||||
},
|
||||
"controlNetControlMode": {
|
||||
@@ -1317,7 +1331,7 @@
|
||||
"controlNetProcessor": {
|
||||
"heading": "Processor",
|
||||
"paragraphs": [
|
||||
"Method of processing the input image to guide the generation process. Different processors will providedifferent effects or styles in your generated images."
|
||||
"Method of processing the input image to guide the generation process. Different processors will provide different effects or styles in your generated images."
|
||||
]
|
||||
},
|
||||
"controlNetResizeMode": {
|
||||
@@ -1325,13 +1339,15 @@
|
||||
"paragraphs": ["Method to fit Control Adapter's input image size to the output generation size."]
|
||||
},
|
||||
"ipAdapterMethod": {
|
||||
"heading": "Method",
|
||||
"paragraphs": ["Method by which to apply the current IP Adapter."]
|
||||
"heading": "Mode",
|
||||
"paragraphs": ["The mode defines how the reference image will guide the generation process."]
|
||||
},
|
||||
"controlNetWeight": {
|
||||
"heading": "Weight",
|
||||
"paragraphs": [
|
||||
"Weight of the Control Adapter. Higher weight will lead to larger impacts on the final image."
|
||||
"Adjusts how strongly the layer influences the generation process",
|
||||
"• Higher Weight (.75-2): Creates a more significant impact on the final result.",
|
||||
"• Lower Weight (0-.75): Creates a smaller impact on the final result."
|
||||
]
|
||||
},
|
||||
"dynamicPrompts": {
|
||||
@@ -1412,8 +1428,9 @@
|
||||
"paramDenoisingStrength": {
|
||||
"heading": "Denoising Strength",
|
||||
"paragraphs": [
|
||||
"How much noise is added to the input image.",
|
||||
"0 will result in an identical image, while 1 will result in a completely new image."
|
||||
"Controls how much the generated image varies from the raster layer(s).",
|
||||
"Lower strength stays closer to the combined visible raster layers. Higher strength relies more on the global prompt.",
|
||||
"When there are no raster layers with visible content, this setting is ignored."
|
||||
]
|
||||
},
|
||||
"paramHeight": {
|
||||
@@ -1652,7 +1669,6 @@
|
||||
"newControlLayerError": "Problem Creating Control Layer",
|
||||
"newRasterLayerOk": "Created Raster Layer",
|
||||
"newRasterLayerError": "Problem Creating Raster Layer",
|
||||
"newFromImage": "New from Image",
|
||||
"pullBboxIntoLayerOk": "Bbox Pulled Into Layer",
|
||||
"pullBboxIntoLayerError": "Problem Pulling BBox Into Layer",
|
||||
"pullBboxIntoReferenceImageOk": "Bbox Pulled Into ReferenceImage",
|
||||
@@ -1662,13 +1678,14 @@
|
||||
"mergeDown": "Merge Down",
|
||||
"mergeVisibleOk": "Merged layers",
|
||||
"mergeVisibleError": "Error merging layers",
|
||||
"mergingLayers": "Merging layers",
|
||||
"clearHistory": "Clear History",
|
||||
"bboxOverlay": "Show Bbox Overlay",
|
||||
"resetCanvas": "Reset Canvas",
|
||||
"newSession": "New Session",
|
||||
"clearCaches": "Clear Caches",
|
||||
"recalculateRects": "Recalculate Rects",
|
||||
"clipToBbox": "Clip Strokes to Bbox",
|
||||
"outputOnlyMaskedRegions": "Output Only Masked Regions",
|
||||
"outputOnlyMaskedRegions": "Output Only Generated Regions",
|
||||
"addLayer": "Add Layer",
|
||||
"duplicate": "Duplicate",
|
||||
"moveToFront": "Move to Front",
|
||||
@@ -1696,8 +1713,10 @@
"controlLayer": "Control Layer",
"inpaintMask": "Inpaint Mask",
"regionalGuidance": "Regional Guidance",
"canvasAsRasterLayer": "$t(controlLayers.canvas) as $t(controlLayers.rasterLayer)",
"canvasAsControlLayer": "$t(controlLayers.canvas) as $t(controlLayers.controlLayer)",
"asRasterLayer": "As $t(controlLayers.rasterLayer)",
"asRasterLayerResize": "As $t(controlLayers.rasterLayer) (Resize)",
"asControlLayer": "As $t(controlLayers.controlLayer)",
"asControlLayerResize": "As $t(controlLayers.controlLayer) (Resize)",
"referenceImage": "Reference Image",
"regionalReferenceImage": "Regional Reference Image",
"globalReferenceImage": "Global Reference Image",
@@ -1765,6 +1784,7 @@
"pullBboxIntoLayer": "Pull Bbox into Layer",
"pullBboxIntoReferenceImage": "Pull Bbox into Reference Image",
"showProgressOnCanvas": "Show Progress on Canvas",
"useImage": "Use Image",
"prompt": "Prompt",
"negativePrompt": "Negative Prompt",
"beginEndStepPercentShort": "Begin/End %",
@@ -1773,19 +1793,26 @@
"newGallerySessionDesc": "This will clear the canvas and all settings except for your model selection. Generations will be sent to the gallery.",
"newCanvasSession": "New Canvas Session",
"newCanvasSessionDesc": "This will clear the canvas and all settings except for your model selection. Generations will be staged on the canvas.",
"resetCanvasLayers": "Reset Canvas Layers",
"resetGenerationSettings": "Reset Generation Settings",
"replaceCurrent": "Replace Current",
"controlLayerEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, or draw on the canvas to get started.",
"referenceImageEmptyState": "<UploadButton>Upload an image</UploadButton> or drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer to get started.",
"controlMode": {
"controlMode": "Control Mode",
"balanced": "Balanced",
"balanced": "Balanced (recommended)",
"prompt": "Prompt",
"control": "Control",
"megaControl": "Mega Control"
},
"ipAdapterMethod": {
"ipAdapterMethod": "IP Adapter Method",
"full": "Full",
"ipAdapterMethod": "Mode",
"full": "Style and Composition",
"fullDesc": "Applies visual style (colors, textures) & composition (layout, structure).",
"style": "Style Only",
"composition": "Composition Only"
"styleDesc": "Applies visual style (colors, textures) without considering its layout.",
"composition": "Composition Only",
"compositionDesc": "Replicates layout & structure while ignoring the reference's style."
},
"fill": {
"fillColor": "Fill Color",
@@ -1815,6 +1842,9 @@
"process": "Process",
"apply": "Apply",
"cancel": "Cancel",
"advanced": "Advanced",
"processingLayerWith": "Processing layer with the {{type}} filter.",
"forMoreControl": "For more control, click Advanced below.",
"spandrel_filter": {
"label": "Image-to-Image Model",
"description": "Run an image-to-image model on the selected layer.",
@@ -1992,7 +2022,9 @@
"upscaleModelDesc": "Upscale (image to image) model",
"missingUpscaleInitialImage": "Missing initial image for upscaling",
"missingUpscaleModel": "Missing upscale model",
"missingTileControlNetModel": "No valid tile ControlNet models installed"
"missingTileControlNetModel": "No valid tile ControlNet models installed",
"incompatibleBaseModel": "Unsupported main model architecture for upscaling",
"incompatibleBaseModelDesc": "Upscaling is supported for SD1.5 and SDXL architecture models only. Change the main model to enable upscaling."
},
"stylePresets": {
"active": "Active",
@@ -2095,11 +2127,74 @@
},
"whatsNew": {
"whatsNewInInvoke": "What's New in Invoke",
"line1": "<ItalicComponent>Select Object</ItalicComponent> tool for precise object selection and editing",
"line2": "Expanded Flux support, now with Global Reference Images",
"line3": "Improved tooltips and context menus",
"items": [
"<StrongComponent>Workflows</StrongComponent>: Run a workflow for a collection of images using the new <StrongComponent>Image Batch</StrongComponent> node.",
"<StrongComponent>FLUX</StrongComponent>: Support for XLabs IP Adapter v2."
],
"readReleaseNotes": "Read Release Notes",
"watchRecentReleaseVideos": "Watch Recent Release Videos",
"watchUiUpdatesOverview": "Watch UI Updates Overview"
},
"supportVideos": {
"supportVideos": "Support Videos",
"gettingStarted": "Getting Started",
"controlCanvas": "Control Canvas",
"watch": "Watch",
"studioSessionsDesc1": "Check out the <StudioSessionsPlaylistLink /> for Invoke deep dives.",
"studioSessionsDesc2": "Join our <DiscordLink /> to participate in the live sessions and ask questions. Sessions are uploaded to the playlist the following week.",
"videos": {
"creatingYourFirstImage": {
"title": "Creating Your First Image",
"description": "Introduction to creating an image from scratch using Invoke's tools."
},
"usingControlLayersAndReferenceGuides": {
"title": "Using Control Layers and Reference Guides",
"description": "Learn how to guide your image creation with control layers and reference images."
},
"understandingImageToImageAndDenoising": {
"title": "Understanding Image-to-Image and Denoising",
"description": "Overview of image-to-image transformations and denoising in Invoke."
},
"exploringAIModelsAndConceptAdapters": {
"title": "Exploring AI Models and Concept Adapters",
"description": "Dive into AI models and how to use concept adapters for creative control."
},
"creatingAndComposingOnInvokesControlCanvas": {
"title": "Creating and Composing on Invoke's Control Canvas",
"description": "Learn to compose images using Invoke's control canvas."
},
"upscaling": {
"title": "Upscaling",
"description": "How to upscale images with Invoke's tools to enhance resolution."
},
"howDoIGenerateAndSaveToTheGallery": {
"title": "How Do I Generate and Save to the Gallery?",
"description": "Steps to generate and save images to the gallery."
},
"howDoIEditOnTheCanvas": {
"title": "How Do I Edit on the Canvas?",
"description": "Guide to editing images directly on the canvas."
},
"howDoIDoImageToImageTransformation": {
"title": "How Do I Do Image-to-Image Transformation?",
"description": "Tutorial on performing image-to-image transformations in Invoke."
},
"howDoIUseControlNetsAndControlLayers": {
"title": "How Do I Use Control Nets and Control Layers?",
"description": "Learn to apply control layers and controlnets to your images."
},
"howDoIUseGlobalIPAdaptersAndReferenceImages": {
"title": "How Do I Use Global IP Adapters and Reference Images?",
"description": "Introduction to adding reference images and global IP adapters."
},
"howDoIUseInpaintMasks": {
"title": "How Do I Use Inpaint Masks?",
"description": "How to apply inpaint masks for image correction and variation."
},
"howDoIOutpaint": {
"title": "How Do I Outpaint?",
"description": "Guide to outpainting beyond the original image borders."
}
}
}
}
@@ -13,7 +13,7 @@
|
||||
"discordLabel": "Discord",
|
||||
"back": "Atrás",
|
||||
"loading": "Cargando",
|
||||
"postprocessing": "Postprocesado",
|
||||
"postprocessing": "Postprocesamiento",
|
||||
"txt2img": "De texto a imagen",
|
||||
"accept": "Aceptar",
|
||||
"cancel": "Cancelar",
|
||||
@@ -64,7 +64,7 @@
|
||||
"prevPage": "Página Anterior",
|
||||
"red": "Rojo",
|
||||
"alpha": "Transparencia",
|
||||
"outputs": "Salidas",
|
||||
"outputs": "Resultados",
|
||||
"learnMore": "Aprende más",
|
||||
"enabled": "Activado",
|
||||
"disabled": "Desactivado",
|
||||
@@ -73,7 +73,32 @@
|
||||
"created": "Creado",
|
||||
"save": "Guardar",
|
||||
"unknownError": "Error Desconocido",
|
||||
"blue": "Azul"
|
||||
"blue": "Azul",
|
||||
"clipboard": "Portapapeles",
|
||||
"loadingImage": "Cargando la imagen",
|
||||
"inpaint": "inpaint",
|
||||
"ipAdapter": "Adaptador IP",
|
||||
"t2iAdapter": "Adaptador T2I",
|
||||
"apply": "Aplicar",
|
||||
"openInViewer": "Abrir en el visor",
|
||||
"off": "Apagar",
|
||||
"generating": "Generando",
|
||||
"ok": "De acuerdo",
|
||||
"placeholderSelectAModel": "Seleccionar un modelo",
|
||||
"reset": "Restablecer",
|
||||
"none": "Ninguno",
|
||||
"new": "Nuevo",
|
||||
"dontShowMeThese": "No mostrar estos",
|
||||
"loadingModel": "Cargando el modelo",
|
||||
"view": "Ver",
|
||||
"edit": "Editar",
|
||||
"safetensors": "Safetensors",
|
||||
"toResolve": "Para resolver",
|
||||
"localSystem": "Sistema local",
|
||||
"notInstalled": "No $t(common.installed)",
|
||||
"outpaint": "outpaint",
|
||||
"simple": "Sencillo",
|
||||
"close": "Cerrar"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Tamaño de la imagen",
|
||||
@@ -85,7 +110,63 @@
|
||||
"deleteImage_other": "Eliminar {{count}} Imágenes",
|
||||
"deleteImagePermanent": "Las imágenes eliminadas no se pueden restaurar.",
|
||||
"assets": "Activos",
|
||||
"autoAssignBoardOnClick": "Asignación automática de tableros al hacer clic"
|
||||
"autoAssignBoardOnClick": "Asignar automática tableros al hacer clic",
|
||||
"gallery": "Galería",
|
||||
"noImageSelected": "Sin imágenes seleccionadas",
|
||||
"bulkDownloadRequestFailed": "Error al preparar la descarga",
|
||||
"oldestFirst": "La más antigua primero",
|
||||
"sideBySide": "conjuntamente",
|
||||
"selectForCompare": "Seleccionar para comparar",
|
||||
"alwaysShowImageSizeBadge": "Mostrar siempre las dimensiones de la imagen",
|
||||
"currentlyInUse": "Esta imagen se utiliza actualmente con las siguientes funciones:",
|
||||
"unableToLoad": "No se puede cargar la galería",
|
||||
"selectAllOnPage": "Seleccionar todo en la página",
|
||||
"selectAnImageToCompare": "Seleccione una imagen para comparar",
|
||||
"bulkDownloadFailed": "Error en la descarga",
|
||||
"compareHelp2": "Presione <Kbd> M </Kbd> para recorrer los modos de comparación.",
|
||||
"move": "Mover",
|
||||
"copy": "Copiar",
|
||||
"drop": "Gota",
|
||||
"displayBoardSearch": "Tablero de búsqueda",
|
||||
"deleteSelection": "Borrar selección",
|
||||
"downloadSelection": "Descargar selección",
|
||||
"openInViewer": "Abrir en el visor",
|
||||
"searchImages": "Búsqueda por metadatos",
|
||||
"swapImages": "Intercambiar imágenes",
|
||||
"sortDirection": "Orden de clasificación",
|
||||
"showStarredImagesFirst": "Mostrar imágenes destacadas primero",
|
||||
"go": "Ir",
|
||||
"bulkDownloadRequested": "Preparando la descarga",
|
||||
"image": "imagen",
|
||||
"compareHelp4": "Presione <Kbd> Z </Kbd> o <Kbd> Esc </Kbd> para salir.",
|
||||
"viewerImage": "Ver imagen",
|
||||
"dropOrUpload": "$t(gallery.drop) o cargar",
|
||||
"displaySearch": "Buscar imagen",
|
||||
"download": "Descargar",
|
||||
"exitBoardSearch": "Finalizar búsqueda",
|
||||
"exitSearch": "Salir de la búsqueda de imágenes",
|
||||
"featuresWillReset": "Si elimina esta imagen, dichas funciones se restablecerán inmediatamente.",
|
||||
"jump": "Omitir",
|
||||
"loading": "Cargando",
|
||||
"newestFirst": "La más nueva primero",
|
||||
"unstarImage": "Dejar de ser favorita",
|
||||
"bulkDownloadRequestedDesc": "Su solicitud de descarga se está preparando. Esto puede tardar unos minutos.",
|
||||
"hover": "Desplazar",
|
||||
"compareHelp1": "Mantenga presionada la tecla <Kbd> Alt </Kbd> mientras hace clic en una imagen de la galería o utiliza las teclas de flecha para cambiar la imagen de comparación.",
|
||||
"stretchToFit": "Estirar para encajar",
|
||||
"exitCompare": "Salir de la comparación",
|
||||
"starImage": "Imágenes favoritas",
|
||||
"dropToUpload": "$t(gallery.drop) para cargar",
|
||||
"slider": "Deslizador",
|
||||
"assetsTab": "Archivos que has cargado para utilizarlos en tus proyectos.",
|
||||
"imagesTab": "Imágenes que ha creado y guardado en Invoke.",
|
||||
"compareImage": "Comparar imagen",
|
||||
"boardsSettings": "Ajustes de los tableros",
|
||||
"imagesSettings": "Configuración de imágenes de la galería",
|
||||
"compareHelp3": "Presione <Kbd> C </Kbd> para intercambiar las imágenes comparadas.",
|
||||
"showArchivedBoards": "Mostrar paneles archivados",
|
||||
"closeViewer": "Cerrar visor",
|
||||
"openViewer": "Abrir visor"
|
||||
},
|
||||
"modelManager": {
|
||||
"modelManager": "Gestor de Modelos",
|
||||
@@ -131,7 +212,13 @@
|
||||
"modelDeleted": "Modelo eliminado",
|
||||
"modelDeleteFailed": "Error al borrar el modelo",
|
||||
"settings": "Ajustes",
|
||||
"syncModels": "Sincronizar las plantillas"
|
||||
"syncModels": "Sincronizar las plantillas",
|
||||
"clipEmbed": "Incrustar CLIP",
|
||||
"addModels": "Añadir modelos",
|
||||
"advanced": "Avanzado",
|
||||
"clipGEmbed": "Incrustar CLIP-G",
|
||||
"cancel": "Cancelar",
|
||||
"clipLEmbed": "Incrustar CLIP-L"
|
||||
},
|
||||
"parameters": {
|
||||
"images": "Imágenes",
|
||||
@@ -158,19 +245,19 @@
|
||||
"useSeed": "Usar Semilla",
|
||||
"useAll": "Usar Todo",
|
||||
"info": "Información",
|
||||
"showOptionsPanel": "Mostrar panel de opciones",
|
||||
"showOptionsPanel": "Mostrar panel lateral (O o T)",
|
||||
"symmetry": "Simetría",
|
||||
"copyImage": "Copiar la imagen",
|
||||
"general": "General",
|
||||
"denoisingStrength": "Intensidad de la eliminación del ruido",
|
||||
"seamlessXAxis": "Eje x",
|
||||
"seamlessYAxis": "Eje y",
|
||||
"seamlessXAxis": "Eje X sin juntas",
|
||||
"seamlessYAxis": "Eje Y sin juntas",
|
||||
"scheduler": "Programador",
|
||||
"positivePromptPlaceholder": "Prompt Positivo",
|
||||
"negativePromptPlaceholder": "Prompt Negativo",
|
||||
"controlNetControlMode": "Modo de control",
|
||||
"clipSkip": "Omitir el CLIP",
|
||||
"maskBlur": "Difuminar",
|
||||
"maskBlur": "Desenfoque de máscara",
|
||||
"patchmatchDownScaleSize": "Reducir a escala",
|
||||
"coherenceMode": "Modo"
|
||||
},
|
||||
@@ -202,16 +289,19 @@
|
||||
"serverError": "Error en el servidor",
|
||||
"canceled": "Procesando la cancelación",
|
||||
"connected": "Conectado al servidor",
|
||||
"uploadFailedInvalidUploadDesc": "Debe ser una sola imagen PNG o JPEG",
|
||||
"parameterSet": "Conjunto de parámetros",
|
||||
"parameterNotSet": "Parámetro no configurado",
|
||||
"uploadFailedInvalidUploadDesc": "Deben ser imágenes PNG o JPEG.",
|
||||
"parameterSet": "Parámetro recuperado",
|
||||
"parameterNotSet": "Parámetro no recuperado",
|
||||
"problemCopyingImage": "No se puede copiar la imagen",
|
||||
"errorCopied": "Error al copiar",
|
||||
"baseModelChanged": "Modelo base cambiado",
|
||||
"addedToBoard": "Añadido al tablero",
|
||||
"addedToBoard": "Se agregó a los activos del panel {{name}}",
|
||||
"baseModelChangedCleared_one": "Borrado o desactivado {{count}} submodelo incompatible",
|
||||
"baseModelChangedCleared_many": "Borrados o desactivados {{count}} submodelos incompatibles",
|
||||
"baseModelChangedCleared_other": "Borrados o desactivados {{count}} submodelos incompatibles"
|
||||
"baseModelChangedCleared_other": "Borrados o desactivados {{count}} submodelos incompatibles",
|
||||
"addedToUncategorized": "Añadido a los activos del tablero $t(boards.uncategorized)",
|
||||
"imagesWillBeAddedTo": "Las imágenes subidas se añadirán a los activos del panel {{boardName}}.",
|
||||
"layerCopiedToClipboard": "Capa copiada en el portapapeles"
|
||||
},
|
||||
"accessibility": {
|
||||
"invokeProgressBar": "Activar la barra de progreso",
|
||||
@@ -226,7 +316,8 @@
|
||||
"mode": "Modo",
|
||||
"submitSupportTicket": "Enviar Ticket de Soporte",
|
||||
"toggleRightPanel": "Activar o desactivar el panel derecho (G)",
|
||||
"toggleLeftPanel": "Activar o desactivar el panel izquierdo (T)"
|
||||
"toggleLeftPanel": "Activar o desactivar el panel izquierdo (T)",
|
||||
"uploadImages": "Cargar imagen(es)"
|
||||
},
|
||||
"nodes": {
|
||||
"zoomInNodes": "Acercar",
|
||||
@@ -238,7 +329,8 @@
|
||||
"showMinimapnodes": "Mostrar el minimapa",
|
||||
"reloadNodeTemplates": "Recargar las plantillas de nodos",
|
||||
"loadWorkflow": "Cargar el flujo de trabajo",
|
||||
"downloadWorkflow": "Descargar el flujo de trabajo en un archivo JSON"
|
||||
"downloadWorkflow": "Descargar el flujo de trabajo en un archivo JSON",
|
||||
"boardAccessError": "No se puede encontrar el panel {{board_id}}, se está restableciendo al valor predeterminado"
|
||||
},
|
||||
"boards": {
|
||||
"autoAddBoard": "Agregar panel automáticamente",
|
||||
@@ -255,7 +347,7 @@
|
||||
"bottomMessage": "Al eliminar este panel y las imágenes que contiene, se restablecerán las funciones que los estén utilizando actualmente.",
|
||||
"deleteBoardAndImages": "Borrar el panel y las imágenes",
|
||||
"loading": "Cargando...",
|
||||
"deletedBoardsCannotbeRestored": "Los paneles eliminados no se pueden restaurar. Al Seleccionar 'Borrar Solo el Panel' transferirá las imágenes a un estado sin categorizar.",
|
||||
"deletedBoardsCannotbeRestored": "Los paneles eliminados no se pueden restaurar. Al Seleccionar 'Borrar solo el panel' transferirá las imágenes a un estado sin categorizar.",
|
||||
"move": "Mover",
|
||||
"menuItemAutoAdd": "Agregar automáticamente a este panel",
|
||||
"searchBoard": "Buscando paneles…",
|
||||
@@ -263,29 +355,33 @@
|
||||
"downloadBoard": "Descargar panel",
|
||||
"deleteBoardOnly": "Borrar solo el panel",
|
||||
"myBoard": "Mi panel",
|
||||
"noMatching": "No hay paneles que coincidan",
|
||||
"noMatching": "Sin paneles coincidentes",
|
||||
"imagesWithCount_one": "{{count}} imagen",
|
||||
"imagesWithCount_many": "{{count}} imágenes",
|
||||
"imagesWithCount_other": "{{count}} imágenes",
|
||||
"assetsWithCount_one": "{{count}} activo",
|
||||
"assetsWithCount_many": "{{count}} activos",
|
||||
"assetsWithCount_other": "{{count}} activos",
|
||||
"hideBoards": "Ocultar Paneles",
|
||||
"addPrivateBoard": "Agregar un tablero privado",
|
||||
"addSharedBoard": "Agregar Panel Compartido",
|
||||
"hideBoards": "Ocultar paneles",
|
||||
"addPrivateBoard": "Agregar un panel privado",
|
||||
"addSharedBoard": "Añadir panel compartido",
|
||||
"boards": "Paneles",
|
||||
"archiveBoard": "Archivar Panel",
|
||||
"archiveBoard": "Archivar panel",
|
||||
"archived": "Archivado",
|
||||
"selectedForAutoAdd": "Seleccionado para agregar automáticamente",
|
||||
"unarchiveBoard": "Desarchivar el tablero",
|
||||
"noBoards": "No hay tableros {{boardType}}",
|
||||
"shared": "Carpetas compartidas",
|
||||
"deletedPrivateBoardsCannotbeRestored": "Los tableros eliminados no se pueden restaurar. Al elegir \"Eliminar solo tablero\", las imágenes se colocan en un estado privado y sin categoría para el creador de la imagen."
|
||||
"unarchiveBoard": "Desarchivar el panel",
|
||||
"noBoards": "No hay paneles {{boardType}}",
|
||||
"shared": "Paneles compartidos",
|
||||
"deletedPrivateBoardsCannotbeRestored": "Los paneles eliminados no se pueden restaurar. Al elegir \"Eliminar solo el panel\", las imágenes se colocan en un estado privado y sin categoría para el creador de la imagen.",
|
||||
"viewBoards": "Ver paneles",
|
||||
"private": "Paneles privados",
|
||||
"updateBoardError": "No se pudo actualizar el panel"
|
||||
},
|
||||
"accordions": {
|
||||
"compositing": {
|
||||
"title": "Composición",
|
||||
"infillTab": "Relleno"
|
||||
"infillTab": "Relleno",
|
||||
"coherenceTab": "Parámetros de la coherencia"
|
||||
},
|
||||
"generation": {
|
||||
"title": "Generación"
|
||||
@@ -309,7 +405,10 @@
|
||||
"workflows": "Flujos de trabajo",
|
||||
"models": "Modelos",
|
||||
"modelsTab": "$t(ui.tabs.models) $t(common.tab)",
|
||||
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)"
|
||||
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
|
||||
"upscaling": "Upscaling",
|
||||
"gallery": "Galería",
|
||||
"upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)"
|
||||
}
|
||||
},
|
||||
"queue": {
|
||||
@@ -317,12 +416,81 @@
|
||||
"front": "Delante",
|
||||
"batchQueuedDesc_one": "Se agregó {{count}} sesión a {{direction}} la cola",
|
||||
"batchQueuedDesc_many": "Se agregaron {{count}} sesiones a {{direction}} la cola",
|
||||
"batchQueuedDesc_other": "Se agregaron {{count}} sesiones a {{direction}} la cola"
|
||||
"batchQueuedDesc_other": "Se agregaron {{count}} sesiones a {{direction}} la cola",
|
||||
"clearQueueAlertDialog": "Al vaciar la cola se cancela inmediatamente cualquier elemento de procesamiento y se vaciará la cola por completo. Los filtros pendientes se cancelarán.",
|
||||
"time": "Tiempo",
|
||||
"clearFailed": "Error al vaciar la cola",
|
||||
"cancelFailed": "Error al cancelar el elemento",
|
||||
"resumeFailed": "Error al reanudar el proceso",
|
||||
"pause": "Pausar",
|
||||
"pauseTooltip": "Pausar el proceso",
|
||||
"cancelBatchSucceeded": "Lote cancelado",
|
||||
"pruneSucceeded": "Se purgaron {{item_count}} elementos completados de la cola",
|
||||
"pruneFailed": "Error al purgar la cola",
|
||||
"cancelBatchFailed": "Error al cancelar los lotes",
|
||||
"pauseFailed": "Error al pausar el proceso",
|
||||
"status": "Estado",
|
||||
"origin": "Origen",
|
||||
"destination": "Destino",
|
||||
"generations_one": "Generación",
|
||||
"generations_many": "Generaciones",
|
||||
"generations_other": "Generaciones",
|
||||
"resume": "Reanudar",
|
||||
"queueEmpty": "Cola vacía",
|
||||
"cancelItem": "Cancelar elemento",
|
||||
"cancelBatch": "Cancelar lote",
|
||||
"openQueue": "Abrir la cola",
|
||||
"completed": "Completado",
|
||||
"enqueueing": "Añadir lotes a la cola",
|
||||
"clear": "Limpiar",
|
||||
"pauseSucceeded": "Proceso pausado",
|
||||
"resumeSucceeded": "Proceso reanudado",
|
||||
"resumeTooltip": "Reanudar proceso",
|
||||
"cancel": "Cancelar",
|
||||
"cancelTooltip": "Cancelar artículo actual",
|
||||
"pruneTooltip": "Purgar {{item_count}} elementos completados",
|
||||
"batchQueued": "Lote en cola",
|
||||
"pending": "Pendiente",
|
||||
"item": "Elemento",
|
||||
"total": "Total",
|
||||
"in_progress": "En proceso",
|
||||
"failed": "Fallido",
|
||||
"completedIn": "Completado en",
|
||||
"upscaling": "Upscaling",
|
||||
"canvas": "Lienzo",
|
||||
"generation": "Generación",
|
||||
"workflows": "Flujo de trabajo",
|
||||
"other": "Otro",
|
||||
"queueFront": "Añadir al principio de la cola",
|
||||
"gallery": "Galería",
|
||||
"batchFieldValues": "Valores de procesamiento por lotes",
|
||||
"session": "Sesión",
|
||||
"notReady": "La cola aún no está lista",
|
||||
"graphQueued": "Gráfico en cola",
|
||||
"clearQueueAlertDialog2": "¿Estás seguro que deseas vaciar la cola?",
|
||||
"next": "Siguiente",
|
||||
"iterations_one": "Interacción",
|
||||
"iterations_many": "Interacciones",
|
||||
"iterations_other": "Interacciones",
|
||||
"current": "Actual",
|
||||
"queue": "Cola",
|
||||
"queueBack": "Añadir a la cola",
|
||||
"cancelSucceeded": "Elemento cancelado",
|
||||
"clearTooltip": "Cancelar y limpiar todos los elementos",
|
||||
"clearSucceeded": "Cola vaciada",
|
||||
"canceled": "Cancelado",
|
||||
"batch": "Lote",
|
||||
"graphFailedToQueue": "Error al poner el gráfico en cola",
|
||||
"batchFailedToQueue": "Error al poner en cola el lote",
|
||||
"prompts_one": "Prompt",
|
||||
"prompts_many": "Prompts",
|
||||
"prompts_other": "Prompts",
|
||||
"prune": "Eliminar"
|
||||
},
|
||||
"upsell": {
|
||||
"inviteTeammates": "Invitar compañeros de equipo",
|
||||
"shareAccess": "Compartir acceso",
|
||||
"professionalUpsell": "Disponible en la edición profesional de Invoke. Haz clic aquí o visita invoke.com/pricing para obtener más detalles."
|
||||
"professionalUpsell": "Disponible en la edición profesional de Invoke. Haga clic aquí o visite invoke.com/pricing para obtener más detalles."
|
||||
},
|
||||
"controlLayers": {
|
||||
"layer_one": "Capa",
|
||||
@@ -330,6 +498,415 @@
|
||||
"layer_other": "Capas",
|
||||
"layer_withCount_one": "({{count}}) capa",
|
||||
"layer_withCount_many": "({{count}}) capas",
|
||||
"layer_withCount_other": "({{count}}) capas"
|
||||
"layer_withCount_other": "({{count}}) capas",
|
||||
"copyToClipboard": "Copiar al portapapeles"
|
||||
},
|
||||
"whatsNew": {
|
||||
"readReleaseNotes": "Leer las notas de la versión",
|
||||
"watchRecentReleaseVideos": "Ver videos de versiones recientes",
|
||||
"watchUiUpdatesOverview": "Descripción general de las actualizaciones de la interfaz de usuario de Watch",
|
||||
"whatsNewInInvoke": "Novedades en Invoke",
|
||||
"items": [
|
||||
"<StrongComponent>SD 3.5</StrongComponent>: compatibilidad con SD 3.5 Medium y Large.",
|
||||
"<StrongComponent>Lienzo</StrongComponent>: Se ha simplificado el procesamiento de la capa de control y se ha mejorado la configuración predeterminada del control."
|
||||
]
|
||||
},
|
||||
"invocationCache": {
|
||||
"enableFailed": "Error al activar la cache",
|
||||
"cacheSize": "Tamaño de la caché",
|
||||
"hits": "Accesos a la caché",
|
||||
"invocationCache": "Caché",
|
||||
"misses": "Errores de la caché",
|
||||
"clear": "Limpiar",
|
||||
"maxCacheSize": "Tamaño máximo de la caché",
|
||||
"enableSucceeded": "Cache activada",
|
||||
"clearFailed": "Error al borrar la cache",
|
||||
"enable": "Activar",
|
||||
"useCache": "Uso de la caché",
|
||||
"disableSucceeded": "Caché desactivada",
|
||||
"clearSucceeded": "Caché borrada",
|
||||
"disable": "Desactivar",
|
||||
"disableFailed": "Error al desactivar la caché"
|
||||
},
|
||||
"hrf": {
|
||||
"hrf": "Solución de alta resolución",
|
||||
"enableHrf": "Activar corrección de alta resolución",
|
||||
"metadata": {
|
||||
"enabled": "Corrección de alta resolución activada",
|
||||
"strength": "Forzar la corrección de alta resolución",
|
||||
"method": "Método de corrección de alta resolución"
|
||||
},
|
||||
"upscaleMethod": "Método de expansión"
|
||||
},
|
||||
"prompt": {
|
||||
"addPromptTrigger": "Añadir activador de los avisos",
|
||||
"compatibleEmbeddings": "Incrustaciones compatibles",
|
||||
"noMatchingTriggers": "No hay activadores coincidentes"
|
||||
},
|
||||
"hotkeys": {
|
||||
"hotkeys": "Atajo del teclado",
|
||||
"canvas": {
|
||||
"selectViewTool": {
|
||||
"desc": "Selecciona la herramienta de Visualización.",
|
||||
"title": "Visualización"
|
||||
},
|
||||
"cancelFilter": {
|
||||
"title": "Cancelar el filtro",
|
||||
"desc": "Cancelar el filtro pendiente."
|
||||
},
|
||||
"applyTransform": {
|
||||
"title": "Aplicar la transformación",
|
||||
"desc": "Aplicar la transformación pendiente a la capa seleccionada."
|
||||
},
|
||||
"applyFilter": {
|
||||
"desc": "Aplicar el filtro pendiente a la capa seleccionada.",
|
||||
"title": "Aplicar filtro"
|
||||
},
|
||||
"selectBrushTool": {
|
||||
"title": "Pincel",
|
||||
"desc": "Selecciona la herramienta pincel."
|
||||
},
|
||||
"selectBboxTool": {
|
||||
"desc": "Seleccionar la herramienta de selección del marco.",
|
||||
"title": "Selección del marco"
|
||||
},
|
||||
"selectMoveTool": {
|
||||
"desc": "Selecciona la herramienta Mover.",
|
||||
"title": "Mover"
|
||||
},
|
||||
"selectRectTool": {
|
||||
"title": "Rectángulo",
|
||||
"desc": "Selecciona la herramienta Rectángulo."
|
||||
},
|
||||
"decrementToolWidth": {
|
||||
"title": "Reducir el ancho de la herramienta",
|
||||
"desc": "Disminuye la anchura de la herramienta pincel o goma de borrar, según la que esté seleccionada."
|
||||
},
|
||||
"incrementToolWidth": {
|
||||
"title": "Incrementar la anchura de la herramienta",
|
||||
"desc": "Aumenta la anchura de la herramienta pincel o goma de borrar, según la que esté seleccionada."
|
||||
},
|
||||
"fitBboxToCanvas": {
|
||||
"title": "Ajustar bordes al lienzo",
|
||||
"desc": "Escala y posiciona la vista para ajustarla a los bodes."
|
||||
},
|
||||
"fitLayersToCanvas": {
|
||||
"title": "Ajustar capas al lienzo",
|
||||
"desc": "Escala y posiciona la vista para que se ajuste a todas las capas visibles."
|
||||
},
|
||||
"setFillToWhite": {
|
||||
"title": "Establecer color en blanco",
|
||||
"desc": "Establece el color actual de la herramienta en blanco."
|
||||
},
|
||||
"resetSelected": {
|
||||
"title": "Restablecer capa",
|
||||
"desc": "Restablecer la capa seleccionada. Solo se aplica a Máscara de retoque y Guía regional."
|
||||
},
|
||||
"setZoomTo400Percent": {
|
||||
"desc": "Ajuste la aplicación del lienzo al 400%.",
|
||||
"title": "Ampliar al 400%"
|
||||
},
|
||||
"transformSelected": {
|
||||
"desc": "Transformar la capa seleccionada.",
|
||||
"title": "Transformar"
|
||||
},
|
||||
"selectColorPickerTool": {
|
||||
"title": "Selector de color",
|
||||
"desc": "Seleccione la herramienta de selección de color."
|
||||
},
|
||||
"selectEraserTool": {
|
||||
"title": "Borrador",
|
||||
"desc": "Selecciona la herramienta Borrador."
|
||||
},
|
||||
"setZoomTo100Percent": {
|
||||
"title": "Ampliar al 100%",
|
||||
"desc": "Ajuste ampliar el lienzo al 100%."
|
||||
},
|
||||
"undo": {
|
||||
"title": "Deshacer",
|
||||
"desc": "Deshacer la última acción en el lienzo."
|
||||
},
|
||||
"nextEntity": {
|
||||
"desc": "Seleccione la siguiente capa de la lista.",
|
||||
"title": "Capa siguiente"
|
||||
},
|
||||
"redo": {
|
||||
"title": "Rehacer",
|
||||
"desc": "Rehacer la última acción en el lienzo."
|
||||
},
|
||||
"prevEntity": {
|
||||
"title": "Capa anterior",
|
||||
"desc": "Seleccione la capa anterior de la lista."
|
||||
},
|
||||
"title": "Lienzo",
|
||||
"setZoomTo200Percent": {
|
||||
"title": "Ampliar al 200%",
|
||||
"desc": "Ajuste la ampliación del lienzo al 200%."
|
||||
},
|
||||
"setZoomTo800Percent": {
|
||||
"title": "Ampliar al 800%",
|
||||
"desc": "Ajuste la ampliación del lienzo al 800%."
|
||||
},
|
||||
"filterSelected": {
|
||||
"desc": "Filtra la capa seleccionada. Solo se aplica a las capas Ráster y Control.",
|
||||
"title": "Filtrar"
|
||||
},
|
||||
"cancelTransform": {
|
||||
"title": "Cancelar transformación",
|
||||
"desc": "Cancelar la transformación pendiente."
|
||||
},
|
||||
"deleteSelected": {
|
||||
"title": "Borrar la capa",
|
||||
"desc": "Borrar la capa seleccionada."
|
||||
},
|
||||
"quickSwitch": {
|
||||
"desc": "Cambiar entre las dos últimas capas seleccionadas. Si una capa está seleccionada, cambia siempre entre ella y la última capa no seleccionada.",
|
||||
"title": "Cambio rápido de capa"
|
||||
}
|
||||
},
|
||||
"app": {
|
||||
"selectModelsTab": {
|
||||
"title": "Seleccione la pestaña Modelos",
|
||||
"desc": "Selecciona la pestaña Modelos."
|
||||
},
|
||||
"focusPrompt": {
|
||||
"desc": "Mueve el foco del cursor a la indicación positiva.",
|
||||
"title": "Enfoque"
|
||||
},
|
||||
"toggleLeftPanel": {
|
||||
"title": "Alternar panel izquierdo",
|
||||
"desc": "Mostrar u ocultar el panel izquierdo."
|
||||
},
|
||||
"selectQueueTab": {
|
||||
"title": "Seleccione la pestaña Cola",
|
||||
"desc": "Seleccione la pestaña Cola."
|
||||
},
|
||||
"selectCanvasTab": {
|
||||
"title": "Seleccione la pestaña Lienzo",
|
||||
"desc": "Selecciona la pestaña Lienzo."
|
||||
},
|
||||
"clearQueue": {
|
||||
"title": "Vaciar cola",
|
||||
"desc": "Cancelar y variar todos los elementos de la cola."
|
||||
},
|
||||
"selectUpscalingTab": {
|
||||
"title": "Selecciona la pestaña Ampliar",
|
||||
"desc": "Selecciona la pestaña Aumento de escala."
|
||||
},
|
||||
"togglePanels": {
|
||||
"desc": "Muestra u oculta los paneles izquierdo y derecho a la vez.",
|
||||
"title": "Alternar paneles"
|
||||
},
|
||||
"toggleRightPanel": {
|
||||
"title": "Alternar panel derecho",
|
||||
"desc": "Mostrar u ocultar el panel derecho."
|
||||
},
|
||||
"invokeFront": {
|
||||
"desc": "Pone en cola la solicitud de compilación y la agrega al principio de la cola.",
|
||||
"title": "Invocar (frente)"
|
||||
},
|
||||
"cancelQueueItem": {
|
||||
"title": "Cancelar",
|
||||
"desc": "Cancelar el elemento de la cola que se está procesando."
|
||||
},
|
||||
"invoke": {
|
||||
"desc": "Pone en cola la solicitud de compilación y la agrega al final de la cola.",
|
||||
"title": "Invocar"
|
||||
},
|
||||
"title": "Aplicación",
|
||||
"selectWorkflowsTab": {
|
||||
"title": "Seleccione la pestaña Flujos de trabajo",
|
||||
"desc": "Selecciona la pestaña Flujos de trabajo."
|
||||
},
|
||||
"resetPanelLayout": {
|
||||
"title": "Reiniciar la posición del panel",
|
||||
"desc": "Restablece los paneles izquierdo y derecho a su tamaño y disposición por defecto."
|
||||
}
|
||||
},
|
||||
"workflows": {
|
||||
"addNode": {
|
||||
"title": "Añadir nodo",
|
||||
"desc": "Abrir añadir nodo."
|
||||
},
|
||||
"selectAll": {
|
||||
"title": "Seleccionar todo",
|
||||
"desc": "Seleccione todos los nodos y enlaces."
|
||||
},
|
||||
"deleteSelection": {
|
||||
"desc": "Borrar todos los nodos y enlaces seleccionados.",
|
||||
"title": "Borrar"
|
||||
},
|
||||
"undo": {
|
||||
"desc": "Deshaga la última acción.",
|
||||
"title": "Deshacer"
|
||||
},
|
||||
"redo": {
|
||||
"desc": "Rehacer la última acción.",
|
||||
"title": "Rehacer"
|
||||
},
|
||||
"pasteSelection": {
|
||||
"desc": "Pegar nodos y bordes copiados.",
|
||||
"title": "Pegar"
|
||||
},
|
||||
"title": "Flujos de trabajo",
|
||||
"copySelection": {
|
||||
"desc": "Copiar nodos y bordes seleccionados.",
|
||||
"title": "Copiar"
|
||||
},
|
||||
"pasteSelectionWithEdges": {
|
||||
"desc": "Pega los nodos copiados, los enlaces y todos los enlaces conectados a los nodos copiados.",
|
||||
"title": "Pegar con enlaces"
|
||||
}
|
||||
},
|
||||
"viewer": {
|
||||
"useSize": {
|
||||
"title": "Usar dimensiones",
|
||||
"desc": "Utiliza las dimensiones de la imagen actual como el tamaño del borde."
|
||||
},
|
||||
"remix": {
|
||||
"title": "Remezcla",
|
||||
"desc": "Recupera todos los metadatos excepto la semilla de la imagen actual."
|
||||
},
|
||||
"loadWorkflow": {
|
||||
"desc": "Carga el flujo de trabajo guardado de la imagen actual (si tiene uno).",
|
||||
"title": "Cargar flujo de trabajo"
|
||||
},
|
||||
"recallAll": {
|
||||
"desc": "Recupera todos los metadatos de la imagen actual.",
|
||||
"title": "Recuperar todos los metadatos"
|
||||
},
|
||||
"recallPrompts": {
|
||||
"desc": "Recuerde las indicaciones positivas y negativas de la imagen actual.",
|
||||
"title": "Recordatorios"
|
||||
},
|
||||
"recallSeed": {
|
||||
"title": "Recuperar semilla",
|
||||
"desc": "Recupera la semilla de la imagen actual."
|
||||
},
|
||||
"runPostprocessing": {
|
||||
"title": "Ejecutar posprocesamiento",
|
||||
"desc": "Ejecutar el posprocesamiento seleccionado en la imagen actual."
|
||||
},
|
||||
"toggleMetadata": {
|
||||
"title": "Mostrar/ocultar los metadatos",
|
||||
"desc": "Mostrar u ocultar la superposición de metadatos de la imagen actual."
|
||||
},
|
||||
"nextComparisonMode": {
|
||||
"desc": "Desplácese por los modos de comparación.",
|
||||
"title": "Siguiente comparación"
|
||||
},
|
||||
"title": "Visor de imágenes",
|
||||
"toggleViewer": {
|
||||
"title": "Mostrar/Ocultar el visor de imágenes",
|
||||
"desc": "Mostrar u ocultar el visor de imágenes. Solo disponible en la pestaña Lienzo."
|
||||
},
|
||||
"swapImages": {
|
||||
"title": "Intercambiar imágenes en la comparación",
|
||||
"desc": "Intercambia las imágenes que se están comparando."
|
||||
}
|
||||
},
|
||||
"gallery": {
|
||||
"clearSelection": {
|
||||
"title": "Limpiar selección",
|
||||
"desc": "Borrar la selección actual, si hay alguna."
|
||||
},
|
||||
"galleryNavUp": {
|
||||
"title": "Subir",
|
||||
"desc": "Navega hacia arriba en la cuadrícula de la galería y selecciona esa imagen. Si estás en la parte superior de la página, ve a la página anterior."
|
||||
},
|
||||
"galleryNavLeft": {
|
||||
"title": "Izquierda",
|
||||
"desc": "Navegue hacia la izquierda en la rejilla de la galería, seleccionando esa imagen. Si está en la primera imagen de la fila, vaya a la fila anterior. Si está en la primera imagen de la página, vaya a la página anterior."
|
||||
},
|
||||
"galleryNavDown": {
|
||||
"title": "Bajar",
|
||||
"desc": "Navegue hacia abajo en la parrilla de la galería, seleccionando esa imagen. Si se encuentra al final de la página, vaya a la página siguiente."
|
||||
},
|
||||
"galleryNavRight": {
|
||||
"title": "A la derecha",
|
||||
"desc": "Navegue hacia la derecha en la rejilla de la galería, seleccionando esa imagen. Si está en la última imagen de la fila, vaya a la fila siguiente. Si está en la última imagen de la página, vaya a la página siguiente."
|
||||
},
|
||||
"galleryNavUpAlt": {
|
||||
"desc": "Igual que arriba, pero selecciona la imagen de comparación, abriendo el modo de comparación si no está ya abierto.",
|
||||
"title": "Arriba (Comparar imagen)"
|
||||
},
|
||||
"deleteSelection": {
|
||||
"desc": "Borrar todas las imágenes seleccionadas. Por defecto, se le pedirá que confirme la eliminación. Si las imágenes están actualmente en uso en la aplicación, se te avisará.",
|
||||
"title": "Borrar"
|
||||
},
|
||||
"title": "Galería",
|
||||
"selectAllOnPage": {
|
||||
"title": "Seleccionar todo en la página",
|
||||
"desc": "Seleccionar todas las imágenes en la página actual."
|
||||
}
|
||||
},
|
||||
"searchHotkeys": "Buscar teclas de acceso rápido",
|
||||
"noHotkeysFound": "Sin teclas de acceso rápido",
|
||||
"clearSearch": "Limpiar la búsqueda"
|
||||
},
|
||||
"metadata": {
|
||||
"guidance": "Orientación",
|
||||
"createdBy": "Creado por",
|
||||
"noImageDetails": "Sin detalles en la imagen",
|
||||
"cfgRescaleMultiplier": "$t(parameters.cfgRescaleMultiplier)",
|
||||
"height": "Altura",
|
||||
"imageDimensions": "Dimensiones de la imagen",
|
||||
"seamlessXAxis": "Eje X sin juntas",
|
||||
"seamlessYAxis": "Eje Y sin juntas",
|
||||
"generationMode": "Modo de generación",
|
||||
"scheduler": "Programador",
|
||||
"width": "Ancho",
|
||||
"Threshold": "Umbral de ruido",
|
||||
"canvasV2Metadata": "Lienzo",
|
||||
"metadata": "Metadatos",
|
||||
"model": "Modelo",
|
||||
"allPrompts": "Todas las indicaciones",
|
||||
"cfgScale": "Escala CFG",
|
||||
"imageDetails": "Detalles de la imagen",
|
||||
"negativePrompt": "Indicación negativa",
|
||||
"noMetaData": "Sin metadatos",
|
||||
"parameterSet": "Parámetro {{parameter}} establecido",
|
||||
"vae": "Autocodificador",
|
||||
"workflow": "Flujo de trabajo",
|
||||
"seed": "Semilla",
|
||||
"strength": "Forzar imagen a imagen",
|
||||
"recallParameters": "Parámetros de recuperación",
|
||||
"recallParameter": "Recuperar {{label}}",
|
||||
"steps": "Pasos",
|
||||
"noRecallParameters": "Sin parámetros para recuperar",
|
||||
"parsingFailed": "Error al analizar"
|
||||
},
|
||||
"system": {
|
||||
"logLevel": {
|
||||
"debug": "Depurar",
|
||||
"info": "Información",
|
||||
"warn": "Advertir",
|
||||
"fatal": "Grave",
|
||||
"error": "Error",
|
||||
"trace": "Rastro",
|
||||
"logLevel": "Nivel del registro"
|
||||
},
|
||||
"enableLogging": "Activar registro",
|
||||
"logNamespaces": {
|
||||
"workflows": "Flujos de trabajo",
|
||||
"system": "Sistema",
|
||||
"metadata": "Metadatos",
|
||||
"gallery": "Galería",
|
||||
"logNamespaces": "Espacios para los nombres de registro",
|
||||
"generation": "Generación",
|
||||
"events": "Eventos",
|
||||
"canvas": "Lienzo",
|
||||
"config": "Ajustes",
|
||||
"models": "Modelos",
|
||||
"queue": "Cola"
|
||||
}
|
||||
},
|
||||
"newUserExperience": {
|
||||
"downloadStarterModels": "Descargar modelos de inicio",
|
||||
"toGetStarted": "Para empezar, introduzca un mensaje en el cuadro y haga clic en <StrongComponent>Invocar</StrongComponent> para generar su primera imagen. Seleccione una plantilla para mejorar los resultados. Puede elegir guardar sus imágenes directamente en <StrongComponent>Galería</StrongComponent> o editarlas en <StrongComponent>Lienzo</StrongComponent>.",
|
||||
"importModels": "Importar modelos",
|
||||
"noModelsInstalled": "Parece que no tienes ningún modelo instalado",
|
||||
"gettingStartedSeries": "¿Desea más orientación? Consulte nuestra <LinkComponent>Serie de introducción</LinkComponent> para obtener consejos sobre cómo aprovechar todo el potencial de Invoke Studio.",
|
||||
"toGetStartedLocal": "Para empezar, asegúrate de descargar o importar los modelos necesarios para ejecutar Invoke. A continuación, introduzca un mensaje en el cuadro y haga clic en <StrongComponent>Invocar</StrongComponent> para generar su primera imagen. Seleccione una plantilla para mejorar los resultados. Puede elegir guardar sus imágenes directamente en <StrongComponent>Galería</StrongComponent> o editarlas en el <StrongComponent>Lienzo</StrongComponent>."
|
||||
}
|
||||
}
|
||||
@@ -1985,7 +1985,6 @@
"inpaintMask_withCount_many": "Remplir les masques",
"inpaintMask_withCount_other": "Remplir les masques",
"newImg2ImgCanvasFromImage": "Nouvelle Img2Img à partir de l'image",
"resetCanvas": "Réinitialiser la Toile",
"bboxOverlay": "Afficher la superposition des Bounding Box",
"moveToFront": "Déplacer vers le permier plan",
"moveToBack": "Déplacer vers l'arrière plan",
@@ -2034,7 +2033,6 @@
"help2": "Commencez par un point <Bold>Inclure</Bold> au sein de l'objet cible. Ajoutez d'autres points pour affiner la sélection. Moins de points produisent généralement de meilleurs résultats.",
"help3": "Inversez la sélection pour sélectionner tout sauf l'objet cible."
},
"canvasAsControlLayer": "$t(controlLayers.canvas) en tant que $t(controlLayers.controlLayer)",
"convertRegionalGuidanceTo": "Convertir $t(controlLayers.regionalGuidance) vers",
"copyRasterLayerTo": "Copier $t(controlLayers.rasterLayer) vers",
"newControlLayer": "Nouveau $t(controlLayers.controlLayer)",
@@ -2044,8 +2042,7 @@
"convertInpaintMaskTo": "Convertir $t(controlLayers.inpaintMask) vers",
"copyControlLayerTo": "Copier $t(controlLayers.controlLayer) vers",
"newInpaintMask": "Nouveau $t(controlLayers.inpaintMask)",
"newRasterLayer": "Nouveau $t(controlLayers.rasterLayer)",
"canvasAsRasterLayer": "$t(controlLayers.canvas) en tant que $t(controlLayers.rasterLayer)"
"newRasterLayer": "Nouveau $t(controlLayers.rasterLayer)"
},
"upscaling": {
"exceedsMaxSizeDetails": "La limite maximale d'agrandissement est de {{maxUpscaleDimension}}x{{maxUpscaleDimension}} pixels. Veuillez essayer une image plus petite ou réduire votre sélection d'échelle.",
@@ -94,7 +94,9 @@
|
||||
"view": "Vista",
|
||||
"close": "Chiudi",
|
||||
"clipboard": "Appunti",
|
||||
"ok": "Ok"
|
||||
"ok": "Ok",
|
||||
"generating": "Generazione",
|
||||
"loadingModel": "Caricamento del modello"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Dimensione dell'immagine",
|
||||
@@ -597,7 +599,18 @@
|
||||
"huggingFace": "HuggingFace",
|
||||
"huggingFaceRepoID": "HuggingFace Repository ID",
|
||||
"clipEmbed": "CLIP Embed",
|
||||
"t5Encoder": "T5 Encoder"
|
||||
"t5Encoder": "T5 Encoder",
|
||||
"hfTokenInvalidErrorMessage": "Gettone HuggingFace non valido o mancante.",
|
||||
"hfTokenRequired": "Stai tentando di scaricare un modello che richiede un gettone HuggingFace valido.",
|
||||
"hfTokenUnableToVerifyErrorMessage": "Impossibile verificare il gettone HuggingFace. Ciò è probabilmente dovuto a un errore di rete. Riprova più tardi.",
|
||||
"hfTokenHelperText": "Per utilizzare alcuni modelli è necessario un gettone HF. Fai clic qui per creare o ottenere il tuo gettone.",
|
||||
"hfTokenInvalid": "Gettone HF non valido o mancante",
|
||||
"hfTokenUnableToVerify": "Impossibile verificare il gettone HF",
|
||||
"hfTokenSaved": "Gettone HF salvato",
|
||||
"hfForbidden": "Non hai accesso a questo modello HF",
|
||||
"hfTokenLabel": "Gettone HuggingFace (richiesto per alcuni modelli)",
|
||||
"hfForbiddenErrorMessage": "Consigliamo di visitare la pagina del repository su HuggingFace.com. Il proprietario potrebbe richiedere l'accettazione dei termini per poter effettuare il download.",
|
||||
"hfTokenInvalidErrorMessage2": "Aggiornalo in "
|
||||
},
|
||||
"parameters": {
|
||||
"images": "Immagini",
|
||||
@@ -699,7 +712,9 @@
|
||||
"staged": "Maschera espansa",
|
||||
"optimizedImageToImage": "Immagine-a-immagine ottimizzata",
|
||||
"sendToCanvas": "Invia alla Tela",
|
||||
"coherenceMinDenoise": "Riduzione minima del rumore"
|
||||
"coherenceMinDenoise": "Min rid. rumore",
|
||||
"recallMetadata": "Richiama i metadati",
|
||||
"disabledNoRasterContent": "Disabilitato (nessun contenuto Raster)"
|
||||
},
|
||||
"settings": {
|
||||
"models": "Modelli",
|
||||
@@ -737,7 +752,8 @@
|
||||
"confirmOnNewSession": "Conferma su nuova sessione",
|
||||
"enableModelDescriptions": "Abilita le descrizioni dei modelli nei menu a discesa",
|
||||
"modelDescriptionsDisabled": "Descrizioni dei modelli nei menu a discesa disabilitate",
|
||||
"modelDescriptionsDisabledDesc": "Le descrizioni dei modelli nei menu a discesa sono state disabilitate. Abilitale nelle Impostazioni."
|
||||
"modelDescriptionsDisabledDesc": "Le descrizioni dei modelli nei menu a discesa sono state disabilitate. Abilitale nelle Impostazioni.",
|
||||
"showDetailedInvocationProgress": "Mostra dettagli avanzamento"
|
||||
},
|
||||
"toast": {
|
||||
"uploadFailed": "Caricamento fallito",
|
||||
@@ -1263,8 +1279,9 @@
|
||||
},
|
||||
"paramDenoisingStrength": {
|
||||
"paragraphs": [
|
||||
"Quanto rumore viene aggiunto all'immagine in ingresso.",
|
||||
"0 risulterà in un'immagine identica, mentre 1 risulterà in un'immagine completamente nuova."
|
||||
"Controlla la differenza tra l'immagine generata e il/i livello/i raster.",
|
||||
"Una forza inferiore rimane più vicina ai livelli raster visibili combinati. Una forza superiore si basa maggiormente sul prompt globale.",
|
||||
"Se non sono presenti livelli raster con contenuto visibile, questa impostazione viene ignorata."
|
||||
],
|
||||
"heading": "Forza di riduzione del rumore"
|
||||
},
|
||||
@@ -1276,7 +1293,7 @@
|
||||
},
|
||||
"infillMethod": {
|
||||
"paragraphs": [
|
||||
"Metodo di riempimento durante il processo di Outpainting o Inpainting."
|
||||
"Metodo di riempimento durante il processo di Outpaint o Inpaint."
|
||||
],
|
||||
"heading": "Metodo di riempimento"
|
||||
},
|
||||
@@ -1444,7 +1461,7 @@
|
||||
"heading": "Livello minimo di riduzione del rumore",
|
||||
"paragraphs": [
|
||||
"Intensità minima di riduzione rumore per la modalità di Coerenza",
|
||||
"L'intensità minima di riduzione del rumore per la regione di coerenza durante l'inpainting o l'outpainting"
|
||||
"L'intensità minima di riduzione del rumore per la regione di coerenza durante l'inpaint o l'outpaint"
|
||||
]
|
||||
},
|
||||
"compositingMaskBlur": {
|
||||
@@ -1498,7 +1515,7 @@
|
||||
"optimizedDenoising": {
|
||||
"heading": "Immagine-a-immagine ottimizzata",
|
||||
"paragraphs": [
|
||||
"Abilita 'Immagine-a-immagine ottimizzata' per una scala di riduzione del rumore più graduale per le trasformazioni da immagine a immagine e di inpainting con modelli Flux. Questa impostazione migliora la capacità di controllare la quantità di modifica applicata a un'immagine, ma può essere disattivata se preferisci usare la scala di riduzione rumore standard. Questa impostazione è ancora in fase di messa a punto ed è in stato beta."
|
||||
"Abilita 'Immagine-a-immagine ottimizzata' per una scala di riduzione del rumore più graduale per le trasformazioni da immagine a immagine e di inpaint con modelli Flux. Questa impostazione migliora la capacità di controllare la quantità di modifica applicata a un'immagine, ma può essere disattivata se preferisci usare la scala di riduzione rumore standard. Questa impostazione è ancora in fase di messa a punto ed è in stato beta."
|
||||
]
|
||||
},
|
||||
"paramGuidance": {
|
||||
@@ -1733,8 +1750,7 @@
|
||||
"newRegionalReferenceImageError": "Problema nella creazione dell'immagine di riferimento regionale",
|
||||
"newControlLayerOk": "Livello di controllo creato",
|
||||
"bboxOverlay": "Mostra sovrapposizione riquadro",
|
||||
"resetCanvas": "Reimposta la tela",
|
||||
"outputOnlyMaskedRegions": "Solo regioni mascherate in uscita",
|
||||
"outputOnlyMaskedRegions": "In uscita solo le regioni generate",
|
||||
"enableAutoNegative": "Abilita Auto Negativo",
|
||||
"disableAutoNegative": "Disabilita Auto Negativo",
|
||||
"showHUD": "Mostra HUD",
|
||||
@@ -1771,7 +1787,7 @@
|
||||
"globalReferenceImage_withCount_many": "Immagini di riferimento Globali",
|
||||
"globalReferenceImage_withCount_other": "Immagini di riferimento Globali",
|
||||
"controlMode": {
|
||||
"balanced": "Bilanciato",
|
||||
"balanced": "Bilanciato (consigliato)",
|
||||
"controlMode": "Modalità di controllo",
|
||||
"prompt": "Prompt",
|
||||
"control": "Controllo",
|
||||
@@ -1782,7 +1798,7 @@
|
||||
"beginEndStepPercentShort": "Inizio/Fine %",
|
||||
"stagingOnCanvas": "Genera immagini nella",
|
||||
"ipAdapterMethod": {
|
||||
"full": "Completo",
|
||||
"full": "Stile e Composizione",
|
||||
"style": "Solo Stile",
|
||||
"composition": "Solo Composizione",
|
||||
"ipAdapterMethod": "Metodo Adattatore IP"
|
||||
@@ -1881,7 +1897,10 @@
|
||||
"lineart_anime_edge_detection": {
|
||||
"description": "Genera una mappa dei bordi dal livello selezionato utilizzando il modello di rilevamento dei bordi Lineart Anime.",
|
||||
"label": "Rilevamento bordi Lineart Anime"
|
||||
}
|
||||
},
|
||||
"forMoreControl": "Per un maggiore controllo, fare clic su Avanzate qui sotto.",
|
||||
"advanced": "Avanzate",
|
||||
"processingLayerWith": "Elaborazione del livello con il filtro {{type}}."
|
||||
},
|
||||
"controlLayers_withCount_hidden": "Livelli di controllo ({{count}} nascosti)",
|
||||
"regionalGuidance_withCount_hidden": "Guida regionale ({{count}} nascosti)",
|
||||
@@ -2016,8 +2035,6 @@
|
||||
"convertControlLayerTo": "Converti $t(controlLayers.controlLayer) in",
|
||||
"newRasterLayer": "Nuovo $t(controlLayers.rasterLayer)",
|
||||
"newRegionalGuidance": "Nuova $t(controlLayers.regionalGuidance)",
|
||||
"canvasAsRasterLayer": "$t(controlLayers.canvas) come $t(controlLayers.rasterLayer)",
|
||||
"canvasAsControlLayer": "$t(controlLayers.canvas) come $t(controlLayers.controlLayer)",
|
||||
"convertInpaintMaskTo": "Converti $t(controlLayers.inpaintMask) in",
|
||||
"copyRegionalGuidanceTo": "Copia $t(controlLayers.regionalGuidance) in",
|
||||
"convertRasterLayerTo": "Converti $t(controlLayers.rasterLayer) in",
|
||||
@@ -2025,7 +2042,9 @@
|
||||
"newControlLayer": "Nuovo $t(controlLayers.controlLayer)",
|
||||
"newInpaintMask": "Nuova $t(controlLayers.inpaintMask)",
|
||||
"replaceCurrent": "Sostituisci corrente",
|
||||
"mergeDown": "Unire in basso"
|
||||
"mergeDown": "Unire in basso",
|
||||
"mergingLayers": "Unione dei livelli",
|
||||
"controlLayerEmptyState": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton> su questo livello oppure disegna sulla tela per iniziare."
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
@@ -2057,7 +2076,9 @@
|
||||
"postProcessingMissingModelWarning": "Visita <LinkComponent>Gestione modelli</LinkComponent> per installare un modello di post-elaborazione (da immagine a immagine).",
|
||||
"exceedsMaxSize": "Le impostazioni di ampliamento superano il limite massimo delle dimensioni",
|
||||
"exceedsMaxSizeDetails": "Il limite massimo di ampliamento è {{maxUpscaleDimension}}x{{maxUpscaleDimension}} pixel. Prova un'immagine più piccola o diminuisci la scala selezionata.",
|
||||
"upscale": "Amplia"
|
||||
"upscale": "Amplia",
|
||||
"incompatibleBaseModel": "Architettura del modello principale non supportata per l'ampliamento",
|
||||
"incompatibleBaseModelDesc": "L'ampliamento è supportato solo per i modelli di architettura SD1.5 e SDXL. Cambia il modello principale per abilitare l'ampliamento."
|
||||
},
|
||||
"upsell": {
|
||||
"inviteTeammates": "Invita collaboratori",
|
||||
@@ -2119,12 +2140,13 @@
|
||||
},
|
||||
"whatsNew": {
|
||||
"whatsNewInInvoke": "Novità in Invoke",
|
||||
"line2": "Supporto Flux esteso, ora con immagini di riferimento globali",
|
||||
"line3": "Tooltip e menu contestuali migliorati",
|
||||
"readReleaseNotes": "Leggi le note di rilascio",
|
||||
"watchRecentReleaseVideos": "Guarda i video su questa versione",
|
||||
"line1": "Strumento <ItalicComponent>Seleziona oggetto</ItalicComponent> per la selezione e la modifica precise degli oggetti",
|
||||
"watchUiUpdatesOverview": "Guarda le novità dell'interfaccia"
|
||||
"watchUiUpdatesOverview": "Guarda le novità dell'interfaccia",
|
||||
"items": [
|
||||
"<StrongComponent>SD 3.5</StrongComponent>: supporto per SD 3.5 Medium e Large.",
|
||||
"<StrongComponent>Tela</StrongComponent>: elaborazione semplificata del livello di controllo e impostazioni di controllo predefinite migliorate."
|
||||
]
|
||||
},
|
||||
"system": {
|
||||
"logLevel": {
|
||||
@@ -16,7 +16,7 @@
|
||||
"discordLabel": "Discord",
|
||||
"nodes": "ワークフロー",
|
||||
"txt2img": "txt2img",
|
||||
"postprocessing": "Post Processing",
|
||||
"postprocessing": "ポストプロセス",
|
||||
"t2iAdapter": "T2I アダプター",
|
||||
"communityLabel": "コミュニティ",
|
||||
"dontAskMeAgain": "次回から確認しない",
|
||||
@@ -71,8 +71,8 @@
|
||||
"orderBy": "並び順:",
|
||||
"enabled": "有効",
|
||||
"notInstalled": "未インストール",
|
||||
"positivePrompt": "プロンプト",
|
||||
"negativePrompt": "除外する要素",
|
||||
"positivePrompt": "ポジティブプロンプト",
|
||||
"negativePrompt": "ネガティブプロンプト",
|
||||
"selected": "選択済み",
|
||||
"aboutDesc": "Invokeを業務で利用する場合はマークしてください:",
|
||||
"beta": "ベータ",
|
||||
@@ -80,7 +80,20 @@
|
||||
"editor": "エディタ",
|
||||
"safetensors": "Safetensors",
|
||||
"tab": "タブ",
|
||||
"toResolve": "解決方法"
|
||||
"toResolve": "解決方法",
|
||||
"openInViewer": "ビューアで開く",
|
||||
"placeholderSelectAModel": "モデルを選択",
|
||||
"clipboard": "クリップボード",
|
||||
"apply": "適用",
|
||||
"loadingImage": "画像をロード中",
|
||||
"off": "オフ",
|
||||
"view": "ビュー",
|
||||
"edit": "編集",
|
||||
"ok": "OK",
|
||||
"reset": "リセット",
|
||||
"none": "なし",
|
||||
"new": "新規",
|
||||
"close": "閉じる"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "画像のサイズ",
|
||||
@@ -125,12 +138,114 @@
|
||||
"compareHelp1": "<Kbd>Alt</Kbd> キーを押しながらギャラリー画像をクリックするか、矢印キーを使用して比較画像を変更します。",
|
||||
"compareHelp3": "<Kbd>C</Kbd>を押して、比較した画像を入れ替えます。",
|
||||
"compareHelp4": "<Kbd>[Z</Kbd>]または<Kbd>[Esc</Kbd>]を押して終了します。",
|
||||
"compareHelp2": "<Kbd>M</Kbd> キーを押して比較モードを切り替えます。"
|
||||
"compareHelp2": "<Kbd>M</Kbd> キーを押して比較モードを切り替えます。",
|
||||
"move": "移動",
|
||||
"openViewer": "ビューアを開く",
|
||||
"closeViewer": "ビューアを閉じる",
|
||||
"exitSearch": "画像検索を終了",
|
||||
"oldestFirst": "最古から",
|
||||
"showStarredImagesFirst": "スター付き画像を最初に",
|
||||
"exitBoardSearch": "ボード検索を終了",
|
||||
"showArchivedBoards": "アーカイブされたボードを表示",
|
||||
"searchImages": "メタデータで検索",
|
||||
"gallery": "ギャラリー",
|
||||
"newestFirst": "最新から",
|
||||
"jump": "ジャンプ",
|
||||
"go": "進む",
|
||||
"sortDirection": "並び替え順",
|
||||
"displayBoardSearch": "ボード検索",
|
||||
"displaySearch": "画像を検索",
|
||||
"boardsSettings": "ボード設定",
|
||||
"imagesSettings": "ギャラリー画像設定"
|
||||
},
|
||||
"hotkeys": {
|
||||
"searchHotkeys": "ホットキーを検索",
|
||||
"clearSearch": "検索をクリア",
|
||||
"noHotkeysFound": "ホットキーが見つかりません"
|
||||
"noHotkeysFound": "ホットキーが見つかりません",
|
||||
"viewer": {
|
||||
"runPostprocessing": {
|
||||
"title": "ポストプロセスを実行"
|
||||
},
|
||||
"useSize": {
|
||||
"title": "サイズを使用"
|
||||
},
|
||||
"recallPrompts": {
|
||||
"title": "プロンプトを再使用"
|
||||
},
|
||||
"recallAll": {
|
||||
"title": "全てのメタデータを再使用"
|
||||
},
|
||||
"recallSeed": {
|
||||
"title": "シード値を再使用"
|
||||
}
|
||||
},
|
||||
"canvas": {
|
||||
"redo": {
|
||||
"title": "やり直し"
|
||||
},
|
||||
"transformSelected": {
|
||||
"title": "変形"
|
||||
},
|
||||
"undo": {
|
||||
"title": "取り消し"
|
||||
},
|
||||
"selectEraserTool": {
|
||||
"title": "消しゴムツール"
|
||||
},
|
||||
"cancelTransform": {
|
||||
"title": "変形をキャンセル"
|
||||
},
|
||||
"resetSelected": {
|
||||
"title": "レイヤーをリセット"
|
||||
},
|
||||
"applyTransform": {
|
||||
"title": "変形を適用"
|
||||
},
|
||||
"selectColorPickerTool": {
|
||||
"title": "スポイトツール"
|
||||
},
|
||||
"fitBboxToCanvas": {
|
||||
"title": "バウンディングボックスをキャンバスにフィット"
|
||||
},
|
||||
"selectBrushTool": {
|
||||
"title": "ブラシツール"
|
||||
},
|
||||
"selectMoveTool": {
|
||||
"title": "移動ツール"
|
||||
},
|
||||
"selectBboxTool": {
|
||||
"title": "バウンディングボックスツール"
|
||||
},
|
||||
"title": "キャンバス",
|
||||
"fitLayersToCanvas": {
|
||||
"title": "レイヤーをキャンバスにフィット"
|
||||
}
|
||||
},
|
||||
"workflows": {
|
||||
"undo": {
|
||||
"title": "取り消し"
|
||||
},
|
||||
"redo": {
|
||||
"title": "やり直し"
|
||||
}
|
||||
},
|
||||
"app": {
|
||||
"toggleLeftPanel": {
|
||||
"title": "左パネルをトグル",
|
||||
"desc": "左パネルを表示または非表示。"
|
||||
},
|
||||
"title": "アプリケーション",
|
||||
"invoke": {
|
||||
"title": "Invoke"
|
||||
},
|
||||
"cancelQueueItem": {
|
||||
"title": "キャンセル"
|
||||
},
|
||||
"clearQueue": {
|
||||
"title": "キューをクリア"
|
||||
}
|
||||
},
|
||||
"hotkeys": "ホットキー"
|
||||
},
|
||||
"modelManager": {
|
||||
"modelManager": "モデルマネージャ",
|
||||
@@ -165,7 +280,7 @@
|
||||
"convertToDiffusers": "ディフューザーに変換",
|
||||
"alpha": "アルファ",
|
||||
"modelConverted": "モデル変換が完了しました",
|
||||
"predictionType": "予測タイプ(安定したディフュージョン 2.x モデルおよび一部の安定したディフュージョン 1.x モデル用)",
|
||||
"predictionType": "予測タイプ(SD 2.x モデルおよび一部のSD 1.x モデル用)",
|
||||
"selectModel": "モデルを選択",
|
||||
"advanced": "高度な設定",
|
||||
"modelDeleted": "モデルが削除されました",
|
||||
@@ -178,7 +293,9 @@
|
||||
"convertToDiffusersHelpText1": "このモデルは 🧨 Diffusers フォーマットに変換されます。",
|
||||
"convertToDiffusersHelpText3": "チェックポイントファイルは、InvokeAIルートフォルダ内にある場合、ディスクから削除されます。カスタムロケーションにある場合は、削除されません。",
|
||||
"convertToDiffusersHelpText4": "これは一回限りのプロセスです。コンピュータの仕様によっては、約30秒から60秒かかる可能性があります。",
|
||||
"cancel": "キャンセル"
|
||||
"cancel": "キャンセル",
|
||||
"uploadImage": "画像をアップロード",
|
||||
"addModels": "モデルを追加"
|
||||
},
|
||||
"parameters": {
|
||||
"images": "画像",
|
||||
@@ -200,7 +317,19 @@
|
||||
"info": "情報",
|
||||
"showOptionsPanel": "オプションパネルを表示",
|
||||
"iterations": "生成回数",
|
||||
"general": "基本設定"
|
||||
"general": "基本設定",
|
||||
"setToOptimalSize": "サイズをモデルに最適化",
|
||||
"invoke": {
|
||||
"addingImagesTo": "画像の追加先"
|
||||
},
|
||||
"aspect": "縦横比",
|
||||
"lockAspectRatio": "縦横比を固定",
|
||||
"scheduler": "スケジューラー",
|
||||
"sendToUpscale": "アップスケーラーに転送",
|
||||
"useSize": "サイズを使用",
|
||||
"postProcessing": "ポストプロセス (Shift + U)",
|
||||
"denoisingStrength": "ノイズ除去強度",
|
||||
"recallMetadata": "メタデータを再使用"
|
||||
},
|
||||
"settings": {
|
||||
"models": "モデル",
|
||||
@@ -213,7 +342,11 @@
   },
   "toast": {
     "uploadFailed": "アップロード失敗",
-    "imageCopied": "画像をコピー"
+    "imageCopied": "画像をコピー",
+    "imageUploadFailed": "画像のアップロードに失敗しました",
+    "uploadFailedInvalidUploadDesc": "画像はPNGかJPGである必要があります。",
+    "sentToUpscale": "アップスケーラーに転送しました",
+    "imageUploaded": "画像をアップロードしました"
   },
   "accessibility": {
     "invokeProgressBar": "進捗バー",
@@ -226,7 +359,10 @@
|
||||
"resetUI": "$t(accessibility.reset) UI",
|
||||
"mode": "モード:",
|
||||
"about": "Invoke について",
|
||||
"submitSupportTicket": "サポート依頼を送信する"
|
||||
"submitSupportTicket": "サポート依頼を送信する",
|
||||
"uploadImages": "画像をアップロード",
|
||||
"toggleLeftPanel": "左パネルをトグル (T)",
|
||||
"toggleRightPanel": "右パネルをトグル (G)"
|
||||
},
|
||||
"metadata": {
|
||||
"Threshold": "ノイズ閾値",
|
||||
@@ -237,7 +373,8 @@
|
||||
"scheduler": "スケジューラー",
|
||||
"positivePrompt": "ポジティブプロンプト",
|
||||
"strength": "Image to Image 強度",
|
||||
"recallParameters": "パラメータを呼び出す"
|
||||
"recallParameters": "パラメータを再使用",
|
||||
"recallParameter": "{{label}} を再使用"
|
||||
},
|
||||
"queue": {
|
||||
"queueEmpty": "キューが空です",
|
||||
@@ -297,14 +434,22 @@
|
||||
"prune": "刈り込み",
|
||||
"prompts_other": "プロンプト",
|
||||
"iterations_other": "繰り返し",
|
||||
"generations_other": "生成"
|
||||
"generations_other": "生成",
|
||||
"canvas": "キャンバス",
|
||||
"workflows": "ワークフロー",
|
||||
"upscaling": "アップスケール",
|
||||
"generation": "生成",
|
||||
"other": "その他",
|
||||
"gallery": "ギャラリー"
|
||||
},
|
||||
"models": {
|
||||
"noMatchingModels": "一致するモデルがありません",
|
||||
"loading": "読み込み中",
|
||||
"noMatchingLoRAs": "一致するLoRAがありません",
|
||||
"noModelsAvailable": "使用可能なモデルがありません",
|
||||
"selectModel": "モデルを選択してください"
|
||||
"selectModel": "モデルを選択してください",
|
||||
"concepts": "コンセプト",
|
||||
"addLora": "LoRAを追加"
|
||||
},
|
||||
"nodes": {
|
||||
"addNode": "ノードを追加",
|
||||
@@ -339,7 +484,8 @@
|
||||
"cannotConnectOutputToOutput": "出力から出力には接続できません",
|
||||
"cannotConnectToSelf": "自身のノードには接続できません",
|
||||
"colorCodeEdges": "カラー-Code Edges",
|
||||
"loadingNodes": "ノードを読み込み中..."
|
||||
"loadingNodes": "ノードを読み込み中...",
|
||||
"scheduler": "スケジューラー"
|
||||
},
|
||||
"boards": {
|
||||
"autoAddBoard": "自動追加するボード",
|
||||
@@ -362,7 +508,18 @@
|
||||
"deleteBoardAndImages": "ボードと画像の削除",
|
||||
"deleteBoardOnly": "ボードのみ削除",
|
||||
"deletedBoardsCannotbeRestored": "削除されたボードは復元できません",
|
||||
"movingImagesToBoard_other": "{{count}} の画像をボードに移動:"
|
||||
"movingImagesToBoard_other": "{{count}} の画像をボードに移動:",
|
||||
"hideBoards": "ボードを隠す",
|
||||
"assetsWithCount_other": "{{count}} のアセット",
|
||||
"addPrivateBoard": "プライベートボードを追加",
|
||||
"addSharedBoard": "共有ボードを追加",
|
||||
"boards": "ボード",
|
||||
"private": "プライベートボード",
|
||||
"shared": "共有ボード",
|
||||
"archiveBoard": "ボードをアーカイブ",
|
||||
"archived": "アーカイブ完了",
|
||||
"unarchiveBoard": "アーカイブされていないボード",
|
||||
"imagesWithCount_other": "{{count}} の画像"
|
||||
},
|
||||
"invocationCache": {
|
||||
"invocationCache": "呼び出しキャッシュ",
|
||||
@@ -387,6 +544,33 @@
|
||||
"paragraphs": [
|
||||
"生成された画像の縦横比。"
|
||||
]
|
||||
},
|
||||
"regionalGuidanceAndReferenceImage": {
|
||||
"heading": "領域ガイダンスと領域参照画像"
|
||||
},
|
||||
"regionalReferenceImage": {
|
||||
"heading": "領域参照画像"
|
||||
},
|
||||
"paramScheduler": {
|
||||
"heading": "スケジューラー"
|
||||
},
|
||||
"regionalGuidance": {
|
||||
"heading": "領域ガイダンス"
|
||||
},
|
||||
"rasterLayer": {
|
||||
"heading": "ラスターレイヤー"
|
||||
},
|
||||
"globalReferenceImage": {
|
||||
"heading": "全域参照画像"
|
||||
},
|
||||
"paramUpscaleMethod": {
|
||||
"heading": "アップスケール手法"
|
||||
},
|
||||
"upscaleModel": {
|
||||
"heading": "アップスケールモデル"
|
||||
},
|
||||
"paramAspect": {
|
||||
"heading": "縦横比"
|
||||
}
|
||||
},
|
||||
"accordions": {
|
||||
@@ -427,5 +611,79 @@
|
||||
"tabs": {
|
||||
"queue": "キュー"
|
||||
}
|
||||
},
|
||||
"controlLayers": {
|
||||
"globalReferenceImage_withCount_other": "全域参照画像",
|
||||
"regionalReferenceImage": "領域参照画像",
|
||||
"saveLayerToAssets": "レイヤーをアセットに保存",
|
||||
"global": "全域",
|
||||
"inpaintMasks_withCount_hidden": "インペイントマスク ({{count}} hidden)",
|
||||
"opacity": "透明度",
|
||||
"canvasContextMenu": {
|
||||
"newRegionalGuidance": "新規領域ガイダンス",
|
||||
"bboxGroup": "バウンディングボックスから作成",
|
||||
"cropCanvasToBbox": "キャンバスをバウンディングボックスでクロップ",
|
||||
"newGlobalReferenceImage": "新規全域参照画像",
|
||||
"newRegionalReferenceImage": "新規領域参照画像"
|
||||
},
|
||||
"regionalGuidance": "領域ガイダンス",
|
||||
"globalReferenceImage": "全域参照画像",
|
||||
"moveForward": "前面へ移動",
|
||||
"copyInpaintMaskTo": "$t(controlLayers.inpaintMask) をコピー",
|
||||
"transform": {
|
||||
"fitToBbox": "バウンディングボックスにフィット",
|
||||
"transform": "変形",
|
||||
"apply": "適用",
|
||||
"cancel": "キャンセル",
|
||||
"reset": "リセット"
|
||||
},
|
||||
"cropLayerToBbox": "レイヤーをバウンディングボックスでクロップ",
|
||||
"convertInpaintMaskTo": "$t(controlLayers.inpaintMask)を変換",
|
||||
"regionalGuidance_withCount_other": "領域ガイダンス",
|
||||
"tool": {
|
||||
"colorPicker": "スポイト",
|
||||
"brush": "ブラシ",
|
||||
"rectangle": "矩形",
|
||||
"move": "移動",
|
||||
"eraser": "消しゴム"
|
||||
},
|
||||
"saveCanvasToGallery": "キャンバスをギャラリーに保存",
|
||||
"saveBboxToGallery": "バウンディングボックスをギャラリーへ保存",
|
||||
"moveToBack": "最背面へ移動",
|
||||
"duplicate": "複製",
|
||||
"addLayer": "レイヤーを追加",
|
||||
"rasterLayer": "ラスターレイヤー",
|
||||
"inpaintMasks_withCount_visible": "({{count}}) インペイントマスク",
|
||||
"regional": "領域",
|
||||
"rectangle": "矩形",
|
||||
"moveBackward": "背面へ移動",
|
||||
"moveToFront": "最前面へ移動",
|
||||
"mergeDown": "レイヤーを統合",
|
||||
"inpaintMask_withCount_other": "インペイントマスク",
|
||||
"canvas": "キャンバス",
|
||||
"fitBboxToLayers": "バウンディングボックスをレイヤーにフィット",
|
||||
"removeBookmark": "ブックマークを外す",
|
||||
"savedToGalleryOk": "ギャラリーに保存しました"
|
||||
},
|
||||
"stylePresets": {
|
||||
"clearTemplateSelection": "選択したテンプレートをクリア",
|
||||
"choosePromptTemplate": "プロンプトテンプレートを選択",
|
||||
"myTemplates": "自分のテンプレート",
|
||||
"flatten": "選択中のテンプレートをプロンプトに展開",
|
||||
"uploadImage": "画像をアップロード",
|
||||
"defaultTemplates": "デフォルトテンプレート",
|
||||
"createPromptTemplate": "プロンプトテンプレートを作成",
|
||||
"promptTemplateCleared": "プロンプトテンプレートをクリアしました",
|
||||
"searchByName": "名前で検索",
|
||||
"toggleViewMode": "表示モードを切り替え"
|
||||
},
|
||||
"upscaling": {
|
||||
"upscaleModel": "アップスケールモデル",
|
||||
"postProcessingModel": "ポストプロセスモデル",
|
||||
"upscale": "アップスケール"
|
||||
},
|
||||
"sdxl": {
|
||||
"denoisingStrength": "ノイズ除去強度",
|
||||
"scheduler": "スケジューラー"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1660,7 +1660,6 @@
"clearCaches": "Очистить кэши",
"recalculateRects": "Пересчитать прямоугольники",
"saveBboxToGallery": "Сохранить рамку в галерею",
"resetCanvas": "Сбросить холст",
"canvas": "Холст",
"global": "Глобальный",
"newGlobalReferenceImageError": "Проблема с созданием глобального эталонного изображения",
File diff suppressed because it is too large
@@ -96,7 +96,9 @@
     "view": "视图",
     "alpha": "透明度通道",
     "openInViewer": "在查看器中打开",
-    "clipboard": "剪贴板"
+    "clipboard": "剪贴板",
+    "loadingModel": "加载模型",
+    "generating": "生成中"
   },
   "gallery": {
     "galleryImageSize": "预览大小",
@@ -587,7 +589,27 @@
|
||||
"huggingFace": "HuggingFace",
|
||||
"hfTokenInvalid": "HF 令牌无效或缺失",
|
||||
"hfTokenLabel": "HuggingFace 令牌(某些模型所需)",
|
||||
"hfTokenHelperText": "使用某些模型需要 HF 令牌。点击这里创建或获取你的令牌。"
|
||||
"hfTokenHelperText": "使用某些模型需要 HF 令牌。点击这里创建或获取你的令牌。",
|
||||
"includesNModels": "包括 {{n}} 个模型及其依赖项",
|
||||
"starterBundles": "启动器包",
|
||||
"learnMoreAboutSupportedModels": "了解更多关于我们支持的模型的信息",
|
||||
"hfForbidden": "您没有权限访问这个 HF 模型",
|
||||
"hfTokenInvalidErrorMessage": "无效或缺失 HuggingFace 令牌。",
|
||||
"hfTokenRequired": "您正在尝试下载一个需要有效 HuggingFace 令牌的模型。",
|
||||
"hfTokenSaved": "HF 令牌已保存",
|
||||
"hfForbiddenErrorMessage": "我们建议访问 HuggingFace.com 上的仓库页面。所有者可能要求您接受条款才能下载。",
|
||||
"hfTokenUnableToVerifyErrorMessage": "无法验证 HuggingFace 令牌。这可能是由于网络错误导致的。请稍后再试。",
|
||||
"hfTokenInvalidErrorMessage2": "在这里更新它。 ",
|
||||
"hfTokenUnableToVerify": "无法验证 HF 令牌",
|
||||
"skippingXDuplicates_other": "跳过 {{count}} 个重复项",
|
||||
"starterBundleHelpText": "轻松安装所有用于启动基础模型所需的模型,包括主模型、ControlNets、IP适配器等。选择一个安装包时,会跳过已安装的模型。",
|
||||
"installingBundle": "正在安装模型包",
|
||||
"installingModel": "正在安装模型",
|
||||
"installingXModels_other": "正在安装 {{count}} 个模型",
|
||||
"t5Encoder": "T5 编码器",
|
||||
"clipLEmbed": "CLIP-L 嵌入",
|
||||
"clipGEmbed": "CLIP-G 嵌入",
|
||||
"loraModels": "LoRAs(低秩适配)"
|
||||
},
|
||||
"parameters": {
|
||||
"images": "图像",
|
||||
@@ -646,8 +668,23 @@
|
||||
"controlAdapterIncompatibleBaseModel": "Control Adapter的基础模型不兼容",
|
||||
"ipAdapterIncompatibleBaseModel": "IP Adapter的基础模型不兼容",
|
||||
"ipAdapterNoImageSelected": "未选择IP Adapter图像",
|
||||
"rgNoRegion": "未选择区域"
|
||||
}
|
||||
"rgNoRegion": "未选择区域",
|
||||
"t2iAdapterIncompatibleBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}},边界框宽度为 {{width}}",
|
||||
"t2iAdapterIncompatibleScaledBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}},缩放后的边界框高度为 {{height}}",
|
||||
"t2iAdapterIncompatibleBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}},边界框高度为 {{height}}",
|
||||
"t2iAdapterIncompatibleScaledBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}},缩放后的边界框宽度为 {{width}}"
|
||||
},
|
||||
"canvasIsFiltering": "画布正在过滤",
|
||||
"fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16),缩放后的边界框高度为 {{height}}",
|
||||
"noCLIPEmbedModelSelected": "未为FLUX生成选择CLIP嵌入模型",
|
||||
"noFLUXVAEModelSelected": "未为FLUX生成选择VAE模型",
|
||||
"canvasIsRasterizing": "画布正在栅格化",
|
||||
"canvasIsCompositing": "画布正在合成",
|
||||
"fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16),边界框宽度为 {{width}}",
|
||||
"fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16),缩放后的边界框宽度为 {{width}}",
|
||||
"noT5EncoderModelSelected": "未为FLUX生成选择T5编码器模型",
|
||||
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16),边界框高度为 {{height}}",
|
||||
"canvasIsTransforming": "画布正在变换"
|
||||
},
|
||||
"patchmatchDownScaleSize": "缩小",
|
||||
"clipSkip": "CLIP 跳过层",
|
||||
@@ -669,7 +706,15 @@
|
||||
"sendToUpscale": "发送到放大",
|
||||
"processImage": "处理图像",
|
||||
"infillColorValue": "填充颜色",
|
||||
"coherenceMinDenoise": "最小去噪"
|
||||
"coherenceMinDenoise": "最小去噪",
|
||||
"sendToCanvas": "发送到画布",
|
||||
"disabledNoRasterContent": "已禁用(无栅格内容)",
|
||||
"optimizedImageToImage": "优化的图生图",
|
||||
"guidance": "引导",
|
||||
"gaussianBlur": "高斯模糊",
|
||||
"recallMetadata": "调用元数据",
|
||||
"boxBlur": "方框模糊",
|
||||
"staged": "已分阶段处理"
|
||||
},
|
||||
"settings": {
|
||||
"models": "模型",
|
||||
@@ -699,7 +744,12 @@
|
||||
"enableInformationalPopovers": "启用信息弹窗",
|
||||
"reloadingIn": "重新加载中",
|
||||
"informationalPopoversDisabled": "信息提示框已禁用",
|
||||
"informationalPopoversDisabledDesc": "信息提示框已被禁用.请在设置中重新启用."
|
||||
"informationalPopoversDisabledDesc": "信息提示框已被禁用.请在设置中重新启用.",
|
||||
"enableModelDescriptions": "在下拉菜单中启用模型描述",
|
||||
"confirmOnNewSession": "新会话时确认",
|
||||
"modelDescriptionsDisabledDesc": "下拉菜单中的模型描述已被禁用。可在设置中启用。",
|
||||
"modelDescriptionsDisabled": "下拉菜单中的模型描述已禁用",
|
||||
"showDetailedInvocationProgress": "显示进度详情"
|
||||
},
|
||||
"toast": {
|
||||
"uploadFailed": "上传失败",
|
||||
@@ -740,7 +790,24 @@
|
||||
"errorCopied": "错误信息已复制",
|
||||
"modelImportCanceled": "模型导入已取消",
|
||||
"importFailed": "导入失败",
|
||||
"importSuccessful": "导入成功"
|
||||
"importSuccessful": "导入成功",
|
||||
"layerSavedToAssets": "图层已保存到资产",
|
||||
"sentToUpscale": "已发送到放大处理",
|
||||
"addedToUncategorized": "已添加到看板 $t(boards.uncategorized) 的资产中",
|
||||
"linkCopied": "链接已复制",
|
||||
"uploadFailedInvalidUploadDesc_withCount_other": "最多只能上传 {{count}} 张 PNG 或 JPEG 图像。",
|
||||
"problemSavingLayer": "无法保存图层",
|
||||
"unableToLoadImage": "无法加载图像",
|
||||
"imageNotLoadedDesc": "无法找到图像",
|
||||
"unableToLoadStylePreset": "无法加载样式预设",
|
||||
"stylePresetLoaded": "样式预设已加载",
|
||||
"problemCopyingLayer": "无法复制图层",
|
||||
"sentToCanvas": "已发送到画布",
|
||||
"unableToLoadImageMetadata": "无法加载图像元数据",
|
||||
"imageSaved": "图像已保存",
|
||||
"imageSavingFailed": "图像保存失败",
|
||||
"layerCopiedToClipboard": "图层已复制到剪贴板",
|
||||
"imagesWillBeAddedTo": "上传的图像将添加到看板 {{boardName}} 的资产中。"
|
||||
},
|
||||
"accessibility": {
|
||||
"invokeProgressBar": "Invoke 进度条",
|
||||
@@ -890,7 +957,12 @@
|
||||
"clearWorkflow": "清除工作流",
|
||||
"imageAccessError": "无法找到图像 {{image_name}},正在恢复默认设置",
|
||||
"boardAccessError": "无法找到面板 {{board_id}},正在恢复默认设置",
|
||||
"modelAccessError": "无法找到模型 {{key}},正在恢复默认设置"
|
||||
"modelAccessError": "无法找到模型 {{key}},正在恢复默认设置",
|
||||
"noWorkflows": "无工作流程",
|
||||
"workflowHelpText": "需要帮助?请查看我们的《<LinkComponent>工作流程入门指南</LinkComponent>》。",
|
||||
"noMatchingWorkflows": "无匹配的工作流程",
|
||||
"saveToGallery": "保存到图库",
|
||||
"singleFieldType": "{{name}}(单一模型)"
|
||||
},
|
||||
"queue": {
|
||||
"status": "状态",
|
||||
@@ -1221,7 +1293,8 @@
       "heading": "去噪强度",
       "paragraphs": [
         "为输入图像添加的噪声量。",
-        "输入 0 会导致结果图像和输入完全相同,输入 1 则会生成全新的图像。"
+        "输入 0 会导致结果图像和输入完全相同,输入 1 则会生成全新的图像。",
+        "当没有具有可见内容的栅格图层时,此设置将被忽略。"
       ]
     },
     "paramSeed": {
@@ -1410,7 +1483,8 @@
       "paragraphs": [
         "控制提示对生成过程的影响程度.",
         "与生成CFG Scale相似."
-      ]
+      ],
+      "heading": "CFG比例"
     },
     "structure": {
       "heading": "结构",
@@ -1441,6 +1515,62 @@
|
||||
"paragraphs": [
|
||||
"比例控制决定了输出图像的大小,它是基于输入图像分辨率的倍数来计算的.例如对一张1024x1024的图像进行2倍上采样,将会得到一张2048x2048的输出图像."
|
||||
]
|
||||
},
|
||||
"globalReferenceImage": {
|
||||
"heading": "全局参考图像",
|
||||
"paragraphs": [
|
||||
"应用参考图像以影响整个生成过程。"
|
||||
]
|
||||
},
|
||||
"rasterLayer": {
|
||||
"paragraphs": [
|
||||
"画布的基于像素的内容,用于图像生成过程。"
|
||||
],
|
||||
"heading": "栅格图层"
|
||||
},
|
||||
"regionalGuidanceAndReferenceImage": {
|
||||
"paragraphs": [
|
||||
"对于区域引导,使用画笔引导全局提示中的元素应出现的位置。",
|
||||
"对于区域参考图像,使用画笔将参考图像应用到特定区域。"
|
||||
],
|
||||
"heading": "区域引导与区域参考图像"
|
||||
},
|
||||
"regionalReferenceImage": {
|
||||
"heading": "区域参考图像",
|
||||
"paragraphs": [
|
||||
"使用画笔将参考图像应用到特定区域。"
|
||||
]
|
||||
},
|
||||
"optimizedDenoising": {
|
||||
"heading": "优化的图生图",
|
||||
"paragraphs": [
|
||||
"启用‘优化的图生图’功能,可在使用 Flux 模型进行图生图和图像修复转换时提供更平滑的降噪强度调节。此设置可以提高对图像变化程度的控制能力,但如果您更倾向于使用标准的降噪强度调节方式,也可以关闭此功能。该设置仍在优化中,目前处于测试阶段。"
|
||||
]
|
||||
},
|
||||
"inpainting": {
|
||||
"paragraphs": [
|
||||
"控制由降噪强度引导的修改区域。"
|
||||
],
|
||||
"heading": "图像重绘"
|
||||
},
|
||||
"regionalGuidance": {
|
||||
"heading": "区域引导",
|
||||
"paragraphs": [
|
||||
"使用画笔引导全局提示中的元素应出现的位置。"
|
||||
]
|
||||
},
|
||||
"fluxDevLicense": {
|
||||
"heading": "非商业许可",
|
||||
"paragraphs": [
|
||||
"FLUX.1 [dev] 模型受 FLUX [dev] 非商业许可协议的约束。如需在 Invoke 中将此模型类型用于商业目的,请访问我们的网站了解更多信息。"
|
||||
]
|
||||
},
|
||||
"paramGuidance": {
|
||||
"paragraphs": [
|
||||
"控制提示对生成过程的影响程度。",
|
||||
"较高的引导值可能导致过度饱和,而过高或过低的引导值可能导致生成结果失真。引导仅适用于FLUX DEV模型。"
|
||||
],
|
||||
"heading": "引导"
|
||||
}
|
||||
},
|
||||
"invocationCache": {
|
||||
@@ -1503,7 +1633,18 @@
|
||||
"convertGraph": "转换图表",
|
||||
"loadWorkflow": "$t(common.load) 工作流",
|
||||
"loadFromGraph": "从图表加载工作流",
|
||||
"autoLayout": "自动布局"
|
||||
"autoLayout": "自动布局",
|
||||
"edit": "编辑",
|
||||
"copyShareLinkForWorkflow": "复制工作流程的分享链接",
|
||||
"delete": "删除",
|
||||
"download": "下载",
|
||||
"defaultWorkflows": "默认工作流程",
|
||||
"userWorkflows": "用户工作流程",
|
||||
"projectWorkflows": "项目工作流程",
|
||||
"copyShareLink": "复制分享链接",
|
||||
"chooseWorkflowFromLibrary": "从库中选择工作流程",
|
||||
"uploadAndSaveWorkflow": "上传到库",
|
||||
"deleteWorkflow2": "您确定要删除此工作流程吗?此操作无法撤销。"
|
||||
},
|
||||
"accordions": {
|
||||
"compositing": {
|
||||
@@ -1542,7 +1683,108 @@
|
||||
"addPositivePrompt": "添加 $t(controlLayers.prompt)",
|
||||
"addNegativePrompt": "添加 $t(controlLayers.negativePrompt)",
|
||||
"rectangle": "矩形",
|
||||
"opacity": "透明度"
|
||||
"opacity": "透明度",
|
||||
"canvas": "画布",
|
||||
"fitBboxToLayers": "将边界框适配到图层",
|
||||
"cropLayerToBbox": "将图层裁剪到边界框",
|
||||
"saveBboxToGallery": "将边界框保存到图库",
|
||||
"savedToGalleryOk": "已保存到图库",
|
||||
"saveLayerToAssets": "将图层保存到资产",
|
||||
"removeBookmark": "移除书签",
|
||||
"regional": "区域",
|
||||
"saveCanvasToGallery": "将画布保存到图库",
|
||||
"global": "全局",
|
||||
"bookmark": "添加书签以快速切换",
|
||||
"regionalReferenceImage": "局部参考图像",
|
||||
"mergingLayers": "正在合并图层",
|
||||
"newControlLayerError": "创建控制层时出现问题",
|
||||
"pullBboxIntoReferenceImageError": "将边界框导入参考图像时出现问题",
|
||||
"mergeVisibleOk": "已合并图层",
|
||||
"maskFill": "遮罩填充",
|
||||
"newCanvasFromImage": "从图像创建新画布",
|
||||
"pullBboxIntoReferenceImageOk": "边界框已导入到参考图像",
|
||||
"globalReferenceImage_withCount_other": "全局参考图像",
|
||||
"addInpaintMask": "添加 $t(controlLayers.inpaintMask)",
|
||||
"referenceImage": "参考图像",
|
||||
"globalReferenceImage": "全局参考图像",
|
||||
"newRegionalGuidance": "新建 $t(controlLayers.regionalGuidance)",
|
||||
"savedToGalleryError": "保存到图库时出错",
|
||||
"copyRasterLayerTo": "复制 $t(controlLayers.rasterLayer) 到",
|
||||
"clearHistory": "清除历史记录",
|
||||
"inpaintMask": "修复遮罩",
|
||||
"regionalGuidance_withCount_visible": "区域引导({{count}} 个)",
|
||||
"inpaintMasks_withCount_hidden": "修复遮罩({{count}} 个已隐藏)",
|
||||
"enableAutoNegative": "启用自动负面提示",
|
||||
"disableAutoNegative": "禁用自动负面提示",
|
||||
"deleteReferenceImage": "删除参考图像",
|
||||
"sendToCanvas": "发送到画布",
|
||||
"controlLayers_withCount_visible": "控制图层({{count}} 个)",
|
||||
"rasterLayers_withCount_visible": "栅格图层({{count}} 个)",
|
||||
"convertRegionalGuidanceTo": "将 $t(controlLayers.regionalGuidance) 转换为",
|
||||
"newInpaintMask": "新建 $t(controlLayers.inpaintMask)",
|
||||
"regionIsEmpty": "选定区域为空",
|
||||
"mergeVisible": "合并可见图层",
|
||||
"showHUD": "显示 HUD(抬头显示)",
|
||||
"newLayerFromImage": "从图像创建新图层",
|
||||
"layer_other": "图层",
|
||||
"transparency": "透明度",
|
||||
"addRasterLayer": "添加 $t(controlLayers.rasterLayer)",
|
||||
"newRasterLayerOk": "已创建栅格层",
|
||||
"newRasterLayerError": "创建栅格层时出现问题",
|
||||
"inpaintMasks_withCount_visible": "修复遮罩({{count}} 个)",
|
||||
"convertRasterLayerTo": "将 $t(controlLayers.rasterLayer) 转换为",
|
||||
"copyControlLayerTo": "复制 $t(controlLayers.controlLayer) 到",
|
||||
"copyInpaintMaskTo": "复制 $t(controlLayers.inpaintMask) 到",
|
||||
"copyRegionalGuidanceTo": "复制 $t(controlLayers.regionalGuidance) 到",
|
||||
"newRasterLayer": "新建 $t(controlLayers.rasterLayer)",
|
||||
"newControlLayer": "新建 $t(controlLayers.controlLayer)",
|
||||
"newImg2ImgCanvasFromImage": "从图像创建新的图生图",
|
||||
"rasterLayer": "栅格层",
|
||||
"controlLayer": "控制层",
|
||||
"outputOnlyMaskedRegions": "仅输出生成的区域",
|
||||
"addControlLayer": "添加 $t(controlLayers.controlLayer)",
|
||||
"newGlobalReferenceImageOk": "已创建全局参考图像",
|
||||
"newGlobalReferenceImageError": "创建全局参考图像时出现问题",
|
||||
"newRegionalReferenceImageOk": "已创建局部参考图像",
|
||||
"newControlLayerOk": "已创建控制层",
|
||||
"mergeVisibleError": "合并图层时出错",
|
||||
"bboxOverlay": "显示边界框覆盖层",
|
||||
"clipToBbox": "将Clip限制到边界框",
|
||||
"width": "宽度",
|
||||
"addGlobalReferenceImage": "添加 $t(controlLayers.globalReferenceImage)",
|
||||
"inpaintMask_withCount_other": "修复遮罩",
|
||||
"regionalGuidance_withCount_other": "区域引导",
|
||||
"newRegionalReferenceImageError": "创建局部参考图像时出现问题",
|
||||
"pullBboxIntoLayerError": "将边界框导入图层时出现问题",
|
||||
"pullBboxIntoLayerOk": "边界框已导入到图层",
|
||||
"sendToCanvasDesc": "按下“Invoke”按钮会将您的工作进度暂存到画布上。",
|
||||
"sendToGallery": "发送到图库",
|
||||
"sendToGalleryDesc": "按下“Invoke”键会生成并保存一张唯一的图像到您的图库中。",
|
||||
"rasterLayer_withCount_other": "栅格图层",
|
||||
"mergeDown": "向下合并",
|
||||
"clearCaches": "清除缓存",
|
||||
"recalculateRects": "重新计算矩形",
|
||||
"duplicate": "复制",
|
||||
"regionalGuidance_withCount_hidden": "区域引导({{count}} 个已隐藏)",
|
||||
"convertControlLayerTo": "将 $t(controlLayers.controlLayer) 转换为",
|
||||
"convertInpaintMaskTo": "将 $t(controlLayers.inpaintMask) 转换为",
|
||||
"viewProgressInViewer": "在 <Btn>图像查看器</Btn> 中查看进度和输出结果。",
|
||||
"viewProgressOnCanvas": "在 <Btn>画布</Btn> 上查看进度和暂存的输出内容。",
|
||||
"sendingToGallery": "将生成内容发送到图库",
|
||||
"copyToClipboard": "复制到剪贴板",
|
||||
"controlLayer_withCount_other": "控制图层",
|
||||
"sendingToCanvas": "在画布上准备生成",
|
||||
"addReferenceImage": "添加 $t(controlLayers.referenceImage)",
|
||||
"addRegionalGuidance": "添加 $t(controlLayers.regionalGuidance)",
|
||||
"controlLayers_withCount_hidden": "控制图层({{count}} 个已隐藏)",
|
||||
"rasterLayers_withCount_hidden": "栅格图层({{count}} 个已隐藏)",
|
||||
"globalReferenceImages_withCount_hidden": "全局参考图像({{count}} 个已隐藏)",
|
||||
"globalReferenceImages_withCount_visible": "全局参考图像({{count}} 个)",
|
||||
"layer_withCount_other": "图层({{count}} 个)",
|
||||
"enableTransparencyEffect": "启用透明效果",
|
||||
"disableTransparencyEffect": "禁用透明效果",
|
||||
"hidingType": "隐藏 {{type}}",
|
||||
"showingType": "显示 {{type}}"
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
|
||||
@@ -8,10 +8,8 @@ import { useSyncLoggingConfig } from 'app/logging/useSyncLoggingConfig';
 import { appStarted } from 'app/store/middleware/listenerMiddleware/listeners/appStarted';
 import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
 import type { PartialAppConfig } from 'app/types/invokeai';
-import ImageUploadOverlay from 'common/components/ImageUploadOverlay';
 import { useFocusRegionWatcher } from 'common/hooks/focus';
 import { useClearStorage } from 'common/hooks/useClearStorage';
-import { useFullscreenDropzone } from 'common/hooks/useFullscreenDropzone';
 import { useGlobalHotkeys } from 'common/hooks/useGlobalHotkeys';
 import ChangeBoardModal from 'features/changeBoardModal/components/ChangeBoardModal';
 import {
@@ -19,6 +17,7 @@ import {
   NewGallerySessionDialog,
 } from 'features/controlLayers/components/NewSessionConfirmationAlertDialog';
 import DeleteImageModal from 'features/deleteImageModal/components/DeleteImageModal';
+import { FullscreenDropzone } from 'features/dnd/FullscreenDropzone';
 import { DynamicPromptsModal } from 'features/dynamicPrompts/components/DynamicPromptsPreviewModal';
 import DeleteBoardModal from 'features/gallery/components/Boards/DeleteBoardModal';
 import { ImageContextMenu } from 'features/gallery/components/ImageContextMenu/ImageContextMenu';
@@ -28,6 +27,7 @@ import { ClearQueueConfirmationsAlertDialog } from 'features/queue/components/Cl
 import { DeleteStylePresetDialog } from 'features/stylePresets/components/DeleteStylePresetDialog';
 import { StylePresetModal } from 'features/stylePresets/components/StylePresetForm/StylePresetModal';
 import RefreshAfterResetModal from 'features/system/components/SettingsModal/RefreshAfterResetModal';
+import { VideosModal } from 'features/system/components/VideosModal/VideosModal';
 import { configChanged } from 'features/system/store/configSlice';
 import { selectLanguage } from 'features/system/store/systemSelectors';
 import { AppContent } from 'features/ui/components/AppContent';
@@ -62,8 +62,6 @@ const App = ({ config = DEFAULT_CONFIG, studioInitAction }: Props) => {
   useGetOpenAPISchemaQuery();
   useSyncLoggingConfig();
 
-  const { dropzone, isHandlingUpload, setIsHandlingUpload } = useFullscreenDropzone();
-
   const handleReset = useCallback(() => {
     clearStorage();
     location.reload();
@@ -92,19 +90,8 @@ const App = ({ config = DEFAULT_CONFIG, studioInitAction }: Props) => {
 
   return (
     <ErrorBoundary onReset={handleReset} FallbackComponent={AppErrorBoundaryFallback}>
-      <Box
-        id="invoke-app-wrapper"
-        w="100dvw"
-        h="100dvh"
-        position="relative"
-        overflow="hidden"
-        {...dropzone.getRootProps()}
-      >
-        <input {...dropzone.getInputProps()} />
+      <Box id="invoke-app-wrapper" w="100dvw" h="100dvh" position="relative" overflow="hidden">
         <AppContent />
-        {dropzone.isDragActive && isHandlingUpload && (
-          <ImageUploadOverlay dropzone={dropzone} setIsHandlingUpload={setIsHandlingUpload} />
-        )}
       </Box>
       <DeleteImageModal />
       <ChangeBoardModal />
@@ -121,6 +108,8 @@ const App = ({ config = DEFAULT_CONFIG, studioInitAction }: Props) => {
       <NewGallerySessionDialog />
       <NewCanvasSessionDialog />
       <ImageContextMenu />
+      <FullscreenDropzone />
+      <VideosModal />
     </ErrorBoundary>
   );
 };

@@ -1,4 +1,3 @@
|
||||
import { skipToken } from '@reduxjs/toolkit/query';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import { useIsRegionFocused } from 'common/hooks/focus';
|
||||
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
|
||||
@@ -8,13 +7,11 @@ import { selectLastSelectedImage } from 'features/gallery/store/gallerySelectors
|
||||
import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData';
|
||||
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
|
||||
import { memo } from 'react';
|
||||
import { useGetImageDTOQuery } from 'services/api/endpoints/images';
|
||||
import type { ImageDTO } from 'services/api/types';
|
||||
|
||||
export const GlobalImageHotkeys = memo(() => {
|
||||
useAssertSingleton('GlobalImageHotkeys');
|
||||
const lastSelectedImage = useAppSelector(selectLastSelectedImage);
|
||||
const { currentData: imageDTO } = useGetImageDTOQuery(lastSelectedImage?.image_name ?? skipToken);
|
||||
const imageDTO = useAppSelector(selectLastSelectedImage);
|
||||
|
||||
if (!imageDTO) {
|
||||
return null;
|
||||
|
||||
@@ -19,7 +19,6 @@ import { $workflowCategories } from 'app/store/nanostores/workflowCategories';
|
||||
import { createStore } from 'app/store/store';
|
||||
import type { PartialAppConfig } from 'app/types/invokeai';
|
||||
import Loading from 'common/components/Loading/Loading';
|
||||
import AppDndContext from 'features/dnd/components/AppDndContext';
|
||||
import type { WorkflowCategory } from 'features/nodes/types/workflow';
|
||||
import type { PropsWithChildren, ReactNode } from 'react';
|
||||
import React, { lazy, memo, useEffect, useLayoutEffect, useMemo } from 'react';
|
||||
@@ -237,9 +236,7 @@ const InvokeAIUI = ({
|
||||
<Provider store={store}>
|
||||
<React.Suspense fallback={<Loading />}>
|
||||
<ThemeLocaleProvider>
|
||||
<AppDndContext>
|
||||
<App config={config} studioInitAction={studioInitAction} />
|
||||
</AppDndContext>
|
||||
<App config={config} studioInitAction={studioInitAction} />
|
||||
</ThemeLocaleProvider>
|
||||
</React.Suspense>
|
||||
</Provider>
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import { useStore } from '@nanostores/react';
|
||||
import { useAppStore } from 'app/store/storeHooks';
|
||||
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
|
||||
import { withResultAsync } from 'common/util/result';
|
||||
@@ -9,6 +10,7 @@ import { imageDTOToImageObject } from 'features/controlLayers/store/util';
|
||||
import { $imageViewer } from 'features/gallery/components/ImageViewer/useImageViewer';
|
||||
import { sentImageToCanvas } from 'features/gallery/store/actions';
|
||||
import { parseAndRecallAllMetadata } from 'features/metadata/util/handlers';
|
||||
import { $hasTemplates } from 'features/nodes/store/nodesSlice';
|
||||
import { $isWorkflowListMenuIsOpen } from 'features/nodes/store/workflowListMenu';
|
||||
import { $isStylePresetsMenuOpen, activeStylePresetIdChanged } from 'features/stylePresets/store/stylePresetSlice';
|
||||
import { toast } from 'features/toast/toast';
|
||||
@@ -51,6 +53,7 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
|
||||
const { t } = useTranslation();
|
||||
// Use a ref to ensure that we only perform the action once
|
||||
const didInit = useRef(false);
|
||||
const didParseOpenAPISchema = useStore($hasTemplates);
|
||||
const store = useAppStore();
|
||||
const { getAndLoadWorkflow } = useGetAndLoadLibraryWorkflow();
|
||||
|
||||
@@ -174,7 +177,7 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
|
||||
);
|
||||
|
||||
useEffect(() => {
|
||||
if (didInit.current || !action) {
|
||||
if (didInit.current || !action || !didParseOpenAPISchema) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -187,22 +190,29 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
|
||||
case 'selectStylePreset':
|
||||
handleSelectStylePreset(action.data.stylePresetId);
|
||||
break;
|
||||
|
||||
case 'sendToCanvas':
|
||||
handleSendToCanvas(action.data.imageName);
|
||||
break;
|
||||
|
||||
case 'useAllParameters':
|
||||
handleUseAllMetadata(action.data.imageName);
|
||||
break;
|
||||
|
||||
case 'goToDestination':
|
||||
handleGoToDestination(action.data.destination);
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}, [
|
||||
handleSendToCanvas,
|
||||
handleUseAllMetadata,
|
||||
action,
|
||||
handleLoadWorkflow,
|
||||
handleSelectStylePreset,
|
||||
handleGoToDestination,
|
||||
handleLoadWorkflow,
|
||||
didParseOpenAPISchema,
|
||||
]);
|
||||
};
|
||||
|
||||
@@ -17,6 +17,7 @@ const $logger = atom<Logger>(Roarr.child(BASE_CONTEXT));
 export const zLogNamespace = z.enum([
   'canvas',
   'config',
+  'dnd',
   'events',
   'gallery',
   'generation',

@@ -1,4 +1,3 @@
|
||||
export const STORAGE_PREFIX = '@@invokeai-';
|
||||
export const EMPTY_ARRAY = [];
|
||||
/** @knipignore */
|
||||
export const EMPTY_OBJECT = {};
|
||||
|
||||
@@ -16,7 +16,6 @@ import { addGalleryOffsetChangedListener } from 'app/store/middleware/listenerMi
|
||||
import { addGetOpenAPISchemaListener } from 'app/store/middleware/listenerMiddleware/listeners/getOpenAPISchema';
|
||||
import { addImageAddedToBoardFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/imageAddedToBoard';
|
||||
import { addImageDeletionListeners } from 'app/store/middleware/listenerMiddleware/listeners/imageDeletionListeners';
|
||||
import { addImageDroppedListener } from 'app/store/middleware/listenerMiddleware/listeners/imageDropped';
|
||||
import { addImageRemovedFromBoardFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/imageRemovedFromBoard';
|
||||
import { addImagesStarredListener } from 'app/store/middleware/listenerMiddleware/listeners/imagesStarred';
|
||||
import { addImagesUnstarredListener } from 'app/store/middleware/listenerMiddleware/listeners/imagesUnstarred';
|
||||
@@ -93,9 +92,6 @@ addGetOpenAPISchemaListener(startAppListening);
|
||||
addWorkflowLoadRequestedListener(startAppListening);
|
||||
addUpdateAllNodesRequestedListener(startAppListening);
|
||||
|
||||
// DND
|
||||
addImageDroppedListener(startAppListening);
|
||||
|
||||
// Models
|
||||
addModelSelectedListener(startAppListening);
|
||||
|
||||
|
||||
@@ -1,12 +1,12 @@
 import { createAction } from '@reduxjs/toolkit';
 import { logger } from 'app/logging/logger';
 import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
-import type { SerializableObject } from 'common/types';
 import { buildAdHocPostProcessingGraph } from 'features/nodes/util/graph/buildAdHocPostProcessingGraph';
 import { toast } from 'features/toast/toast';
 import { t } from 'i18next';
-import { queueApi } from 'services/api/endpoints/queue';
+import { enqueueMutationFixedCacheKeyOptions, queueApi } from 'services/api/endpoints/queue';
 import type { BatchConfig, ImageDTO } from 'services/api/types';
+import type { JsonObject } from 'type-fest';
 
 const log = logger('queue');
 
@@ -32,16 +32,14 @@ export const addAdHocPostProcessingRequestedListener = (startAppListening: AppSt
 
     try {
       const req = dispatch(
-        queueApi.endpoints.enqueueBatch.initiate(enqueueBatchArg, {
-          fixedCacheKey: 'enqueueBatch',
-        })
+        queueApi.endpoints.enqueueBatch.initiate(enqueueBatchArg, enqueueMutationFixedCacheKeyOptions)
       );
 
       const enqueueResult = await req.unwrap();
       req.reset();
-      log.debug({ enqueueResult } as SerializableObject, t('queue.graphQueued'));
+      log.debug({ enqueueResult } as JsonObject, t('queue.graphQueued'));
     } catch (error) {
-      log.error({ enqueueBatchArg } as SerializableObject, t('queue.graphFailedToQueue'));
+      log.error({ enqueueBatchArg } as JsonObject, t('queue.graphFailedToQueue'));
 
       if (error instanceof Object && 'status' in error && error.status === 403) {
         return;

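Note: the hunk above, and several later hunks in this compare, replace the inline { fixedCacheKey: 'enqueueBatch' } option with a shared enqueueMutationFixedCacheKeyOptions constant imported from services/api/endpoints/queue. That constant's definition is not shown in these diffs; a minimal sketch of what it presumably contains, inferred from the option it replaces:

// services/api/endpoints/queue.ts -- sketch only; the real definition is not shown in this compare.
// A fixed cache key makes every enqueueBatch mutation share one RTK Query cache entry,
// so all dispatch sites observe the same in-flight status and can reset it consistently.
export const enqueueMutationFixedCacheKeyOptions = {
  fixedCacheKey: 'enqueueBatch',
} as const;
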
@@ -1,12 +1,12 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import type { SerializableObject } from 'common/types';
|
||||
import { zPydanticValidationError } from 'features/system/store/zodSchemas';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { t } from 'i18next';
|
||||
import { truncate, upperFirst } from 'lodash-es';
|
||||
import { serializeError } from 'serialize-error';
|
||||
import { queueApi } from 'services/api/endpoints/queue';
|
||||
import type { JsonObject } from 'type-fest';
|
||||
|
||||
const log = logger('queue');
|
||||
|
||||
@@ -17,7 +17,7 @@ export const addBatchEnqueuedListener = (startAppListening: AppStartListening) =
|
||||
effect: (action) => {
|
||||
const enqueueResult = action.payload;
|
||||
const arg = action.meta.arg.originalArgs;
|
||||
log.debug({ enqueueResult } as SerializableObject, 'Batch enqueued');
|
||||
log.debug({ enqueueResult } as JsonObject, 'Batch enqueued');
|
||||
|
||||
toast({
|
||||
id: 'QUEUE_BATCH_SUCCEEDED',
|
||||
@@ -45,7 +45,7 @@ export const addBatchEnqueuedListener = (startAppListening: AppStartListening) =
|
||||
status: 'error',
|
||||
description: t('common.unknownError'),
|
||||
});
|
||||
log.error({ batchConfig } as SerializableObject, t('queue.batchFailedToQueue'));
|
||||
log.error({ batchConfig } as JsonObject, t('queue.batchFailedToQueue'));
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -71,7 +71,7 @@ export const addBatchEnqueuedListener = (startAppListening: AppStartListening) =
|
||||
description: t('common.unknownError'),
|
||||
});
|
||||
}
|
||||
log.error({ batchConfig, error: serializeError(response) } as SerializableObject, t('queue.batchFailedToQueue'));
|
||||
log.error({ batchConfig, error: serializeError(response) } as JsonObject, t('queue.batchFailedToQueue'));
|
||||
},
|
||||
});
|
||||
};
|
||||
|
||||
@@ -1,19 +1,22 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import { enqueueRequested } from 'app/store/actions';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import type { SerializableObject } from 'common/types';
|
||||
import { extractMessageFromAssertionError } from 'common/util/extractMessageFromAssertionError';
|
||||
import type { Result } from 'common/util/result';
|
||||
import { withResult, withResultAsync } from 'common/util/result';
|
||||
import { $canvasManager } from 'features/controlLayers/store/ephemeral';
|
||||
import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
|
||||
import { buildFLUXGraph } from 'features/nodes/util/graph/generation/buildFLUXGraph';
|
||||
import { buildSD1Graph } from 'features/nodes/util/graph/generation/buildSD1Graph';
|
||||
import { buildSD3Graph } from 'features/nodes/util/graph/generation/buildSD3Graph';
|
||||
import { buildSDXLGraph } from 'features/nodes/util/graph/generation/buildSDXLGraph';
|
||||
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { serializeError } from 'serialize-error';
|
||||
import { queueApi } from 'services/api/endpoints/queue';
|
||||
import { enqueueMutationFixedCacheKeyOptions, queueApi } from 'services/api/endpoints/queue';
|
||||
import type { Invocation } from 'services/api/types';
|
||||
import { assert } from 'tsafe';
|
||||
import { assert, AssertionError } from 'tsafe';
|
||||
import type { JsonObject } from 'type-fest';
|
||||
|
||||
const log = logger('generation');
|
||||
|
||||
@@ -32,8 +35,8 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)
|
||||
let buildGraphResult: Result<
|
||||
{
|
||||
g: Graph;
|
||||
noise: Invocation<'noise' | 'flux_denoise'>;
|
||||
posCond: Invocation<'compel' | 'sdxl_compel_prompt' | 'flux_text_encoder'>;
|
||||
noise: Invocation<'noise' | 'flux_denoise' | 'sd3_denoise'>;
|
||||
posCond: Invocation<'compel' | 'sdxl_compel_prompt' | 'flux_text_encoder' | 'sd3_text_encoder'>;
|
||||
},
|
||||
Error
|
||||
>;
|
||||
@@ -49,6 +52,9 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)
|
||||
case `sd-2`:
|
||||
buildGraphResult = await withResultAsync(() => buildSD1Graph(state, manager));
|
||||
break;
|
||||
case `sd-3`:
|
||||
buildGraphResult = await withResultAsync(() => buildSD3Graph(state, manager));
|
||||
break;
|
||||
case `flux`:
|
||||
buildGraphResult = await withResultAsync(() => buildFLUXGraph(state, manager));
|
||||
break;
|
||||
@@ -57,7 +63,17 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)
|
||||
}
|
||||
|
||||
if (buildGraphResult.isErr()) {
|
||||
log.error({ error: serializeError(buildGraphResult.error) }, 'Failed to build graph');
|
||||
let description: string | null = null;
|
||||
if (buildGraphResult.error instanceof AssertionError) {
|
||||
description = extractMessageFromAssertionError(buildGraphResult.error);
|
||||
}
|
||||
const error = serializeError(buildGraphResult.error);
|
||||
log.error({ error }, 'Failed to build graph');
|
||||
toast({
|
||||
status: 'error',
|
||||
title: 'Failed to build graph',
|
||||
description,
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -75,9 +91,7 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)
|
||||
}
|
||||
|
||||
const req = dispatch(
|
||||
queueApi.endpoints.enqueueBatch.initiate(prepareBatchResult.value, {
|
||||
fixedCacheKey: 'enqueueBatch',
|
||||
})
|
||||
queueApi.endpoints.enqueueBatch.initiate(prepareBatchResult.value, enqueueMutationFixedCacheKeyOptions)
|
||||
);
|
||||
req.reset();
|
||||
|
||||
@@ -88,7 +102,7 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)
|
||||
return;
|
||||
}
|
||||
|
||||
log.debug({ batchConfig: prepareBatchResult.value } as SerializableObject, 'Enqueued batch');
|
||||
log.debug({ batchConfig: prepareBatchResult.value } as JsonObject, 'Enqueued batch');
|
||||
},
|
||||
});
|
||||
};
|
||||
|
||||
@@ -1,10 +1,15 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import { enqueueRequested } from 'app/store/actions';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { selectNodesSlice } from 'features/nodes/store/selectors';
|
||||
import { isImageFieldCollectionInputInstance } from 'features/nodes/types/field';
|
||||
import { isInvocationNode } from 'features/nodes/types/invocation';
|
||||
import { buildNodesGraph } from 'features/nodes/util/graph/buildNodesGraph';
|
||||
import { buildWorkflowWithValidation } from 'features/nodes/util/workflow/buildWorkflow';
|
||||
import { queueApi } from 'services/api/endpoints/queue';
|
||||
import type { BatchConfig } from 'services/api/types';
|
||||
import { enqueueMutationFixedCacheKeyOptions, queueApi } from 'services/api/endpoints/queue';
|
||||
import type { Batch, BatchConfig } from 'services/api/types';
|
||||
|
||||
const log = logger('workflows');
|
||||
|
||||
export const addEnqueueRequestedNodes = (startAppListening: AppStartListening) => {
|
||||
startAppListening({
|
||||
@@ -26,6 +31,33 @@ export const addEnqueueRequestedNodes = (startAppListening: AppStartListening) =
|
||||
delete builtWorkflow.id;
|
||||
}
|
||||
|
||||
const data: Batch['data'] = [];
|
||||
|
||||
// Skip edges from batch nodes - these should not be in the graph, they exist only in the UI
|
||||
const imageBatchNodes = nodes.nodes.filter(isInvocationNode).filter((node) => node.data.type === 'image_batch');
|
||||
for (const node of imageBatchNodes) {
|
||||
const images = node.data.inputs['images'];
|
||||
if (!isImageFieldCollectionInputInstance(images)) {
|
||||
log.warn({ nodeId: node.id }, 'Image batch images field is not an image collection');
|
||||
break;
|
||||
}
|
||||
const edgesFromImageBatch = nodes.edges.filter((e) => e.source === node.id && e.sourceHandle === 'image');
|
||||
const batchDataCollectionItem: NonNullable<Batch['data']>[number] = [];
|
||||
for (const edge of edgesFromImageBatch) {
|
||||
if (!edge.targetHandle) {
|
||||
break;
|
||||
}
|
||||
batchDataCollectionItem.push({
|
||||
node_path: edge.target,
|
||||
field_name: edge.targetHandle,
|
||||
items: images.value,
|
||||
});
|
||||
}
|
||||
if (batchDataCollectionItem.length > 0) {
|
||||
data.push(batchDataCollectionItem);
|
||||
}
|
||||
}
|
||||
|
||||
const batchConfig: BatchConfig = {
|
||||
batch: {
|
||||
graph,
|
||||
@@ -33,15 +65,12 @@ export const addEnqueueRequestedNodes = (startAppListening: AppStartListening) =
|
||||
runs: state.params.iterations,
|
||||
origin: 'workflows',
|
||||
destination: 'gallery',
|
||||
data,
|
||||
},
|
||||
prepend: action.payload.prepend,
|
||||
};
|
||||
|
||||
const req = dispatch(
|
||||
queueApi.endpoints.enqueueBatch.initiate(batchConfig, {
|
||||
fixedCacheKey: 'enqueueBatch',
|
||||
})
|
||||
);
|
||||
const req = dispatch(queueApi.endpoints.enqueueBatch.initiate(batchConfig, enqueueMutationFixedCacheKeyOptions));
|
||||
try {
|
||||
await req.unwrap();
|
||||
} finally {
|
||||
|
||||
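Note: the data array assembled in the hunk above expands each image_batch node's image collection onto the image fields it feeds, then attaches it to the batch. The concrete values depend on the workflow; an illustrative sketch of the resulting Batch['data'] shape (node ids, field names, and image names below are hypothetical):

// Illustrative only -- ids, field names, and image names are made up for this sketch.
// One outer entry per image_batch node; each inner entry maps the node's images onto
// one connected target field, letting the queue expand them into separate runs.
const data = [
  [
    {
      node_path: 'resize_image', // edge.target
      field_name: 'image', // edge.targetHandle
      items: [{ image_name: 'a.png' }, { image_name: 'b.png' }], // images.value
    },
  ],
];
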
@@ -2,7 +2,7 @@ import { enqueueRequested } from 'app/store/actions';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
|
||||
import { buildMultidiffusionUpscaleGraph } from 'features/nodes/util/graph/buildMultidiffusionUpscaleGraph';
|
||||
import { queueApi } from 'services/api/endpoints/queue';
|
||||
import { enqueueMutationFixedCacheKeyOptions, queueApi } from 'services/api/endpoints/queue';
|
||||
|
||||
export const addEnqueueRequestedUpscale = (startAppListening: AppStartListening) => {
|
||||
startAppListening({
|
||||
@@ -16,11 +16,7 @@ export const addEnqueueRequestedUpscale = (startAppListening: AppStartListening)
|
||||
|
||||
const batchConfig = prepareLinearUIBatch(state, g, prepend, noise, posCond, 'upscaling', 'gallery');
|
||||
|
||||
const req = dispatch(
|
||||
queueApi.endpoints.enqueueBatch.initiate(batchConfig, {
|
||||
fixedCacheKey: 'enqueueBatch',
|
||||
})
|
||||
);
|
||||
const req = dispatch(queueApi.endpoints.enqueueBatch.initiate(batchConfig, enqueueMutationFixedCacheKeyOptions));
|
||||
try {
|
||||
await req.unwrap();
|
||||
} finally {
|
||||
|
||||
@@ -1,12 +1,12 @@
 import { logger } from 'app/logging/logger';
 import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
-import type { SerializableObject } from 'common/types';
 import { parseify } from 'common/util/serialize';
 import { $templates } from 'features/nodes/store/nodesSlice';
 import { parseSchema } from 'features/nodes/util/schema/parseSchema';
 import { size } from 'lodash-es';
 import { serializeError } from 'serialize-error';
 import { appInfoApi } from 'services/api/endpoints/appInfo';
+import type { JsonObject } from 'type-fest';
 
 const log = logger('system');
 
@@ -16,12 +16,12 @@ export const addGetOpenAPISchemaListener = (startAppListening: AppStartListening
     effect: (action, { getState }) => {
       const schemaJSON = action.payload;
 
-      log.debug({ schemaJSON: parseify(schemaJSON) } as SerializableObject, 'Received OpenAPI schema');
+      log.debug({ schemaJSON: parseify(schemaJSON) } as JsonObject, 'Received OpenAPI schema');
       const { nodesAllowlist, nodesDenylist } = getState().config;
 
       const nodeTemplates = parseSchema(schemaJSON, nodesAllowlist, nodesDenylist);
 
-      log.debug({ nodeTemplates } as SerializableObject, `Built ${size(nodeTemplates)} node templates`);
+      log.debug({ nodeTemplates } as JsonObject, `Built ${size(nodeTemplates)} node templates`);
 
       $templates.set(nodeTemplates);
     },

@@ -1,334 +0,0 @@
|
||||
import { createAction } from '@reduxjs/toolkit';
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { deepClone } from 'common/util/deepClone';
|
||||
import { selectDefaultControlAdapter, selectDefaultIPAdapter } from 'features/controlLayers/hooks/addLayerHooks';
|
||||
import { getPrefixedId } from 'features/controlLayers/konva/util';
|
||||
import {
|
||||
controlLayerAdded,
|
||||
entityRasterized,
|
||||
entitySelected,
|
||||
inpaintMaskAdded,
|
||||
rasterLayerAdded,
|
||||
referenceImageAdded,
|
||||
referenceImageIPAdapterImageChanged,
|
||||
rgAdded,
|
||||
rgIPAdapterImageChanged,
|
||||
} from 'features/controlLayers/store/canvasSlice';
|
||||
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
|
||||
import type {
|
||||
CanvasControlLayerState,
|
||||
CanvasInpaintMaskState,
|
||||
CanvasRasterLayerState,
|
||||
CanvasReferenceImageState,
|
||||
CanvasRegionalGuidanceState,
|
||||
} from 'features/controlLayers/store/types';
|
||||
import { imageDTOToImageObject, imageDTOToImageWithDims } from 'features/controlLayers/store/util';
|
||||
import type { TypesafeDraggableData, TypesafeDroppableData } from 'features/dnd/types';
|
||||
import { isValidDrop } from 'features/dnd/util/isValidDrop';
|
||||
import { imageToCompareChanged, selectionChanged } from 'features/gallery/store/gallerySlice';
|
||||
import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice';
|
||||
import { upscaleInitialImageChanged } from 'features/parameters/store/upscaleSlice';
|
||||
import { imagesApi } from 'services/api/endpoints/images';
|
||||
|
||||
export const dndDropped = createAction<{
|
||||
overData: TypesafeDroppableData;
|
||||
activeData: TypesafeDraggableData;
|
||||
}>('dnd/dndDropped');
|
||||
|
||||
const log = logger('system');
|
||||
|
||||
export const addImageDroppedListener = (startAppListening: AppStartListening) => {
|
||||
startAppListening({
|
||||
actionCreator: dndDropped,
|
||||
effect: (action, { dispatch, getState }) => {
|
||||
const { activeData, overData } = action.payload;
|
||||
if (!isValidDrop(overData, activeData)) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (activeData.payloadType === 'IMAGE_DTO') {
|
||||
log.debug({ activeData, overData }, 'Image dropped');
|
||||
} else if (activeData.payloadType === 'GALLERY_SELECTION') {
|
||||
log.debug({ activeData, overData }, `Images (${getState().gallery.selection.length}) dropped`);
|
||||
} else if (activeData.payloadType === 'NODE_FIELD') {
|
||||
log.debug({ activeData, overData }, 'Node field dropped');
|
||||
} else {
|
||||
log.debug({ activeData, overData }, `Unknown payload dropped`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Image dropped on IP Adapter Layer
|
||||
*/
|
||||
if (
|
||||
overData.actionType === 'SET_IPA_IMAGE' &&
|
||||
activeData.payloadType === 'IMAGE_DTO' &&
|
||||
activeData.payload.imageDTO
|
||||
) {
|
||||
const { id } = overData.context;
|
||||
dispatch(
|
||||
referenceImageIPAdapterImageChanged({
|
||||
entityIdentifier: { id, type: 'reference_image' },
|
||||
imageDTO: activeData.payload.imageDTO,
|
||||
})
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* Image dropped on RG Layer IP Adapter
|
||||
*/
|
||||
if (
|
||||
overData.actionType === 'SET_RG_IP_ADAPTER_IMAGE' &&
|
||||
activeData.payloadType === 'IMAGE_DTO' &&
|
||||
activeData.payload.imageDTO
|
||||
) {
|
||||
const { id, referenceImageId } = overData.context;
|
||||
dispatch(
|
||||
rgIPAdapterImageChanged({
|
||||
entityIdentifier: { id, type: 'regional_guidance' },
|
||||
referenceImageId,
|
||||
imageDTO: activeData.payload.imageDTO,
|
||||
})
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* Image dropped on Raster layer
|
||||
*/
|
||||
if (
|
||||
overData.actionType === 'ADD_RASTER_LAYER_FROM_IMAGE' &&
|
||||
activeData.payloadType === 'IMAGE_DTO' &&
|
||||
activeData.payload.imageDTO
|
||||
) {
|
||||
const imageObject = imageDTOToImageObject(activeData.payload.imageDTO);
|
||||
const { x, y } = selectCanvasSlice(getState()).bbox.rect;
|
||||
const overrides: Partial<CanvasRasterLayerState> = {
|
||||
objects: [imageObject],
|
||||
position: { x, y },
|
||||
};
|
||||
dispatch(rasterLayerAdded({ overrides, isSelected: true }));
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
/**
|
||||
* Image dropped on Inpaint Mask
|
||||
*/
|
||||
if (
|
||||
overData.actionType === 'ADD_INPAINT_MASK_FROM_IMAGE' &&
|
||||
activeData.payloadType === 'IMAGE_DTO' &&
|
||||
activeData.payload.imageDTO
|
||||
) {
|
||||
const imageObject = imageDTOToImageObject(activeData.payload.imageDTO);
|
||||
const { x, y } = selectCanvasSlice(getState()).bbox.rect;
|
||||
const overrides: Partial<CanvasInpaintMaskState> = {
|
||||
objects: [imageObject],
|
||||
position: { x, y },
|
||||
};
|
||||
dispatch(inpaintMaskAdded({ overrides, isSelected: true }));
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
/**
|
||||
* Image dropped on Regional Guidance
|
||||
*/
|
||||
if (
|
||||
overData.actionType === 'ADD_REGIONAL_GUIDANCE_FROM_IMAGE' &&
|
||||
activeData.payloadType === 'IMAGE_DTO' &&
|
||||
activeData.payload.imageDTO
|
||||
) {
|
||||
const imageObject = imageDTOToImageObject(activeData.payload.imageDTO);
|
||||
const { x, y } = selectCanvasSlice(getState()).bbox.rect;
|
||||
const overrides: Partial<CanvasRegionalGuidanceState> = {
|
||||
objects: [imageObject],
|
||||
position: { x, y },
|
||||
};
|
||||
dispatch(rgAdded({ overrides, isSelected: true }));
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* Image dropped on Raster layer
|
||||
*/
|
||||
if (
|
||||
overData.actionType === 'ADD_CONTROL_LAYER_FROM_IMAGE' &&
|
||||
activeData.payloadType === 'IMAGE_DTO' &&
|
||||
activeData.payload.imageDTO
|
||||
) {
|
||||
const state = getState();
|
||||
const imageObject = imageDTOToImageObject(activeData.payload.imageDTO);
|
||||
const { x, y } = selectCanvasSlice(state).bbox.rect;
|
||||
const defaultControlAdapter = selectDefaultControlAdapter(state);
|
||||
const overrides: Partial<CanvasControlLayerState> = {
|
||||
objects: [imageObject],
|
||||
position: { x, y },
|
||||
controlAdapter: defaultControlAdapter,
|
||||
};
|
||||
dispatch(controlLayerAdded({ overrides, isSelected: true }));
|
||||
return;
|
||||
}
|
||||
|
||||
if (
|
||||
overData.actionType === 'ADD_REGIONAL_REFERENCE_IMAGE_FROM_IMAGE' &&
|
||||
activeData.payloadType === 'IMAGE_DTO' &&
|
||||
activeData.payload.imageDTO
|
||||
) {
|
||||
const state = getState();
|
||||
const ipAdapter = deepClone(selectDefaultIPAdapter(state));
|
||||
ipAdapter.image = imageDTOToImageWithDims(activeData.payload.imageDTO);
|
||||
const overrides: Partial<CanvasRegionalGuidanceState> = {
|
||||
referenceImages: [{ id: getPrefixedId('regional_guidance_reference_image'), ipAdapter }],
|
||||
};
|
||||
dispatch(rgAdded({ overrides, isSelected: true }));
|
||||
return;
|
||||
}
|
||||
|
||||
if (
|
||||
overData.actionType === 'ADD_GLOBAL_REFERENCE_IMAGE_FROM_IMAGE' &&
|
||||
activeData.payloadType === 'IMAGE_DTO' &&
|
||||
activeData.payload.imageDTO
|
||||
) {
|
||||
const state = getState();
|
||||
const ipAdapter = deepClone(selectDefaultIPAdapter(state));
|
||||
ipAdapter.image = imageDTOToImageWithDims(activeData.payload.imageDTO);
|
||||
const overrides: Partial<CanvasReferenceImageState> = {
|
||||
ipAdapter,
|
||||
};
|
||||
dispatch(referenceImageAdded({ overrides, isSelected: true }));
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* Image dropped on Raster layer
|
||||
*/
|
||||
if (overData.actionType === 'REPLACE_LAYER_WITH_IMAGE' && activeData.payloadType === 'IMAGE_DTO') {
|
||||
const state = getState();
|
||||
const { entityIdentifier } = overData.context;
|
||||
const imageObject = imageDTOToImageObject(activeData.payload.imageDTO);
|
||||
const { x, y } = selectCanvasSlice(state).bbox.rect;
|
||||
dispatch(entityRasterized({ entityIdentifier, imageObject, position: { x, y }, replaceObjects: true }));
|
||||
dispatch(entitySelected({ entityIdentifier }));
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* Image dropped on node image field
|
||||
*/
|
||||
if (
|
||||
overData.actionType === 'SET_NODES_IMAGE' &&
|
||||
activeData.payloadType === 'IMAGE_DTO' &&
|
||||
activeData.payload.imageDTO
|
||||
) {
|
||||
const { fieldName, nodeId } = overData.context;
|
||||
dispatch(
|
||||
fieldImageValueChanged({
|
||||
nodeId,
|
||||
fieldName,
|
||||
value: activeData.payload.imageDTO,
|
||||
})
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* Image selected for compare
|
||||
*/
|
||||
if (
|
||||
overData.actionType === 'SELECT_FOR_COMPARE' &&
|
||||
activeData.payloadType === 'IMAGE_DTO' &&
|
||||
activeData.payload.imageDTO
|
||||
) {
|
||||
const { imageDTO } = activeData.payload;
|
||||
dispatch(imageToCompareChanged(imageDTO));
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* Image dropped on user board
|
||||
*/
|
||||
if (
|
||||
overData.actionType === 'ADD_TO_BOARD' &&
|
||||
activeData.payloadType === 'IMAGE_DTO' &&
|
||||
activeData.payload.imageDTO
|
||||
) {
|
||||
const { imageDTO } = activeData.payload;
|
||||
const { boardId } = overData.context;
|
||||
dispatch(
|
||||
imagesApi.endpoints.addImageToBoard.initiate({
|
||||
imageDTO,
|
||||
board_id: boardId,
|
||||
})
|
||||
);
|
||||
dispatch(selectionChanged([]));
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* Image dropped on 'none' board
|
||||
*/
|
||||
if (
|
||||
overData.actionType === 'REMOVE_FROM_BOARD' &&
|
||||
activeData.payloadType === 'IMAGE_DTO' &&
|
||||
activeData.payload.imageDTO
|
||||
) {
|
||||
const { imageDTO } = activeData.payload;
|
||||
dispatch(
|
||||
imagesApi.endpoints.removeImageFromBoard.initiate({
|
||||
imageDTO,
|
||||
})
|
||||
);
|
||||
dispatch(selectionChanged([]));
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* Image dropped on upscale initial image
|
||||
*/
|
||||
if (
|
||||
overData.actionType === 'SET_UPSCALE_INITIAL_IMAGE' &&
|
||||
activeData.payloadType === 'IMAGE_DTO' &&
|
||||
activeData.payload.imageDTO
|
||||
) {
|
||||
const { imageDTO } = activeData.payload;
|
||||
|
||||
dispatch(upscaleInitialImageChanged(imageDTO));
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* Multiple images dropped on user board
|
||||
*/
|
||||
if (overData.actionType === 'ADD_TO_BOARD' && activeData.payloadType === 'GALLERY_SELECTION') {
|
||||
const imageDTOs = getState().gallery.selection;
|
||||
const { boardId } = overData.context;
|
||||
dispatch(
|
||||
imagesApi.endpoints.addImagesToBoard.initiate({
|
||||
imageDTOs,
|
||||
board_id: boardId,
|
||||
})
|
||||
);
|
||||
dispatch(selectionChanged([]));
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* Multiple images dropped on 'none' board
|
||||
*/
|
||||
if (overData.actionType === 'REMOVE_FROM_BOARD' && activeData.payloadType === 'GALLERY_SELECTION') {
|
||||
const imageDTOs = getState().gallery.selection;
|
||||
dispatch(
|
||||
imagesApi.endpoints.removeImagesFromBoard.initiate({
|
||||
imageDTOs,
|
||||
})
|
||||
);
|
||||
dispatch(selectionChanged([]));
|
||||
return;
|
||||
}
|
||||
},
|
||||
});
|
||||
};
|
||||
@@ -1,18 +1,8 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import type { RootState } from 'app/store/store';
|
||||
import {
|
||||
entityRasterized,
|
||||
entitySelected,
|
||||
referenceImageIPAdapterImageChanged,
|
||||
rgIPAdapterImageChanged,
|
||||
} from 'features/controlLayers/store/canvasSlice';
|
||||
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
|
||||
import { imageDTOToImageObject } from 'features/controlLayers/store/util';
|
||||
import { selectListBoardsQueryArgs } from 'features/gallery/store/gallerySelectors';
|
||||
import { boardIdSelected, galleryViewChanged } from 'features/gallery/store/gallerySlice';
|
||||
import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice';
|
||||
import { upscaleInitialImageChanged } from 'features/parameters/store/upscaleSlice';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { t } from 'i18next';
|
||||
import { omit } from 'lodash-es';
|
||||
@@ -51,12 +41,14 @@ export const addImageUploadedFulfilledListener = (startAppListening: AppStartLis
|
||||
|
||||
log.debug({ imageDTO }, 'Image uploaded');
|
||||
|
||||
const { postUploadAction } = action.meta.arg.originalArgs;
|
||||
|
||||
if (!postUploadAction) {
|
||||
if (action.meta.arg.originalArgs.silent || imageDTO.is_intermediate) {
|
||||
// When a "silent" upload is requested, or the image is intermediate, we can skip all post-upload actions,
|
||||
// like toasts and switching the gallery view
|
||||
return;
|
||||
}
|
||||
|
||||
const boardId = imageDTO.board_id ?? 'none';
|
||||
|
||||
const DEFAULT_UPLOADED_TOAST = {
|
||||
id: 'IMAGE_UPLOADED',
|
||||
title: t('toast.imageUploaded'),
|
||||
@@ -64,80 +56,34 @@ export const addImageUploadedFulfilledListener = (startAppListening: AppStartLis
|
||||
} as const;
|
||||
|
||||
// default action - just upload and alert user
|
||||
if (postUploadAction.type === 'TOAST') {
|
||||
const boardId = imageDTO.board_id ?? 'none';
|
||||
if (lastUploadedToastTimeout !== null) {
|
||||
window.clearTimeout(lastUploadedToastTimeout);
|
||||
}
|
||||
const toastApi = toast({
|
||||
...DEFAULT_UPLOADED_TOAST,
|
||||
title: postUploadAction.title || DEFAULT_UPLOADED_TOAST.title,
|
||||
description: getUploadedToastDescription(boardId, state),
|
||||
duration: null, // we will close the toast manually
|
||||
});
|
||||
lastUploadedToastTimeout = window.setTimeout(() => {
|
||||
toastApi.close();
|
||||
}, 3000);
|
||||
/**
|
||||
* We only want to change the board and view if this is the first upload of a batch, else we end up hijacking
|
||||
* the user's gallery board and view selection:
|
||||
* - User uploads multiple images
|
||||
* - A couple uploads finish, but others are pending still
|
||||
* - User changes the board selection
|
||||
* - Pending uploads finish and change the board back to the original board
|
||||
* - User is confused as to why the board changed
|
||||
*
|
||||
* Default to true to not require _all_ image upload handlers to set this value
|
||||
*/
|
||||
const isFirstUploadOfBatch = action.meta.arg.originalArgs.isFirstUploadOfBatch ?? true;
|
||||
if (isFirstUploadOfBatch) {
|
||||
dispatch(boardIdSelected({ boardId }));
|
||||
dispatch(galleryViewChanged('assets'));
|
||||
}
|
||||
return;
|
||||
if (lastUploadedToastTimeout !== null) {
|
||||
window.clearTimeout(lastUploadedToastTimeout);
|
||||
}
|
||||
const toastApi = toast({
|
||||
...DEFAULT_UPLOADED_TOAST,
|
||||
title: DEFAULT_UPLOADED_TOAST.title,
|
||||
description: getUploadedToastDescription(boardId, state),
|
||||
duration: null, // we will close the toast manually
|
||||
});
|
||||
lastUploadedToastTimeout = window.setTimeout(() => {
|
||||
toastApi.close();
|
||||
}, 3000);
|
||||
|
||||
if (postUploadAction.type === 'SET_UPSCALE_INITIAL_IMAGE') {
|
||||
dispatch(upscaleInitialImageChanged(imageDTO));
|
||||
toast({
|
||||
...DEFAULT_UPLOADED_TOAST,
|
||||
description: 'set as upscale initial image',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
if (postUploadAction.type === 'SET_IPA_IMAGE') {
|
||||
const { id } = postUploadAction;
|
||||
dispatch(referenceImageIPAdapterImageChanged({ entityIdentifier: { id, type: 'reference_image' }, imageDTO }));
|
||||
toast({ ...DEFAULT_UPLOADED_TOAST, description: t('toast.setControlImage') });
|
||||
return;
|
||||
}
|
||||
|
||||
if (postUploadAction.type === 'SET_RG_IP_ADAPTER_IMAGE') {
|
||||
const { id, referenceImageId } = postUploadAction;
|
||||
dispatch(
|
||||
rgIPAdapterImageChanged({ entityIdentifier: { id, type: 'regional_guidance' }, referenceImageId, imageDTO })
|
||||
);
|
||||
toast({ ...DEFAULT_UPLOADED_TOAST, description: t('toast.setControlImage') });
|
||||
return;
|
||||
}
|
||||
|
||||
if (postUploadAction.type === 'SET_NODES_IMAGE') {
|
||||
const { nodeId, fieldName } = postUploadAction;
|
||||
dispatch(fieldImageValueChanged({ nodeId, fieldName, value: imageDTO }));
|
||||
toast({ ...DEFAULT_UPLOADED_TOAST, description: `${t('toast.setNodeField')} ${fieldName}` });
|
||||
return;
|
||||
}
|
||||
|
||||
if (postUploadAction.type === 'REPLACE_LAYER_WITH_IMAGE') {
|
||||
const { entityIdentifier } = postUploadAction;
|
||||
|
||||
const state = getState();
|
||||
const imageObject = imageDTOToImageObject(imageDTO);
|
||||
const { x, y } = selectCanvasSlice(state).bbox.rect;
|
||||
dispatch(entityRasterized({ entityIdentifier, imageObject, position: { x, y }, replaceObjects: true }));
|
||||
dispatch(entitySelected({ entityIdentifier }));
|
||||
return;
|
||||
/**
|
||||
* We only want to change the board and view if this is the first upload of a batch, else we end up hijacking
|
||||
* the user's gallery board and view selection:
|
||||
* - User uploads multiple images
|
||||
* - A couple uploads finish, but others are pending still
|
||||
* - User changes the board selection
|
||||
* - Pending uploads finish and change the board back to the original board
|
||||
* - User is confused as to why the board changed
|
||||
*
|
||||
* Default to true to not require _all_ image upload handlers to set this value
|
||||
*/
|
||||
const isFirstUploadOfBatch = action.meta.arg.originalArgs.isFirstUploadOfBatch ?? true;
|
||||
if (isFirstUploadOfBatch) {
|
||||
dispatch(boardIdSelected({ boardId }));
|
||||
dispatch(galleryViewChanged('assets'));
|
||||
}
|
||||
},
|
||||
});
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import type { AppDispatch, RootState } from 'app/store/store';
|
||||
import type { SerializableObject } from 'common/types';
|
||||
import {
|
||||
controlLayerModelChanged,
|
||||
referenceImageIPAdapterModelChanged,
|
||||
@@ -41,6 +40,7 @@ import {
|
||||
isSpandrelImageToImageModelConfig,
|
||||
isT5EncoderModelConfig,
|
||||
} from 'services/api/types';
|
||||
import type { JsonObject } from 'type-fest';
|
||||
|
||||
const log = logger('models');
|
||||
|
||||
@@ -85,7 +85,7 @@ type ModelHandler = (
|
||||
models: AnyModelConfig[],
|
||||
state: RootState,
|
||||
dispatch: AppDispatch,
|
||||
log: Logger<SerializableObject>
|
||||
log: Logger<JsonObject>
|
||||
) => undefined;
|
||||
|
||||
const handleMainModels: ModelHandler = (models, state, dispatch, log) => {
|
||||
@@ -164,7 +164,7 @@ const handleVAEModels: ModelHandler = (models, state, dispatch, log) => {
|
||||
// We have a VAE selected, need to check if it is available
|
||||
|
||||
// Grab just the VAE models
|
||||
const vaeModels = models.filter(isNonFluxVAEModelConfig);
|
||||
const vaeModels = models.filter((m) => isNonFluxVAEModelConfig(m));
|
||||
|
||||
// If the current VAE model is available, we don't need to do anything
|
||||
if (vaeModels.some((m) => m.key === selectedVAEModel.key)) {
|
||||
@@ -297,7 +297,7 @@ const handleUpscaleModel: ModelHandler = (models, state, dispatch, log) => {
|
||||
|
||||
const handleT5EncoderModels: ModelHandler = (models, state, dispatch, log) => {
|
||||
const selectedT5EncoderModel = state.params.t5EncoderModel;
|
||||
const t5EncoderModels = models.filter(isT5EncoderModelConfig);
|
||||
const t5EncoderModels = models.filter((m) => isT5EncoderModelConfig(m));
|
||||
|
||||
// If the currently selected model is available, we don't need to do anything
|
||||
if (selectedT5EncoderModel && t5EncoderModels.some((m) => m.key === selectedT5EncoderModel.key)) {
|
||||
@@ -325,7 +325,7 @@ const handleT5EncoderModels: ModelHandler = (models, state, dispatch, log) => {
|
||||
|
||||
const handleCLIPEmbedModels: ModelHandler = (models, state, dispatch, log) => {
|
||||
const selectedCLIPEmbedModel = state.params.clipEmbedModel;
|
||||
const CLIPEmbedModels = models.filter(isCLIPEmbedModelConfig);
|
||||
const CLIPEmbedModels = models.filter((m) => isCLIPEmbedModelConfig(m));
|
||||
|
||||
// If the currently selected model is available, we don't need to do anything
|
||||
if (selectedCLIPEmbedModel && CLIPEmbedModels.some((m) => m.key === selectedCLIPEmbedModel.key)) {
|
||||
@@ -353,7 +353,7 @@ const handleCLIPEmbedModels: ModelHandler = (models, state, dispatch, log) => {
|
||||
|
||||
const handleFLUXVAEModels: ModelHandler = (models, state, dispatch, log) => {
|
||||
const selectedFLUXVAEModel = state.params.fluxVAE;
|
||||
const fluxVAEModels = models.filter(isFluxVAEModelConfig);
|
||||
const fluxVAEModels = models.filter((m) => isFluxVAEModelConfig(m));
|
||||
|
||||
// If the currently selected model is available, we don't need to do anything
|
||||
if (selectedFLUXVAEModel && fluxVAEModels.some((m) => m.key === selectedFLUXVAEModel.key)) {
|
||||
|
||||
@@ -4,8 +4,10 @@ import { atom } from 'nanostores';
|
||||
/**
|
||||
* A fallback non-writable atom that always returns `false`, used when a nanostores atom is only conditionally available
|
||||
* in a hook or component.
|
||||
*
|
||||
* @knipignore
|
||||
*/
|
||||
// export const $false: ReadableAtom<boolean> = atom(false);
|
||||
export const $false: ReadableAtom<boolean> = atom(false);
|
||||
/**
|
||||
* A fallback non-writable atom that always returns `true`, used when a nanostores atom is only conditionally available
|
||||
* in a hook or component.
|
||||
|
||||
@@ -3,7 +3,6 @@ import { autoBatchEnhancer, combineReducers, configureStore } from '@reduxjs/too
|
||||
import { logger } from 'app/logging/logger';
|
||||
import { idbKeyValDriver } from 'app/store/enhancers/reduxRemember/driver';
|
||||
import { errorHandler } from 'app/store/enhancers/reduxRemember/errors';
|
||||
import type { SerializableObject } from 'common/types';
|
||||
import { deepClone } from 'common/util/deepClone';
|
||||
import { changeBoardModalSlice } from 'features/changeBoardModal/store/slice';
|
||||
import { canvasSettingsPersistConfig, canvasSettingsSlice } from 'features/controlLayers/store/canvasSettingsSlice';
|
||||
@@ -37,6 +36,7 @@ import undoable from 'redux-undo';
|
||||
import { serializeError } from 'serialize-error';
|
||||
import { api } from 'services/api';
|
||||
import { authToastMiddleware } from 'services/api/authToastMiddleware';
|
||||
import type { JsonObject } from 'type-fest';
|
||||
|
||||
import { STORAGE_PREFIX } from './constants';
|
||||
import { actionSanitizer } from './middleware/devtools/actionSanitizer';
|
||||
@@ -139,7 +139,7 @@ const unserialize: UnserializeFunction = (data, key) => {
|
||||
{
|
||||
persistedData: parsed,
|
||||
rehydratedData: transformed,
|
||||
diff: diff(parsed, transformed) as SerializableObject, // this is always serializable
|
||||
diff: diff(parsed, transformed) as JsonObject, // this is always serializable
|
||||
},
|
||||
`Rehydrated slice "${key}"`
|
||||
);
|
||||
|
||||
@@ -26,7 +26,6 @@ export type AppFeature =
|
||||
| 'bulkDownload'
|
||||
| 'starterModels'
|
||||
| 'hfToken';
|
||||
|
||||
/**
|
||||
* A disable-able Stable Diffusion feature
|
||||
*/
|
||||
|
||||
@@ -1,251 +0,0 @@
|
||||
import type { ChakraProps, FlexProps, SystemStyleObject } from '@invoke-ai/ui-library';
|
||||
import { Flex, Icon, Image } from '@invoke-ai/ui-library';
|
||||
import { IAILoadingImageFallback, IAINoContentFallback } from 'common/components/IAIImageFallback';
|
||||
import ImageMetadataOverlay from 'common/components/ImageMetadataOverlay';
|
||||
import { useImageUploadButton } from 'common/hooks/useImageUploadButton';
|
||||
import type { TypesafeDraggableData, TypesafeDroppableData } from 'features/dnd/types';
|
||||
import { useImageContextMenu } from 'features/gallery/components/ImageContextMenu/ImageContextMenu';
|
||||
import type { MouseEvent, ReactElement, ReactNode, SyntheticEvent } from 'react';
|
||||
import { memo, useCallback, useMemo, useRef } from 'react';
|
||||
import { PiImageBold, PiUploadSimpleBold } from 'react-icons/pi';
|
||||
import type { ImageDTO, PostUploadAction } from 'services/api/types';
|
||||
|
||||
import IAIDraggable from './IAIDraggable';
|
||||
import IAIDroppable from './IAIDroppable';
|
||||
|
||||
const defaultUploadElement = <Icon as={PiUploadSimpleBold} boxSize={16} />;
|
||||
|
||||
const defaultNoContentFallback = <IAINoContentFallback icon={PiImageBold} />;
|
||||
|
||||
const baseStyles: SystemStyleObject = {
|
||||
touchAction: 'none',
|
||||
userSelect: 'none',
|
||||
webkitUserSelect: 'none',
|
||||
};
|
||||
|
||||
const sx: SystemStyleObject = {
|
||||
...baseStyles,
|
||||
'.gallery-image-container::before': {
|
||||
content: '""',
|
||||
display: 'inline-block',
|
||||
position: 'absolute',
|
||||
top: 0,
|
||||
left: 0,
|
||||
right: 0,
|
||||
bottom: 0,
|
||||
pointerEvents: 'none',
|
||||
borderRadius: 'base',
|
||||
},
|
||||
'&[data-selected="selected"]>.gallery-image-container::before': {
|
||||
boxShadow:
|
||||
'inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-500), inset 0px 0px 0px 4px var(--invoke-colors-invokeBlue-800)',
|
||||
},
|
||||
'&[data-selected="selectedForCompare"]>.gallery-image-container::before': {
|
||||
boxShadow:
|
||||
'inset 0px 0px 0px 3px var(--invoke-colors-invokeGreen-300), inset 0px 0px 0px 4px var(--invoke-colors-invokeGreen-800)',
|
||||
},
|
||||
'&:hover>.gallery-image-container::before': {
|
||||
boxShadow:
|
||||
'inset 0px 0px 0px 2px var(--invoke-colors-invokeBlue-300), inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-800)',
|
||||
},
|
||||
'&:hover[data-selected="selected"]>.gallery-image-container::before': {
|
||||
boxShadow:
|
||||
'inset 0px 0px 0px 3px var(--invoke-colors-invokeBlue-400), inset 0px 0px 0px 4px var(--invoke-colors-invokeBlue-800)',
|
||||
},
|
||||
'&:hover[data-selected="selectedForCompare"]>.gallery-image-container::before': {
|
||||
boxShadow:
|
||||
'inset 0px 0px 0px 3px var(--invoke-colors-invokeGreen-200), inset 0px 0px 0px 4px var(--invoke-colors-invokeGreen-800)',
|
||||
},
|
||||
};
|
||||
|
||||
type IAIDndImageProps = FlexProps & {
|
||||
imageDTO: ImageDTO | undefined;
|
||||
onError?: (event: SyntheticEvent<HTMLImageElement>) => void;
|
||||
onLoad?: (event: SyntheticEvent<HTMLImageElement>) => void;
|
||||
onClick?: (event: MouseEvent<HTMLDivElement>) => void;
|
||||
withMetadataOverlay?: boolean;
|
||||
isDragDisabled?: boolean;
|
||||
isDropDisabled?: boolean;
|
||||
isUploadDisabled?: boolean;
|
||||
minSize?: number;
|
||||
postUploadAction?: PostUploadAction;
|
||||
imageSx?: ChakraProps['sx'];
|
||||
fitContainer?: boolean;
|
||||
droppableData?: TypesafeDroppableData;
|
||||
draggableData?: TypesafeDraggableData;
|
||||
dropLabel?: string;
|
||||
isSelected?: boolean;
|
||||
isSelectedForCompare?: boolean;
|
||||
thumbnail?: boolean;
|
||||
noContentFallback?: ReactElement;
|
||||
useThumbailFallback?: boolean;
|
||||
withHoverOverlay?: boolean;
|
||||
children?: JSX.Element;
|
||||
uploadElement?: ReactNode;
|
||||
dataTestId?: string;
|
||||
};
|
||||
|
||||
const IAIDndImage = (props: IAIDndImageProps) => {
|
||||
const {
|
||||
imageDTO,
|
||||
onError,
|
||||
onClick,
|
||||
withMetadataOverlay = false,
|
||||
isDropDisabled = false,
|
||||
isDragDisabled = false,
|
||||
isUploadDisabled = false,
|
||||
minSize = 24,
|
||||
postUploadAction,
|
||||
imageSx,
|
||||
fitContainer = false,
|
||||
droppableData,
|
||||
draggableData,
|
||||
dropLabel,
|
||||
isSelected = false,
|
||||
isSelectedForCompare = false,
|
||||
thumbnail = false,
|
||||
noContentFallback = defaultNoContentFallback,
|
||||
uploadElement = defaultUploadElement,
|
||||
useThumbailFallback,
|
||||
withHoverOverlay = false,
|
||||
children,
|
||||
dataTestId,
|
||||
...rest
|
||||
} = props;
|
||||
|
||||
const openInNewTab = useCallback(
|
||||
(e: MouseEvent) => {
|
||||
if (!imageDTO) {
|
||||
return;
|
||||
}
|
||||
if (e.button !== 1) {
|
||||
return;
|
||||
}
|
||||
window.open(imageDTO.image_url, '_blank');
|
||||
},
|
||||
[imageDTO]
|
||||
);
|
||||
|
||||
const ref = useRef<HTMLDivElement>(null);
|
||||
useImageContextMenu(imageDTO, ref);
|
||||
|
||||
return (
|
||||
<Flex
|
||||
ref={ref}
|
||||
width="full"
|
||||
height="full"
|
||||
alignItems="center"
|
||||
justifyContent="center"
|
||||
position="relative"
|
||||
minW={minSize ? minSize : undefined}
|
||||
minH={minSize ? minSize : undefined}
|
||||
userSelect="none"
|
||||
cursor={isDragDisabled || !imageDTO ? 'default' : 'pointer'}
|
||||
sx={withHoverOverlay ? sx : baseStyles}
|
||||
data-selected={isSelectedForCompare ? 'selectedForCompare' : isSelected ? 'selected' : undefined}
|
||||
{...rest}
|
||||
>
|
||||
{imageDTO && (
|
||||
<Flex
|
||||
className="gallery-image-container"
|
||||
w="full"
|
||||
h="full"
|
||||
position={fitContainer ? 'absolute' : 'relative'}
|
||||
alignItems="center"
|
||||
justifyContent="center"
|
||||
>
|
||||
<Image
|
||||
src={thumbnail ? imageDTO.thumbnail_url : imageDTO.image_url}
|
||||
fallbackStrategy="beforeLoadOrError"
|
||||
fallbackSrc={useThumbailFallback ? imageDTO.thumbnail_url : undefined}
|
||||
fallback={useThumbailFallback ? undefined : <IAILoadingImageFallback image={imageDTO} />}
|
||||
onError={onError}
|
||||
draggable={false}
|
||||
w={imageDTO.width}
|
||||
objectFit="contain"
|
||||
maxW="full"
|
||||
maxH="full"
|
||||
borderRadius="base"
|
||||
sx={imageSx}
|
||||
data-testid={dataTestId}
|
||||
/>
|
||||
{withMetadataOverlay && <ImageMetadataOverlay imageDTO={imageDTO} />}
|
||||
</Flex>
|
||||
)}
|
||||
{!imageDTO && !isUploadDisabled && (
|
||||
<UploadButton
|
||||
isUploadDisabled={isUploadDisabled}
|
||||
postUploadAction={postUploadAction}
|
||||
uploadElement={uploadElement}
|
||||
minSize={minSize}
|
||||
/>
|
||||
)}
|
||||
{!imageDTO && isUploadDisabled && noContentFallback}
|
||||
{imageDTO && !isDragDisabled && (
|
||||
<IAIDraggable
|
||||
data={draggableData}
|
||||
disabled={isDragDisabled || !imageDTO}
|
||||
onClick={onClick}
|
||||
onAuxClick={openInNewTab}
|
||||
/>
|
||||
)}
|
||||
{children}
|
||||
{!isDropDisabled && <IAIDroppable data={droppableData} disabled={isDropDisabled} dropLabel={dropLabel} />}
|
||||
</Flex>
|
||||
);
|
||||
};
|
||||
|
||||
export default memo(IAIDndImage);
|
||||
|
||||
const UploadButton = memo(
|
||||
({
|
||||
isUploadDisabled,
|
||||
postUploadAction,
|
||||
uploadElement,
|
||||
minSize,
|
||||
}: {
|
||||
isUploadDisabled: boolean;
|
||||
postUploadAction?: PostUploadAction;
|
||||
uploadElement: ReactNode;
|
||||
minSize: number;
|
||||
}) => {
|
||||
const { getUploadButtonProps, getUploadInputProps } = useImageUploadButton({
|
||||
postUploadAction,
|
||||
isDisabled: isUploadDisabled,
|
||||
});
|
||||
|
||||
const uploadButtonStyles = useMemo<SystemStyleObject>(() => {
|
||||
const styles: SystemStyleObject = {
|
||||
minH: minSize,
|
||||
w: 'full',
|
||||
h: 'full',
|
||||
alignItems: 'center',
|
||||
justifyContent: 'center',
|
||||
borderRadius: 'base',
|
||||
transitionProperty: 'common',
|
||||
transitionDuration: '0.1s',
|
||||
color: 'base.500',
|
||||
};
|
||||
if (!isUploadDisabled) {
|
||||
Object.assign(styles, {
|
||||
cursor: 'pointer',
|
||||
bg: 'base.700',
|
||||
_hover: {
|
||||
bg: 'base.650',
|
||||
color: 'base.300',
|
||||
},
|
||||
});
|
||||
}
|
||||
return styles;
|
||||
}, [isUploadDisabled, minSize]);
|
||||
|
||||
return (
|
||||
<Flex sx={uploadButtonStyles} {...getUploadButtonProps()}>
|
||||
<input {...getUploadInputProps()} />
|
||||
{uploadElement}
|
||||
</Flex>
|
||||
);
|
||||
}
|
||||
);
|
||||
|
||||
UploadButton.displayName = 'UploadButton';
|
||||
@@ -1,38 +0,0 @@
|
||||
import type { BoxProps } from '@invoke-ai/ui-library';
|
||||
import { Box } from '@invoke-ai/ui-library';
|
||||
import { useDraggableTypesafe } from 'features/dnd/hooks/typesafeHooks';
|
||||
import type { TypesafeDraggableData } from 'features/dnd/types';
|
||||
import { memo, useRef } from 'react';
|
||||
import { v4 as uuidv4 } from 'uuid';
|
||||
|
||||
type IAIDraggableProps = BoxProps & {
|
||||
disabled?: boolean;
|
||||
data?: TypesafeDraggableData;
|
||||
};
|
||||
|
||||
const IAIDraggable = (props: IAIDraggableProps) => {
|
||||
const { data, disabled, ...rest } = props;
|
||||
const dndId = useRef(uuidv4());
|
||||
|
||||
const { attributes, listeners, setNodeRef } = useDraggableTypesafe({
|
||||
id: dndId.current,
|
||||
disabled,
|
||||
data,
|
||||
});
|
||||
|
||||
return (
|
||||
<Box
|
||||
ref={setNodeRef}
|
||||
position="absolute"
|
||||
w="full"
|
||||
h="full"
|
||||
top={0}
|
||||
insetInlineStart={0}
|
||||
{...attributes}
|
||||
{...listeners}
|
||||
{...rest}
|
||||
/>
|
||||
);
|
||||
};
|
||||
|
||||
export default memo(IAIDraggable);
|
||||
@@ -1,64 +0,0 @@
|
||||
import { Flex, Text } from '@invoke-ai/ui-library';
|
||||
import { memo } from 'react';
|
||||
|
||||
type Props = {
|
||||
isOver: boolean;
|
||||
label?: string;
|
||||
withBackdrop?: boolean;
|
||||
};
|
||||
|
||||
const IAIDropOverlay = (props: Props) => {
|
||||
const { isOver, label, withBackdrop = true } = props;
|
||||
return (
|
||||
<Flex position="absolute" top={0} right={0} bottom={0} left={0}>
|
||||
<Flex
|
||||
position="absolute"
|
||||
top={0}
|
||||
right={0}
|
||||
bottom={0}
|
||||
left={0}
|
||||
w="full"
|
||||
h="full"
|
||||
bg={withBackdrop ? 'base.900' : 'transparent'}
|
||||
opacity={0.7}
|
||||
borderRadius="base"
|
||||
alignItems="center"
|
||||
justifyContent="center"
|
||||
transitionProperty="common"
|
||||
transitionDuration="0.1s"
|
||||
/>
|
||||
|
||||
<Flex
|
||||
position="absolute"
|
||||
top={0.5}
|
||||
right={0.5}
|
||||
bottom={0.5}
|
||||
left={0.5}
|
||||
opacity={1}
|
||||
borderWidth={1.5}
|
||||
borderColor={isOver ? 'invokeYellow.300' : 'base.500'}
|
||||
borderRadius="base"
|
||||
borderStyle="dashed"
|
||||
transitionProperty="common"
|
||||
transitionDuration="0.1s"
|
||||
alignItems="center"
|
||||
justifyContent="center"
|
||||
>
|
||||
{label && (
|
||||
<Text
|
||||
fontSize="lg"
|
||||
fontWeight="semibold"
|
||||
color={isOver ? 'invokeYellow.300' : 'base.500'}
|
||||
transitionProperty="common"
|
||||
transitionDuration="0.1s"
|
||||
textAlign="center"
|
||||
>
|
||||
{label}
|
||||
</Text>
|
||||
)}
|
||||
</Flex>
|
||||
</Flex>
|
||||
);
|
||||
};
|
||||
|
||||
export default memo(IAIDropOverlay);
|
||||
@@ -1,46 +0,0 @@
|
||||
import { Box } from '@invoke-ai/ui-library';
|
||||
import { useDroppableTypesafe } from 'features/dnd/hooks/typesafeHooks';
|
||||
import type { TypesafeDroppableData } from 'features/dnd/types';
|
||||
import { isValidDrop } from 'features/dnd/util/isValidDrop';
|
||||
import { AnimatePresence } from 'framer-motion';
|
||||
import { memo, useRef } from 'react';
|
||||
import { v4 as uuidv4 } from 'uuid';
|
||||
|
||||
import IAIDropOverlay from './IAIDropOverlay';
|
||||
|
||||
type IAIDroppableProps = {
|
||||
dropLabel?: string;
|
||||
disabled?: boolean;
|
||||
data?: TypesafeDroppableData;
|
||||
};
|
||||
|
||||
const IAIDroppable = (props: IAIDroppableProps) => {
|
||||
const { dropLabel, data, disabled } = props;
|
||||
const dndId = useRef(uuidv4());
|
||||
|
||||
const { isOver, setNodeRef, active } = useDroppableTypesafe({
|
||||
id: dndId.current,
|
||||
disabled,
|
||||
data,
|
||||
});
|
||||
|
||||
return (
|
||||
<Box
|
||||
ref={setNodeRef}
|
||||
position="absolute"
|
||||
top={0}
|
||||
right={0}
|
||||
bottom={0}
|
||||
left={0}
|
||||
w="full"
|
||||
h="full"
|
||||
pointerEvents={active ? 'auto' : 'none'}
|
||||
>
|
||||
<AnimatePresence>
|
||||
{isValidDrop(data, active?.data.current) && <IAIDropOverlay isOver={isOver} label={dropLabel} />}
|
||||
</AnimatePresence>
|
||||
</Box>
|
||||
);
|
||||
};
|
||||
|
||||
export default memo(IAIDroppable);
|
||||
@@ -1,24 +0,0 @@
|
||||
import type { SystemStyleObject } from '@invoke-ai/ui-library';
|
||||
import { Box, Skeleton } from '@invoke-ai/ui-library';
|
||||
import { memo } from 'react';
|
||||
|
||||
const skeletonStyles: SystemStyleObject = {
|
||||
position: 'relative',
|
||||
height: 'full',
|
||||
width: 'full',
|
||||
'::before': {
|
||||
content: "''",
|
||||
display: 'block',
|
||||
pt: '100%',
|
||||
},
|
||||
};
|
||||
|
||||
const IAIFillSkeleton = () => {
|
||||
return (
|
||||
<Skeleton sx={skeletonStyles}>
|
||||
<Box position="absolute" top={0} insetInlineStart={0} height="full" width="full" />
|
||||
</Skeleton>
|
||||
);
|
||||
};
|
||||
|
||||
export default memo(IAIFillSkeleton);
|
||||
@@ -6,7 +6,7 @@ import type { ImageDTO } from 'services/api/types';
|
||||
|
||||
type Props = { image: ImageDTO | undefined };
|
||||
|
||||
export const IAILoadingImageFallback = memo((props: Props) => {
|
||||
const IAILoadingImageFallback = memo((props: Props) => {
|
||||
if (props.image) {
|
||||
return (
|
||||
<Skeleton
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
import { Badge, Flex } from '@invoke-ai/ui-library';
|
||||
import { memo } from 'react';
|
||||
import type { ImageDTO } from 'services/api/types';
|
||||
|
||||
type ImageMetadataOverlayProps = {
|
||||
imageDTO: ImageDTO;
|
||||
};
|
||||
|
||||
const ImageMetadataOverlay = ({ imageDTO }: ImageMetadataOverlayProps) => {
|
||||
return (
|
||||
<Flex
|
||||
pointerEvents="none"
|
||||
flexDirection="column"
|
||||
position="absolute"
|
||||
top={0}
|
||||
insetInlineStart={0}
|
||||
p={2}
|
||||
alignItems="flex-start"
|
||||
gap={2}
|
||||
>
|
||||
<Badge variant="solid" colorScheme="base">
|
||||
{imageDTO.width} × {imageDTO.height}
|
||||
</Badge>
|
||||
</Flex>
|
||||
);
|
||||
};
|
||||
|
||||
export default memo(ImageMetadataOverlay);
|
||||
@@ -1,89 +0,0 @@
|
||||
import { Box, Flex, Heading } from '@invoke-ai/ui-library';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import { selectSelectedBoardId } from 'features/gallery/store/gallerySelectors';
|
||||
import { selectMaxImageUploadCount } from 'features/system/store/configSlice';
|
||||
import { memo } from 'react';
|
||||
import type { DropzoneState } from 'react-dropzone';
|
||||
import { useHotkeys } from 'react-hotkeys-hook';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { useBoardName } from 'services/api/hooks/useBoardName';
|
||||
|
||||
type ImageUploadOverlayProps = {
|
||||
dropzone: DropzoneState;
|
||||
setIsHandlingUpload: (isHandlingUpload: boolean) => void;
|
||||
};
|
||||
|
||||
const ImageUploadOverlay = (props: ImageUploadOverlayProps) => {
|
||||
const { dropzone, setIsHandlingUpload } = props;
|
||||
|
||||
useHotkeys(
|
||||
'esc',
|
||||
() => {
|
||||
setIsHandlingUpload(false);
|
||||
},
|
||||
[setIsHandlingUpload]
|
||||
);
|
||||
|
||||
return (
|
||||
<Box position="absolute" top={0} right={0} bottom={0} left={0} zIndex={999} backdropFilter="blur(20px)">
|
||||
<Flex position="absolute" top={0} right={0} bottom={0} left={0} bg="base.900" opacity={0.7} />
|
||||
<Flex
|
||||
position="absolute"
|
||||
flexDir="column"
|
||||
gap={4}
|
||||
top={2}
|
||||
right={2}
|
||||
bottom={2}
|
||||
left={2}
|
||||
opacity={1}
|
||||
borderWidth={2}
|
||||
borderColor={dropzone.isDragAccept ? 'invokeYellow.300' : 'error.500'}
|
||||
borderRadius="base"
|
||||
borderStyle="dashed"
|
||||
transitionProperty="common"
|
||||
transitionDuration="0.1s"
|
||||
alignItems="center"
|
||||
justifyContent="center"
|
||||
color={dropzone.isDragReject ? 'error.300' : undefined}
|
||||
>
|
||||
{dropzone.isDragAccept && <DragAcceptMessage />}
|
||||
{!dropzone.isDragAccept && <DragRejectMessage />}
|
||||
</Flex>
|
||||
</Box>
|
||||
);
|
||||
};
|
||||
export default memo(ImageUploadOverlay);
|
||||
|
||||
const DragAcceptMessage = () => {
|
||||
const { t } = useTranslation();
|
||||
const selectedBoardId = useAppSelector(selectSelectedBoardId);
|
||||
const boardName = useBoardName(selectedBoardId);
|
||||
|
||||
return (
|
||||
<>
|
||||
<Heading size="lg">{t('gallery.dropToUpload')}</Heading>
|
||||
<Heading size="md">{t('toast.imagesWillBeAddedTo', { boardName })}</Heading>
|
||||
</>
|
||||
);
|
||||
};
|
||||
|
||||
const DragRejectMessage = () => {
|
||||
const { t } = useTranslation();
|
||||
const maxImageUploadCount = useAppSelector(selectMaxImageUploadCount);
|
||||
|
||||
if (maxImageUploadCount === undefined) {
|
||||
return (
|
||||
<>
|
||||
<Heading size="lg">{t('toast.invalidUpload')}</Heading>
|
||||
<Heading size="md">{t('toast.uploadFailedInvalidUploadDesc')}</Heading>
|
||||
</>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<>
|
||||
<Heading size="lg">{t('toast.invalidUpload')}</Heading>
|
||||
<Heading size="md">{t('toast.uploadFailedInvalidUploadDesc_withCount', { count: maxImageUploadCount })}</Heading>
|
||||
</>
|
||||
);
|
||||
};
|
||||
@@ -1,5 +1,6 @@
|
||||
import type { PopoverProps } from '@invoke-ai/ui-library';
|
||||
import commercialLicenseBg from 'public/assets/images/commercial-license-bg.png';
|
||||
import denoisingStrength from 'public/assets/images/denoising-strength.png';
|
||||
|
||||
export type Feature =
|
||||
| 'clipSkip'
|
||||
@@ -125,7 +126,7 @@ export const POPOVER_DATA: { [key in Feature]?: PopoverData } = {
|
||||
href: 'https://support.invoke.ai/support/solutions/articles/151000158838-compositing-settings',
|
||||
},
|
||||
infillMethod: {
|
||||
href: 'https://support.invoke.ai/support/solutions/articles/151000158841-infill-and-scaling',
|
||||
href: 'https://support.invoke.ai/support/solutions/articles/151000158838-compositing-settings',
|
||||
},
|
||||
scaleBeforeProcessing: {
|
||||
href: 'https://support.invoke.ai/support/solutions/articles/151000158841',
|
||||
@@ -138,6 +139,7 @@ export const POPOVER_DATA: { [key in Feature]?: PopoverData } = {
|
||||
},
|
||||
paramDenoisingStrength: {
|
||||
href: 'https://support.invoke.ai/support/solutions/articles/151000094998-image-to-image',
|
||||
image: denoisingStrength,
|
||||
},
|
||||
paramHrf: {
|
||||
href: 'https://support.invoke.ai/support/solutions/articles/151000096700-how-can-i-get-larger-images-what-does-upscaling-do-',
|
||||
|
||||
@@ -1,9 +1,13 @@
|
||||
import { combine } from '@atlaskit/pragmatic-drag-and-drop/combine';
|
||||
import { autoScrollForElements } from '@atlaskit/pragmatic-drag-and-drop-auto-scroll/element';
|
||||
import { autoScrollForExternal } from '@atlaskit/pragmatic-drag-and-drop-auto-scroll/external';
|
||||
import type { ChakraProps } from '@invoke-ai/ui-library';
|
||||
import { Box, Flex } from '@invoke-ai/ui-library';
|
||||
import { getOverlayScrollbarsParams } from 'common/components/OverlayScrollbars/constants';
|
||||
import type { OverlayScrollbarsComponentRef } from 'overlayscrollbars-react';
|
||||
import { OverlayScrollbarsComponent } from 'overlayscrollbars-react';
|
||||
import type { CSSProperties, PropsWithChildren } from 'react';
|
||||
import { memo, useMemo } from 'react';
|
||||
import { memo, useEffect, useMemo, useState } from 'react';
|
||||
|
||||
type Props = PropsWithChildren & {
|
||||
maxHeight?: ChakraProps['maxHeight'];
|
||||
@@ -11,17 +15,38 @@ type Props = PropsWithChildren & {
|
||||
overflowY?: 'hidden' | 'scroll';
|
||||
};
|
||||
|
||||
const styles: CSSProperties = { height: '100%', width: '100%' };
|
||||
const styles: CSSProperties = { position: 'absolute', top: 0, left: 0, right: 0, bottom: 0 };
|
||||
|
||||
const ScrollableContent = ({ children, maxHeight, overflowX = 'hidden', overflowY = 'scroll' }: Props) => {
|
||||
const overlayscrollbarsOptions = useMemo(
|
||||
() => getOverlayScrollbarsParams(overflowX, overflowY).options,
|
||||
[overflowX, overflowY]
|
||||
);
|
||||
const [os, osRef] = useState<OverlayScrollbarsComponentRef | null>(null);
|
||||
useEffect(() => {
|
||||
const osInstance = os?.osInstance();
|
||||
|
||||
if (!osInstance) {
|
||||
return;
|
||||
}
|
||||
|
||||
const element = osInstance.elements().viewport;
|
||||
|
||||
// `pragmatic-drag-and-drop-auto-scroll` requires the element to have `overflow-y: scroll` or `overflow-y: auto`
|
||||
// else it logs an ugly warning. In our case, using a custom scrollbar library, it will be 'hidden' by default.
|
||||
// To prevent the erroneous warning, we temporarily set the overflow-y to 'scroll' and then revert it back.
|
||||
const overflowY = element.style.overflowY; // starts 'hidden'
|
||||
element.style.setProperty('overflow-y', 'scroll', 'important');
|
||||
const cleanup = combine(autoScrollForElements({ element }), autoScrollForExternal({ element }));
|
||||
element.style.setProperty('overflow-y', overflowY);
|
||||
|
||||
return cleanup;
|
||||
}, [os]);
|
||||
|
||||
return (
|
||||
<Flex w="full" h="full" maxHeight={maxHeight} position="relative">
|
||||
<Box position="absolute" top={0} left={0} right={0} bottom={0}>
|
||||
<OverlayScrollbarsComponent defer style={styles} options={overlayscrollbarsOptions}>
|
||||
<OverlayScrollbarsComponent ref={osRef} style={styles} options={overlayscrollbarsOptions}>
|
||||
{children}
|
||||
</OverlayScrollbarsComponent>
|
||||
</Box>
|
||||
|
||||
@@ -2,6 +2,7 @@ import { deepClone } from 'common/util/deepClone';
|
||||
import { merge } from 'lodash-es';
|
||||
import { ClickScrollPlugin, OverlayScrollbars } from 'overlayscrollbars';
|
||||
import type { UseOverlayScrollbarsParams } from 'overlayscrollbars-react';
|
||||
import type { CSSProperties } from 'react';
|
||||
|
||||
OverlayScrollbars.plugin(ClickScrollPlugin);
|
||||
|
||||
@@ -27,3 +28,8 @@ export const getOverlayScrollbarsParams = (
|
||||
merge(params, { options: { overflow: { y: overflowY, x: overflowX } } });
|
||||
return params;
|
||||
};
|
||||
|
||||
export const overlayScrollbarsStyles: CSSProperties = {
|
||||
height: '100%',
|
||||
width: '100%',
|
||||
};
|
||||
|
||||
@@ -0,0 +1,42 @@
|
||||
import { MenuItem } from '@invoke-ai/ui-library';
|
||||
import { useAppDispatch } from 'app/store/storeHooks';
|
||||
import {
|
||||
useNewCanvasSession,
|
||||
useNewGallerySession,
|
||||
} from 'features/controlLayers/components/NewSessionConfirmationAlertDialog';
|
||||
import { canvasReset } from 'features/controlLayers/store/actions';
|
||||
import { paramsReset } from 'features/controlLayers/store/paramsSlice';
|
||||
import { memo, useCallback } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { PiArrowsCounterClockwiseBold, PiFilePlusBold } from 'react-icons/pi';
|
||||
|
||||
export const SessionMenuItems = memo(() => {
|
||||
const { t } = useTranslation();
|
||||
const dispatch = useAppDispatch();
|
||||
const { newGallerySessionWithDialog } = useNewGallerySession();
|
||||
const { newCanvasSessionWithDialog } = useNewCanvasSession();
|
||||
const resetCanvasLayers = useCallback(() => {
|
||||
dispatch(canvasReset());
|
||||
}, [dispatch]);
|
||||
const resetGenerationSettings = useCallback(() => {
|
||||
dispatch(paramsReset());
|
||||
}, [dispatch]);
|
||||
return (
|
||||
<>
|
||||
<MenuItem icon={<PiFilePlusBold />} onClick={newGallerySessionWithDialog}>
|
||||
{t('controlLayers.newGallerySession')}
|
||||
</MenuItem>
|
||||
<MenuItem icon={<PiFilePlusBold />} onClick={newCanvasSessionWithDialog}>
|
||||
{t('controlLayers.newCanvasSession')}
|
||||
</MenuItem>
|
||||
<MenuItem icon={<PiArrowsCounterClockwiseBold />} onClick={resetCanvasLayers}>
|
||||
{t('controlLayers.resetCanvasLayers')}
|
||||
</MenuItem>
|
||||
<MenuItem icon={<PiArrowsCounterClockwiseBold />} onClick={resetGenerationSettings}>
|
||||
{t('controlLayers.resetGenerationSettings')}
|
||||
</MenuItem>
|
||||
</>
|
||||
);
|
||||
});
|
||||
|
||||
SessionMenuItems.displayName = 'SessionMenuItems';
|
||||
57
invokeai/frontend/web/src/common/components/WavyLine.tsx
Normal file
57
invokeai/frontend/web/src/common/components/WavyLine.tsx
Normal file
@@ -0,0 +1,57 @@
|
||||
type Props = {
|
||||
/**
|
||||
* The amplitude of the wave. 0 is a straight line, higher values create more pronounced waves.
|
||||
*/
|
||||
amplitude: number;
|
||||
/**
|
||||
* The number of segments in the line. More segments create a smoother wave.
|
||||
*/
|
||||
segments?: number;
|
||||
/**
|
||||
* The color of the wave.
|
||||
*/
|
||||
stroke: string;
|
||||
/**
|
||||
* The width of the wave.
|
||||
*/
|
||||
strokeWidth: number;
|
||||
/**
|
||||
* The width of the SVG.
|
||||
*/
|
||||
width: number;
|
||||
/**
|
||||
* The height of the SVG.
|
||||
*/
|
||||
height: number;
|
||||
};
|
||||
|
||||
const WavyLine = ({ amplitude, stroke, strokeWidth, width, height, segments = 5 }: Props) => {
|
||||
// Calculate the path dynamically based on waviness
|
||||
const generatePath = () => {
|
||||
if (amplitude === 0) {
|
||||
// If waviness is 0, return a straight line
|
||||
return `M0,${height / 2} L${width},${height / 2}`;
|
||||
}
|
||||
|
||||
const clampedAmplitude = Math.min(height / 2, amplitude); // Cap amplitude to half the height
|
||||
const segmentWidth = width / segments;
|
||||
let path = `M0,${height / 2}`; // Start in the middle of the left edge
|
||||
|
||||
// Loop through each segment and alternate the y position to create waves
|
||||
for (let i = 1; i <= segments; i++) {
|
||||
const x = i * segmentWidth;
|
||||
const y = height / 2 + (i % 2 === 0 ? clampedAmplitude : -clampedAmplitude);
|
||||
path += ` Q${x - segmentWidth / 2},${y} ${x},${height / 2}`;
|
||||
}
|
||||
|
||||
return path;
|
||||
};
|
||||
|
||||
return (
|
||||
<svg width={width} height={height} viewBox={`0 0 ${width} ${height}`} xmlns="http://www.w3.org/2000/svg">
|
||||
<path d={generatePath()} fill="none" stroke={stroke} strokeWidth={strokeWidth} />
|
||||
</svg>
|
||||
);
|
||||
};
|
||||
|
||||
export default WavyLine;
|
||||
@@ -46,7 +46,7 @@ const REGION_TARGETS: Record<FocusRegionName, Set<HTMLElement>> = {
|
||||
/**
|
||||
* The currently-focused region or `null` if no region is focused.
|
||||
*/
|
||||
const $focusedRegion = atom<FocusRegionName | null>(null);
|
||||
export const $focusedRegion = atom<FocusRegionName | null>(null);
|
||||
|
||||
/**
|
||||
* A map of focus regions to atoms that indicate if that region is focused.
|
||||
|
||||
@@ -1,124 +0,0 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
|
||||
import { selectAutoAddBoardId } from 'features/gallery/store/gallerySelectors';
|
||||
import { selectMaxImageUploadCount } from 'features/system/store/configSlice';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { selectActiveTab } from 'features/ui/store/uiSelectors';
|
||||
import { useCallback, useEffect, useState } from 'react';
|
||||
import type { Accept, FileRejection } from 'react-dropzone';
|
||||
import { useDropzone } from 'react-dropzone';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { useUploadImageMutation } from 'services/api/endpoints/images';
|
||||
import type { PostUploadAction } from 'services/api/types';
|
||||
|
||||
const log = logger('gallery');
|
||||
|
||||
const accept: Accept = {
|
||||
'image/png': ['.png'],
|
||||
'image/jpeg': ['.jpg', '.jpeg', '.png'],
|
||||
};
|
||||
|
||||
export const useFullscreenDropzone = () => {
|
||||
useAssertSingleton('useFullscreenDropzone');
|
||||
const { t } = useTranslation();
|
||||
const autoAddBoardId = useAppSelector(selectAutoAddBoardId);
|
||||
const [isHandlingUpload, setIsHandlingUpload] = useState<boolean>(false);
|
||||
const [uploadImage] = useUploadImageMutation();
|
||||
const activeTabName = useAppSelector(selectActiveTab);
|
||||
const maxImageUploadCount = useAppSelector(selectMaxImageUploadCount);
|
||||
|
||||
const getPostUploadAction = useCallback((): PostUploadAction => {
|
||||
if (activeTabName === 'upscaling') {
|
||||
return { type: 'SET_UPSCALE_INITIAL_IMAGE' };
|
||||
} else {
|
||||
return { type: 'TOAST' };
|
||||
}
|
||||
}, [activeTabName]);
|
||||
|
||||
const onDrop = useCallback(
|
||||
(acceptedFiles: Array<File>, fileRejections: Array<FileRejection>) => {
|
||||
if (fileRejections.length > 0) {
|
||||
const errors = fileRejections.map((rejection) => ({
|
||||
errors: rejection.errors.map(({ message }) => message),
|
||||
file: rejection.file.path,
|
||||
}));
|
||||
log.error({ errors }, 'Invalid upload');
|
||||
const description =
|
||||
maxImageUploadCount === undefined
|
||||
? t('toast.uploadFailedInvalidUploadDesc')
|
||||
: t('toast.uploadFailedInvalidUploadDesc_withCount', { count: maxImageUploadCount });
|
||||
|
||||
toast({
|
||||
id: 'UPLOAD_FAILED',
|
||||
title: t('toast.uploadFailed'),
|
||||
description,
|
||||
status: 'error',
|
||||
});
|
||||
|
||||
setIsHandlingUpload(false);
|
||||
return;
|
||||
}
|
||||
|
||||
for (const [i, file] of acceptedFiles.entries()) {
|
||||
uploadImage({
|
||||
file,
|
||||
image_category: 'user',
|
||||
is_intermediate: false,
|
||||
postUploadAction: getPostUploadAction(),
|
||||
board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId,
|
||||
// The `imageUploaded` listener does some extra logic, like switching to the asset view on upload on the
|
||||
// first upload of a "batch".
|
||||
isFirstUploadOfBatch: i === 0,
|
||||
});
|
||||
}
|
||||
|
||||
setIsHandlingUpload(false);
|
||||
},
|
||||
[t, maxImageUploadCount, uploadImage, getPostUploadAction, autoAddBoardId]
|
||||
);
|
||||
|
||||
const onDragOver = useCallback(() => {
|
||||
setIsHandlingUpload(true);
|
||||
}, []);
|
||||
|
||||
const onDragLeave = useCallback(() => {
|
||||
setIsHandlingUpload(false);
|
||||
}, []);
|
||||
|
||||
const dropzone = useDropzone({
|
||||
accept,
|
||||
noClick: true,
|
||||
onDrop,
|
||||
onDragOver,
|
||||
onDragLeave,
|
||||
noKeyboard: true,
|
||||
multiple: maxImageUploadCount === undefined || maxImageUploadCount > 1,
|
||||
maxFiles: maxImageUploadCount,
|
||||
});
|
||||
|
||||
useEffect(() => {
|
||||
// This is a hack to allow pasting images into the uploader
|
||||
const handlePaste = (e: ClipboardEvent) => {
|
||||
if (!dropzone.inputRef.current) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (e.clipboardData?.files) {
|
||||
// Set the files on the dropzone.inputRef
|
||||
dropzone.inputRef.current.files = e.clipboardData.files;
|
||||
// Dispatch the change event, dropzone catches this and we get to use its own validation
|
||||
dropzone.inputRef.current?.dispatchEvent(new Event('change', { bubbles: true }));
|
||||
}
|
||||
};
|
||||
|
||||
// Add the paste event listener
|
||||
document.addEventListener('paste', handlePaste);
|
||||
|
||||
return () => {
|
||||
document.removeEventListener('paste', handlePaste);
|
||||
};
|
||||
}, [dropzone.inputRef]);
|
||||
|
||||
return { dropzone, isHandlingUpload, setIsHandlingUpload };
|
||||
};
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user