Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-17 08:57:57 -05:00)
Compare commits
345 Commits
29
.github/CODEOWNERS
vendored
@@ -1,32 +1,31 @@
|
||||
# continuous integration
|
||||
/.github/workflows/ @lstein @blessedcoolant @hipsterusername @ebr @jazzhaiku
|
||||
/.github/workflows/ @lstein @blessedcoolant @hipsterusername @ebr @jazzhaiku @psychedelicious
|
||||
|
||||
# documentation
|
||||
/docs/ @lstein @blessedcoolant @hipsterusername @psychedelicious
|
||||
/mkdocs.yml @lstein @blessedcoolant @hipsterusername @psychedelicious
|
||||
|
||||
# nodes
|
||||
/invokeai/app/ @blessedcoolant @psychedelicious @brandonrising @hipsterusername @jazzhaiku
|
||||
/invokeai/app/ @blessedcoolant @psychedelicious @hipsterusername @jazzhaiku
|
||||
|
||||
# installation and configuration
|
||||
/pyproject.toml @lstein @blessedcoolant @hipsterusername
|
||||
/docker/ @lstein @blessedcoolant @hipsterusername @ebr
|
||||
/scripts/ @ebr @lstein @hipsterusername
|
||||
/installer/ @lstein @ebr @hipsterusername
|
||||
/invokeai/assets @lstein @ebr @hipsterusername
|
||||
/invokeai/configs @lstein @hipsterusername
|
||||
/invokeai/version @lstein @blessedcoolant @hipsterusername
|
||||
/pyproject.toml @lstein @blessedcoolant @psychedelicious @hipsterusername
|
||||
/docker/ @lstein @blessedcoolant @psychedelicious @hipsterusername @ebr
|
||||
/scripts/ @ebr @lstein @psychedelicious @hipsterusername
|
||||
/installer/ @lstein @ebr @psychedelicious @hipsterusername
|
||||
/invokeai/assets @lstein @ebr @psychedelicious @hipsterusername
|
||||
/invokeai/configs @lstein @psychedelicious @hipsterusername
|
||||
/invokeai/version @lstein @blessedcoolant @psychedelicious @hipsterusername
|
||||
|
||||
# web ui
|
||||
/invokeai/frontend @blessedcoolant @psychedelicious @lstein @maryhipp @hipsterusername
|
||||
/invokeai/backend @blessedcoolant @psychedelicious @lstein @maryhipp @hipsterusername
|
||||
|
||||
# generation, model management, postprocessing
|
||||
/invokeai/backend @lstein @blessedcoolant @brandonrising @hipsterusername @jazzhaiku
|
||||
/invokeai/backend @lstein @blessedcoolant @hipsterusername @jazzhaiku @psychedelicious @maryhipp
|
||||
|
||||
# front ends
|
||||
/invokeai/frontend/CLI @lstein @hipsterusername
|
||||
/invokeai/frontend/install @lstein @ebr @hipsterusername
|
||||
/invokeai/frontend/merge @lstein @blessedcoolant @hipsterusername
|
||||
/invokeai/frontend/training @lstein @blessedcoolant @hipsterusername
|
||||
/invokeai/frontend/CLI @lstein @psychedelicious @hipsterusername
|
||||
/invokeai/frontend/install @lstein @ebr @psychedelicious @hipsterusername
|
||||
/invokeai/frontend/merge @lstein @blessedcoolant @psychedelicious @hipsterusername
|
||||
/invokeai/frontend/training @lstein @blessedcoolant @psychedelicious @hipsterusername
|
||||
/invokeai/frontend/web @psychedelicious @blessedcoolant @maryhipp @hipsterusername
|
||||
|
||||
4
.github/workflows/python-checks.yml
vendored
@@ -67,6 +67,10 @@ jobs:
|
||||
version: '0.6.10'
|
||||
enable-cache: true
|
||||
|
||||
- name: check pypi classifiers
|
||||
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
|
||||
run: uv run --no-project scripts/check_classifiers.py ./pyproject.toml
|
||||
|
||||
- name: ruff check
|
||||
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
|
||||
run: uv tool run ruff@0.11.2 check --output-format=github .
|
||||
|
||||
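The new CI step runs `scripts/check_classifiers.py` against `pyproject.toml`, but the script itself is not part of this diff. A minimal sketch of such a check, assuming Python 3.11+ (`tomllib`) and the third-party `trove-classifiers` package, might look like this:

```python
# Hypothetical sketch of a classifier check; the real scripts/check_classifiers.py is not shown here.
# Assumes the "trove-classifiers" package, which exposes the set of valid PyPI classifiers.
import sys
import tomllib

from trove_classifiers import classifiers as valid_classifiers


def check_classifiers(pyproject_path: str) -> int:
    with open(pyproject_path, "rb") as f:
        pyproject = tomllib.load(f)

    declared = pyproject.get("project", {}).get("classifiers", [])
    unknown = [c for c in declared if c not in valid_classifiers]

    for c in unknown:
        print(f"Unknown classifier: {c}")
    return 1 if unknown else 0


if __name__ == "__main__":
    sys.exit(check_classifiers(sys.argv[1]))
```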
1
.gitignore
vendored
@@ -188,3 +188,4 @@ installer/install.sh
|
||||
installer/update.bat
|
||||
installer/update.sh
|
||||
installer/InvokeAI-Installer/
|
||||
.aider*
|
||||
|
||||
@@ -39,7 +39,7 @@ nodes imported in the `__init__.py` file are loaded. See the README in the nodes
|
||||
folder for more examples:
|
||||
|
||||
```py
|
||||
from .cool_node import CoolInvocation
|
||||
from .cool_node import ResizeInvocation
|
||||
```
|
||||
|
||||
## Creating A New Invocation
|
||||
@@ -69,7 +69,10 @@ The first set of things we need to do when creating a new Invocation are -
|
||||
So let us do that.
|
||||
|
||||
```python
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
|
||||
from invokeai.invocation_api import (
|
||||
BaseInvocation,
|
||||
invocation,
|
||||
)
|
||||
|
||||
@invocation('resize')
|
||||
class ResizeInvocation(BaseInvocation):
|
||||
@@ -103,8 +106,12 @@ create your own custom field types later in this guide. For now, let's go ahead
|
||||
and use it.
|
||||
|
||||
```python
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, InputField, invocation
|
||||
from invokeai.app.invocations.primitives import ImageField
|
||||
from invokeai.invocation_api import (
|
||||
BaseInvocation,
|
||||
ImageField,
|
||||
InputField,
|
||||
invocation,
|
||||
)
|
||||
|
||||
@invocation('resize')
|
||||
class ResizeInvocation(BaseInvocation):
|
||||
@@ -128,8 +135,12 @@ image: ImageField = InputField(description="The input image")
|
||||
Great. Now let us create our other inputs for `width` and `height`
|
||||
|
||||
```python
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, InputField, invocation
|
||||
from invokeai.app.invocations.primitives import ImageField
|
||||
from invokeai.invocation_api import (
|
||||
BaseInvocation,
|
||||
ImageField,
|
||||
InputField,
|
||||
invocation,
|
||||
)
|
||||
|
||||
@invocation('resize')
|
||||
class ResizeInvocation(BaseInvocation):
|
||||
@@ -163,8 +174,13 @@ that are provided by it by InvokeAI.
|
||||
Let us create this function first.
|
||||
|
||||
```python
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, InputField, invocation, InvocationContext
|
||||
from invokeai.app.invocations.primitives import ImageField
|
||||
from invokeai.invocation_api import (
|
||||
BaseInvocation,
|
||||
ImageField,
|
||||
InputField,
|
||||
InvocationContext,
|
||||
invocation,
|
||||
)
|
||||
|
||||
@invocation('resize')
|
||||
class ResizeInvocation(BaseInvocation):
|
||||
@@ -191,8 +207,14 @@ all the necessary info related to image outputs. So let us use that.
|
||||
We will cover how to create your own output types later in this guide.
|
||||
|
||||
```python
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, InputField, invocation, InvocationContext
|
||||
from invokeai.app.invocations.primitives import ImageField
|
||||
from invokeai.invocation_api import (
|
||||
BaseInvocation,
|
||||
ImageField,
|
||||
InputField,
|
||||
InvocationContext,
|
||||
invocation,
|
||||
)
|
||||
|
||||
from invokeai.app.invocations.image import ImageOutput
|
||||
|
||||
@invocation('resize')
|
||||
@@ -217,9 +239,15 @@ Perfect. Now that we have our Invocation setup, let us do what we want to do.
|
||||
So let's do that.
|
||||
|
||||
```python
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, InputField, invocation, InvocationContext
|
||||
from invokeai.app.invocations.primitives import ImageField
|
||||
from invokeai.app.invocations.image import ImageOutput, ResourceOrigin, ImageCategory
|
||||
from invokeai.invocation_api import (
|
||||
BaseInvocation,
|
||||
ImageField,
|
||||
InputField,
|
||||
InvocationContext,
|
||||
invocation,
|
||||
)
|
||||
|
||||
from invokeai.app.invocations.image import ImageOutput
|
||||
|
||||
@invocation("resize")
|
||||
class ResizeInvocation(BaseInvocation):
|
||||
|
||||
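Pulling the guide's snippets above together, a complete node under the new `invokeai.invocation_api` imports could look like the sketch below. The `context.images.get_pil` / `context.images.save` helpers and `ImageOutput.build` are assumptions based on the current invocation API and are covered later in the guide; the field constraints are illustrative only.

```python
# A possible finished resize node, assembled from the snippets above (API helpers assumed).
from invokeai.app.invocations.image import ImageOutput
from invokeai.invocation_api import (
    BaseInvocation,
    ImageField,
    InputField,
    InvocationContext,
    invocation,
)


@invocation("resize")
class ResizeInvocation(BaseInvocation):
    """Resizes an image to the given width and height."""

    image: ImageField = InputField(description="The input image")
    width: int = InputField(default=512, description="Width of the new image")
    height: int = InputField(default=512, description="Height of the new image")

    def invoke(self, context: InvocationContext) -> ImageOutput:
        # Load the input image, resize it, save the result, and return it as the node's output.
        pil_image = context.images.get_pil(self.image.image_name)
        resized = pil_image.resize((self.width, self.height))
        image_dto = context.images.save(image=resized)
        return ImageOutput.build(image_dto)
```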
@@ -71,7 +71,14 @@ The following commands vary depending on the version of Invoke being installed a
|
||||
|
||||
7. Determine the `PyPI` index URL to use for installation, if any. This is necessary to get the right version of torch installed.
|
||||
|
||||
=== "Invoke v5.10.0 and later"
|
||||
=== "Invoke v5.12 and later"
|
||||
|
||||
- If you are on Windows or Linux with an Nvidia GPU, use `https://download.pytorch.org/whl/cu128`.
|
||||
- If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
|
||||
- If you are on Linux with an AMD GPU, use `https://download.pytorch.org/whl/rocm6.2.4`.
|
||||
- **In all other cases, do not use an index.**
|
||||
|
||||
=== "Invoke v5.10.0 to v5.11.0"
|
||||
|
||||
- If you are on Windows or Linux with an Nvidia GPU, use `https://download.pytorch.org/whl/cu126`.
|
||||
- If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
|
||||
|
||||
@@ -13,6 +13,7 @@ If you'd prefer, you can also just download the whole node folder from the linke
|
||||
To use a community workflow, download the `.json` node graph file and load it into Invoke AI via the **Load Workflow** button in the Workflow Editor.
|
||||
|
||||
- Community Nodes
|
||||
+ [Anamorphic Tools](#anamorphic-tools)
|
||||
+ [Adapters-Linked](#adapters-linked-nodes)
|
||||
+ [Autostereogram](#autostereogram-nodes)
|
||||
+ [Average Images](#average-images)
|
||||
@@ -20,9 +21,12 @@ To use a community workflow, download the `.json` node graph file and load it in
|
||||
+ [Close Color Mask](#close-color-mask)
|
||||
+ [Clothing Mask](#clothing-mask)
|
||||
+ [Contrast Limited Adaptive Histogram Equalization](#contrast-limited-adaptive-histogram-equalization)
|
||||
+ [Curves](#curves)
|
||||
+ [Depth Map from Wavefront OBJ](#depth-map-from-wavefront-obj)
|
||||
+ [Enhance Detail](#enhance-detail)
|
||||
+ [Film Grain](#film-grain)
|
||||
+ [Flip Pose](#flip-pose)
|
||||
+ [Flux Ideal Size](#flux-ideal-size)
|
||||
+ [Generative Grammar-Based Prompt Nodes](#generative-grammar-based-prompt-nodes)
|
||||
+ [GPT2RandomPromptMaker](#gpt2randompromptmaker)
|
||||
+ [Grid to Gif](#grid-to-gif)
|
||||
@@ -61,6 +65,13 @@ To use a community workflow, download the `.json` node graph file and load it in
|
||||
- [Help](#help)
|
||||
|
||||
|
||||
--------------------------------
|
||||
### Anamorphic Tools
|
||||
|
||||
**Description:** A set of nodes to perform anamorphic modifications to images, like lens blur, streaks, spherical distortion, and vignetting.
|
||||
|
||||
**Node Link:** https://github.com/JPPhoto/anamorphic-tools
|
||||
|
||||
--------------------------------
|
||||
### Adapters Linked Nodes
|
||||
|
||||
@@ -132,6 +143,13 @@ Node Link: https://github.com/VeyDlin/clahe-node
|
||||
View:
|
||||
</br><img src="https://raw.githubusercontent.com/VeyDlin/clahe-node/master/.readme/node.png" width="500" />
|
||||
|
||||
--------------------------------
|
||||
### Curves
|
||||
|
||||
**Description:** Adjust an image's curve based on a user-defined string.
|
||||
|
||||
**Node Link:** https://github.com/JPPhoto/curves-node
|
||||
|
||||
--------------------------------
|
||||
### Depth Map from Wavefront OBJ
|
||||
|
||||
@@ -162,6 +180,20 @@ To be imported, an .obj must use triangulated meshes, so make sure to enable tha
|
||||
|
||||
**Node Link:** https://github.com/JPPhoto/film-grain-node
|
||||
|
||||
--------------------------------
|
||||
### Flip Pose
|
||||
|
||||
**Description:** This node will flip an openpose image horizontally, recoloring it to make sure that it isn't facing the wrong direction. Note that it does not work with openpose hands.
|
||||
|
||||
**Node Link:** https://github.com/JPPhoto/flip-pose-node
|
||||
|
||||
--------------------------------
|
||||
### Flux Ideal Size
|
||||
|
||||
**Description:** This node returns an ideal size to use for the first stage of a Flux image generation pipeline. Generating at the right size helps limit duplication and odd subject placement.
|
||||
|
||||
**Node Link:** https://github.com/JPPhoto/flux-ideal-size
|
||||
|
||||
--------------------------------
|
||||
### Generative Grammar-Based Prompt Nodes
|
||||
|
||||
|
||||
@@ -23,6 +23,10 @@ from invokeai.app.services.invoker import Invoker
|
||||
from invokeai.app.services.model_images.model_images_default import ModelImageFileStorageDisk
|
||||
from invokeai.app.services.model_manager.model_manager_default import ModelManagerService
|
||||
from invokeai.app.services.model_records.model_records_sql import ModelRecordServiceSQL
|
||||
from invokeai.app.services.model_relationship_records.model_relationship_records_sqlite import (
|
||||
SqliteModelRelationshipRecordStorage,
|
||||
)
|
||||
from invokeai.app.services.model_relationships.model_relationships_default import ModelRelationshipsService
|
||||
from invokeai.app.services.names.names_default import SimpleNameService
|
||||
from invokeai.app.services.object_serializer.object_serializer_disk import ObjectSerializerDisk
|
||||
from invokeai.app.services.object_serializer.object_serializer_forward_cache import ObjectSerializerForwardCache
|
||||
@@ -136,6 +140,8 @@ class ApiDependencies:
|
||||
download_queue=download_queue_service,
|
||||
events=events,
|
||||
)
|
||||
model_relationships = ModelRelationshipsService()
|
||||
model_relationship_records = SqliteModelRelationshipRecordStorage(db=db)
|
||||
names = SimpleNameService()
|
||||
performance_statistics = InvocationStatsService()
|
||||
session_processor = DefaultSessionProcessor(session_runner=DefaultSessionRunner())
|
||||
@@ -161,6 +167,8 @@ class ApiDependencies:
|
||||
logger=logger,
|
||||
model_images=model_images_service,
|
||||
model_manager=model_manager,
|
||||
model_relationships=model_relationships,
|
||||
model_relationship_records=model_relationship_records,
|
||||
download_queue=download_queue_service,
|
||||
names=names,
|
||||
performance_statistics=performance_statistics,
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
import typing
|
||||
from enum import Enum
|
||||
from importlib.metadata import PackageNotFoundError, version
|
||||
from importlib.metadata import distributions
|
||||
from pathlib import Path
|
||||
from platform import python_version
|
||||
from typing import Optional
|
||||
|
||||
import torch
|
||||
@@ -44,24 +43,6 @@ class AppVersion(BaseModel):
|
||||
highlights: Optional[list[str]] = Field(default=None, description="Highlights of release")
|
||||
|
||||
|
||||
class AppDependencyVersions(BaseModel):
|
||||
"""App depencency Versions Response"""
|
||||
|
||||
accelerate: str = Field(description="accelerate version")
|
||||
compel: str = Field(description="compel version")
|
||||
cuda: Optional[str] = Field(description="CUDA version")
|
||||
diffusers: str = Field(description="diffusers version")
|
||||
numpy: str = Field(description="Numpy version")
|
||||
opencv: str = Field(description="OpenCV version")
|
||||
onnx: str = Field(description="ONNX version")
|
||||
pillow: str = Field(description="Pillow (PIL) version")
|
||||
python: str = Field(description="Python version")
|
||||
torch: str = Field(description="PyTorch version")
|
||||
torchvision: str = Field(description="PyTorch Vision version")
|
||||
transformers: str = Field(description="transformers version")
|
||||
xformers: Optional[str] = Field(description="xformers version")
|
||||
|
||||
|
||||
class AppConfig(BaseModel):
|
||||
"""App Config Response"""
|
||||
|
||||
@@ -76,27 +57,19 @@ async def get_version() -> AppVersion:
|
||||
return AppVersion(version=__version__)
|
||||
|
||||
|
||||
@app_router.get("/app_deps", operation_id="get_app_deps", status_code=200, response_model=AppDependencyVersions)
|
||||
async def get_app_deps() -> AppDependencyVersions:
|
||||
@app_router.get("/app_deps", operation_id="get_app_deps", status_code=200, response_model=dict[str, str])
|
||||
async def get_app_deps() -> dict[str, str]:
|
||||
deps: dict[str, str] = {dist.metadata["Name"]: dist.version for dist in distributions()}
|
||||
try:
|
||||
xformers = version("xformers")
|
||||
except PackageNotFoundError:
|
||||
xformers = None
|
||||
return AppDependencyVersions(
|
||||
accelerate=version("accelerate"),
|
||||
compel=version("compel"),
|
||||
cuda=torch.version.cuda,
|
||||
diffusers=version("diffusers"),
|
||||
numpy=version("numpy"),
|
||||
opencv=version("opencv-python"),
|
||||
onnx=version("onnx"),
|
||||
pillow=version("pillow"),
|
||||
python=python_version(),
|
||||
torch=torch.version.__version__,
|
||||
torchvision=version("torchvision"),
|
||||
transformers=version("transformers"),
|
||||
xformers=xformers,
|
||||
)
|
||||
cuda = torch.version.cuda or "N/A"
|
||||
except Exception:
|
||||
cuda = "N/A"
|
||||
|
||||
deps["CUDA"] = cuda
|
||||
|
||||
sorted_deps = dict(sorted(deps.items(), key=lambda item: item[0].lower()))
|
||||
|
||||
return sorted_deps
|
||||
|
||||
|
||||
@app_router.get("/config", operation_id="get_config", status_code=200, response_model=AppConfig)
|
||||
|
||||
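With this change, `/app_deps` returns a flat, alphabetically sorted name-to-version mapping of every installed distribution plus a synthetic `CUDA` entry, rather than the fixed `AppDependencyVersions` model. A small client sketch follows; the host, port, and `/api/v1/app` mount point are assumed defaults and may differ in your deployment.

```python
# Usage sketch, not part of the diff. Host, port, and the app router prefix are assumptions.
import requests

resp = requests.get("http://127.0.0.1:9090/api/v1/app/app_deps", timeout=10)
resp.raise_for_status()

deps: dict[str, str] = resp.json()
for name, version in deps.items():
    print(f"{name}: {version}")  # includes a "CUDA" entry, reported as "N/A" when CUDA is unavailable
```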
@@ -146,7 +146,7 @@ async def list_boards(
|
||||
response_model=list[str],
|
||||
)
|
||||
async def list_all_board_image_names(
|
||||
board_id: str = Path(description="The id of the board"),
|
||||
board_id: str = Path(description="The id of the board or 'none' for uncategorized images"),
|
||||
categories: list[ImageCategory] | None = Query(default=None, description="The categories of image to include."),
|
||||
is_intermediate: bool | None = Query(default=None, description="Whether to list intermediate images."),
|
||||
) -> list[str]:
|
||||
|
||||
@@ -1,12 +1,13 @@
|
||||
import io
|
||||
import json
|
||||
import traceback
|
||||
from typing import Optional
|
||||
from typing import ClassVar, Optional
|
||||
|
||||
from fastapi import BackgroundTasks, Body, HTTPException, Path, Query, Request, Response, UploadFile
|
||||
from fastapi.responses import FileResponse
|
||||
from fastapi.routing import APIRouter
|
||||
from PIL import Image
|
||||
from pydantic import BaseModel, Field
|
||||
from pydantic import BaseModel, Field, model_validator
|
||||
|
||||
from invokeai.app.api.dependencies import ApiDependencies
|
||||
from invokeai.app.api.extract_metadata_from_image import extract_metadata_from_image
|
||||
@@ -19,6 +20,8 @@ from invokeai.app.services.image_records.image_records_common import (
|
||||
from invokeai.app.services.images.images_common import ImageDTO, ImageUrlsDTO
|
||||
from invokeai.app.services.shared.pagination import OffsetPaginatedResults
|
||||
from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
|
||||
from invokeai.app.util.controlnet_utils import heuristic_resize_fast
|
||||
from invokeai.backend.image_util.util import np_to_pil, pil_to_np
|
||||
|
||||
images_router = APIRouter(prefix="/v1/images", tags=["images"])
|
||||
|
||||
@@ -27,6 +30,19 @@ images_router = APIRouter(prefix="/v1/images", tags=["images"])
|
||||
IMAGE_MAX_AGE = 31536000
|
||||
|
||||
|
||||
class ResizeToDimensions(BaseModel):
|
||||
width: int = Field(..., gt=0)
|
||||
height: int = Field(..., gt=0)
|
||||
|
||||
MAX_SIZE: ClassVar[int] = 4096 * 4096
|
||||
|
||||
@model_validator(mode="after")
|
||||
def validate_total_output_size(self):
|
||||
if self.width * self.height > self.MAX_SIZE:
|
||||
raise ValueError(f"Max total output size for resizing is {self.MAX_SIZE} pixels")
|
||||
return self
|
||||
|
||||
|
||||
@images_router.post(
|
||||
"/upload",
|
||||
operation_id="upload_image",
|
||||
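The new `ResizeToDimensions` model caps the resized output at 4096 × 4096 total pixels via its `model_validator`. A minimal sketch of that behavior (the import path is an assumption based on this router module):

```python
# Sketch of the validator's behavior; the import path is assumed from the images router module shown here.
from pydantic import ValidationError

from invokeai.app.api.routers.images import ResizeToDimensions

ResizeToDimensions(width=1024, height=1024)  # fine: 1,048,576 pixels is within the 4096 * 4096 budget

try:
    ResizeToDimensions(width=8192, height=8192)  # 67,108,864 pixels exceeds MAX_SIZE
except ValidationError as err:
    print(err)  # reports "Max total output size for resizing is 16777216 pixels"
```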
@@ -46,6 +62,11 @@ async def upload_image(
|
||||
board_id: Optional[str] = Query(default=None, description="The board to add this image to, if any"),
|
||||
session_id: Optional[str] = Query(default=None, description="The session ID associated with this upload, if any"),
|
||||
crop_visible: Optional[bool] = Query(default=False, description="Whether to crop the image"),
|
||||
resize_to: Optional[str] = Body(
|
||||
default=None,
|
||||
description=f"Dimensions to resize the image to, must be stringified tuple of 2 integers. Max total pixel count: {ResizeToDimensions.MAX_SIZE}",
|
||||
example='"[1024,1024]"',
|
||||
),
|
||||
metadata: Optional[str] = Body(
|
||||
default=None,
|
||||
description="The metadata to associate with the image, must be a stringified JSON dict",
|
||||
@@ -59,13 +80,33 @@ async def upload_image(
|
||||
contents = await file.read()
|
||||
try:
|
||||
pil_image = Image.open(io.BytesIO(contents))
|
||||
if crop_visible:
|
||||
bbox = pil_image.getbbox()
|
||||
pil_image = pil_image.crop(bbox)
|
||||
except Exception:
|
||||
ApiDependencies.invoker.services.logger.error(traceback.format_exc())
|
||||
raise HTTPException(status_code=415, detail="Failed to read image")
|
||||
|
||||
if crop_visible:
|
||||
try:
|
||||
bbox = pil_image.getbbox()
|
||||
pil_image = pil_image.crop(bbox)
|
||||
except Exception:
|
||||
raise HTTPException(status_code=500, detail="Failed to crop image")
|
||||
|
||||
if resize_to:
|
||||
try:
|
||||
dims = json.loads(resize_to)
|
||||
resize_dims = ResizeToDimensions(**dims)
|
||||
except Exception:
|
||||
raise HTTPException(status_code=400, detail="Invalid resize_to format or size")
|
||||
|
||||
try:
|
||||
# heuristic_resize_fast expects an RGB or RGBA image
|
||||
pil_rgba = pil_image.convert("RGBA")
|
||||
np_image = pil_to_np(pil_rgba)
|
||||
np_image = heuristic_resize_fast(np_image, (resize_dims.width, resize_dims.height))
|
||||
pil_image = np_to_pil(np_image)
|
||||
except Exception:
|
||||
raise HTTPException(status_code=500, detail="Failed to resize image")
|
||||
|
||||
extracted_metadata = extract_metadata_from_image(
|
||||
pil_image=pil_image,
|
||||
invokeai_metadata_override=metadata,
|
||||
@@ -356,6 +397,29 @@ async def delete_images_from_list(
|
||||
raise HTTPException(status_code=500, detail="Failed to delete images")
|
||||
|
||||
|
||||
@images_router.delete(
|
||||
"/uncategorized", operation_id="delete_uncategorized_images", response_model=DeleteImagesFromListResult
|
||||
)
|
||||
async def delete_uncategorized_images() -> DeleteImagesFromListResult:
|
||||
"""Deletes all images that are uncategorized"""
|
||||
|
||||
image_names = ApiDependencies.invoker.services.board_images.get_all_board_image_names_for_board(
|
||||
board_id="none", categories=None, is_intermediate=None
|
||||
)
|
||||
|
||||
try:
|
||||
deleted_images: list[str] = []
|
||||
for image_name in image_names:
|
||||
try:
|
||||
ApiDependencies.invoker.services.images.delete(image_name)
|
||||
deleted_images.append(image_name)
|
||||
except Exception:
|
||||
pass
|
||||
return DeleteImagesFromListResult(deleted_images=deleted_images)
|
||||
except Exception:
|
||||
raise HTTPException(status_code=500, detail="Failed to delete images")
|
||||
|
||||
|
||||
class ImagesUpdatedFromListResult(BaseModel):
|
||||
updated_image_names: list[str] = Field(description="The image names that were updated")
|
||||
|
||||
|
||||
@@ -893,6 +893,12 @@ class HFTokenHelper:
|
||||
huggingface_hub.login(token=token, add_to_git_credential=False)
|
||||
return cls.get_status()
|
||||
|
||||
@classmethod
|
||||
def reset_token(cls) -> HFTokenStatus:
|
||||
with SuppressOutput(), contextlib.suppress(Exception):
|
||||
huggingface_hub.logout()
|
||||
return cls.get_status()
|
||||
|
||||
|
||||
@model_manager_router.get("/hf_login", operation_id="get_hf_login_status", response_model=HFTokenStatus)
|
||||
async def get_hf_login_status() -> HFTokenStatus:
|
||||
@@ -915,3 +921,8 @@ async def do_hf_login(
|
||||
ApiDependencies.invoker.services.logger.warning("Unable to verify HF token")
|
||||
|
||||
return token_status
|
||||
|
||||
|
||||
@model_manager_router.delete("/hf_login", operation_id="reset_hf_token", response_model=HFTokenStatus)
|
||||
async def reset_hf_token() -> HFTokenStatus:
|
||||
return HFTokenHelper.reset_token()
|
||||
|
||||
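The new `DELETE /hf_login` route logs the server out of Hugging Face and returns the resulting token status. A quick client sketch; the `/api/v2/models` prefix for the model manager router, host, and port are assumptions.

```python
# Usage sketch, not part of the diff. The model manager router prefix is assumed here.
import requests

resp = requests.delete("http://127.0.0.1:9090/api/v2/models/hf_login", timeout=10)
resp.raise_for_status()
print(resp.json())  # HFTokenStatus after logout
```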
215
invokeai/app/api/routers/model_relationships.py
Normal file
@@ -0,0 +1,215 @@
|
||||
"""FastAPI route for model relationship records."""
|
||||
|
||||
from typing import List
|
||||
|
||||
from fastapi import APIRouter, Body, HTTPException, Path, status
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from invokeai.app.api.dependencies import ApiDependencies
|
||||
|
||||
model_relationships_router = APIRouter(prefix="/v1/model_relationships", tags=["model_relationships"])
|
||||
|
||||
# === Schemas ===
|
||||
|
||||
|
||||
class ModelRelationshipCreateRequest(BaseModel):
|
||||
model_key_1: str = Field(
|
||||
...,
|
||||
description="The key of the first model in the relationship",
|
||||
examples=[
|
||||
"aa3b247f-90c9-4416-bfcd-aeaa57a5339e",
|
||||
"ac32b914-10ab-496e-a24a-3068724b9c35",
|
||||
"d944abfd-c7c3-42e2-a4ff-da640b29b8b4",
|
||||
"b1c2d3e4-f5a6-7890-abcd-ef1234567890",
|
||||
"12345678-90ab-cdef-1234-567890abcdef",
|
||||
"fedcba98-7654-3210-fedc-ba9876543210",
|
||||
],
|
||||
)
|
||||
model_key_2: str = Field(
|
||||
...,
|
||||
description="The key of the second model in the relationship",
|
||||
examples=[
|
||||
"3bb7c0eb-b6c8-469c-ad8c-4d69c06075e4",
|
||||
"f0c3da4e-d9ff-42b5-a45c-23be75c887c9",
|
||||
"38170dd8-f1e5-431e-866c-2c81f1277fcc",
|
||||
"c57fea2d-7646-424c-b9ad-c0ba60fc68be",
|
||||
"10f7807b-ab54-46a9-ab03-600e88c630a1",
|
||||
"f6c1d267-cf87-4ee0-bee0-37e791eacab7",
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
class ModelRelationshipBatchRequest(BaseModel):
|
||||
model_keys: List[str] = Field(
|
||||
...,
|
||||
description="List of model keys to fetch related models for",
|
||||
examples=[
|
||||
[
|
||||
"aa3b247f-90c9-4416-bfcd-aeaa57a5339e",
|
||||
"ac32b914-10ab-496e-a24a-3068724b9c35",
|
||||
],
|
||||
[
|
||||
"b1c2d3e4-f5a6-7890-abcd-ef1234567890",
|
||||
"12345678-90ab-cdef-1234-567890abcdef",
|
||||
"fedcba98-7654-3210-fedc-ba9876543210",
|
||||
],
|
||||
[
|
||||
"3bb7c0eb-b6c8-469c-ad8c-4d69c06075e4",
|
||||
],
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
# === Routes ===
|
||||
|
||||
|
||||
@model_relationships_router.get(
|
||||
"/i/{model_key}",
|
||||
operation_id="get_related_models",
|
||||
response_model=list[str],
|
||||
responses={
|
||||
200: {
|
||||
"description": "A list of related model keys was retrieved successfully",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"example": [
|
||||
"15e9eb28-8cfe-47c9-b610-37907a79fc3c",
|
||||
"71272e82-0e5f-46d5-bca9-9a61f4bd8a82",
|
||||
"a5d7cd49-1b98-4534-a475-aeee4ccf5fa2",
|
||||
]
|
||||
}
|
||||
},
|
||||
},
|
||||
404: {"description": "The specified model could not be found"},
|
||||
422: {"description": "Validation error"},
|
||||
},
|
||||
)
|
||||
async def get_related_models(
|
||||
model_key: str = Path(..., description="The key of the model to get relationships for"),
|
||||
) -> list[str]:
|
||||
"""
|
||||
Get a list of model keys related to a given model.
|
||||
"""
|
||||
try:
|
||||
return ApiDependencies.invoker.services.model_relationships.get_related_model_keys(model_key)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
@model_relationships_router.post(
|
||||
"/",
|
||||
status_code=status.HTTP_204_NO_CONTENT,
|
||||
responses={
|
||||
204: {"description": "The relationship was successfully created"},
|
||||
400: {"description": "Invalid model keys or self-referential relationship"},
|
||||
409: {"description": "The relationship already exists"},
|
||||
422: {"description": "Validation error"},
|
||||
500: {"description": "Internal server error"},
|
||||
},
|
||||
summary="Add Model Relationship",
|
||||
description="Creates a **bidirectional** relationship between two models, allowing each to reference the other as related.",
|
||||
)
|
||||
async def add_model_relationship(
|
||||
req: ModelRelationshipCreateRequest = Body(..., description="The model keys to relate"),
|
||||
) -> None:
|
||||
"""
|
||||
Add a relationship between two models.
|
||||
|
||||
Relationships are bidirectional and will be accessible from both models.
|
||||
|
||||
- Raises 400 if keys are invalid or identical.
|
||||
- Raises 409 if the relationship already exists.
|
||||
"""
|
||||
try:
|
||||
if req.model_key_1 == req.model_key_2:
|
||||
raise HTTPException(status_code=400, detail="Cannot relate a model to itself.")
|
||||
|
||||
ApiDependencies.invoker.services.model_relationships.add_model_relationship(
|
||||
req.model_key_1,
|
||||
req.model_key_2,
|
||||
)
|
||||
except ValueError as e:
|
||||
raise HTTPException(status_code=409, detail=str(e))
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
@model_relationships_router.delete(
|
||||
"/",
|
||||
status_code=status.HTTP_204_NO_CONTENT,
|
||||
responses={
|
||||
204: {"description": "The relationship was successfully removed"},
|
||||
400: {"description": "Invalid model keys or self-referential relationship"},
|
||||
404: {"description": "The relationship does not exist"},
|
||||
422: {"description": "Validation error"},
|
||||
500: {"description": "Internal server error"},
|
||||
},
|
||||
summary="Remove Model Relationship",
|
||||
description="Removes a **bidirectional** relationship between two models. The relationship must already exist.",
|
||||
)
|
||||
async def remove_model_relationship(
|
||||
req: ModelRelationshipCreateRequest = Body(..., description="The model keys to disconnect"),
|
||||
) -> None:
|
||||
"""
|
||||
Removes a bidirectional relationship between two model keys.
|
||||
|
||||
- Raises 400 if attempting to unlink a model from itself.
|
||||
- Raises 404 if the relationship was not found.
|
||||
"""
|
||||
try:
|
||||
if req.model_key_1 == req.model_key_2:
|
||||
raise HTTPException(status_code=400, detail="Cannot unlink a model from itself.")
|
||||
|
||||
ApiDependencies.invoker.services.model_relationships.remove_model_relationship(
|
||||
req.model_key_1,
|
||||
req.model_key_2,
|
||||
)
|
||||
except ValueError as e:
|
||||
raise HTTPException(status_code=404, detail=str(e))
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
@model_relationships_router.post(
|
||||
"/batch",
|
||||
operation_id="get_related_models_batch",
|
||||
response_model=List[str],
|
||||
responses={
|
||||
200: {
|
||||
"description": "Related model keys retrieved successfully",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"example": [
|
||||
"ca562b14-995e-4a42-90c1-9528f1a5921d",
|
||||
"cc0c2b8a-c62e-41d6-878e-cc74dde5ca8f",
|
||||
"18ca7649-6a9e-47d5-bc17-41ab1e8cec81",
|
||||
"7c12d1b2-0ef9-4bec-ba55-797b2d8f2ee1",
|
||||
"c382eaa3-0e28-4ab0-9446-408667699aeb",
|
||||
"71272e82-0e5f-46d5-bca9-9a61f4bd8a82",
|
||||
"a5d7cd49-1b98-4534-a475-aeee4ccf5fa2",
|
||||
]
|
||||
}
|
||||
},
|
||||
},
|
||||
422: {"description": "Validation error"},
|
||||
500: {"description": "Internal server error"},
|
||||
},
|
||||
summary="Get Related Model Keys (Batch)",
|
||||
description="Retrieves all **unique related model keys** for a list of given models. This is useful for contextual suggestions or filtering.",
|
||||
)
|
||||
async def get_related_models_batch(
|
||||
req: ModelRelationshipBatchRequest = Body(..., description="Model keys to check for related connections"),
|
||||
) -> list[str]:
|
||||
"""
|
||||
Accepts multiple model keys and returns a flat list of all unique related keys.
|
||||
|
||||
Useful when working with multiple selections in the UI or cross-model comparisons.
|
||||
"""
|
||||
try:
|
||||
all_related: set[str] = set()
|
||||
for key in req.model_keys:
|
||||
related = ApiDependencies.invoker.services.model_relationships.get_related_model_keys(key)
|
||||
all_related.update(related)
|
||||
return list(all_related)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
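The new router exposes create, remove, single-model lookup, and batch lookup for model relationships. A usage sketch against a local server is below; the host and port are assumed defaults, the `/api` prefix comes from the router registration in `api_app.py` shown next, and the example keys are taken from the schema examples above.

```python
# Usage sketch, not part of the diff. Host and port are assumed defaults.
import requests

BASE = "http://127.0.0.1:9090/api/v1/model_relationships"
key_a = "aa3b247f-90c9-4416-bfcd-aeaa57a5339e"
key_b = "3bb7c0eb-b6c8-469c-ad8c-4d69c06075e4"

# Create a bidirectional relationship (204 on success, 409 if it already exists).
requests.post(BASE + "/", json={"model_key_1": key_a, "model_key_2": key_b}, timeout=10).raise_for_status()

# Keys related to a single model.
related = requests.get(f"{BASE}/i/{key_a}", timeout=10).json()

# Unique keys related to any model in a batch.
batch = requests.post(f"{BASE}/batch", json={"model_keys": [key_a, key_b]}, timeout=10).json()

print(related, batch)
```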
@@ -22,6 +22,7 @@ from invokeai.app.api.routers import (
|
||||
download_queue,
|
||||
images,
|
||||
model_manager,
|
||||
model_relationships,
|
||||
session_queue,
|
||||
style_presets,
|
||||
utilities,
|
||||
@@ -125,6 +126,7 @@ app.include_router(download_queue.download_queue_router, prefix="/api")
|
||||
app.include_router(images.images_router, prefix="/api")
|
||||
app.include_router(boards.boards_router, prefix="/api")
|
||||
app.include_router(board_images.board_images_router, prefix="/api")
|
||||
app.include_router(model_relationships.model_relationships_router, prefix="/api")
|
||||
app.include_router(app_info.app_router, prefix="/api")
|
||||
app.include_router(session_queue.session_queue_router, prefix="/api")
|
||||
app.include_router(workflows.workflows_router, prefix="/api")
|
||||
@@ -156,7 +158,7 @@ web_root_path = Path(list(web_dir.__path__)[0])
|
||||
try:
|
||||
app.mount("/", NoCacheStaticFiles(directory=Path(web_root_path, "dist"), html=True), name="ui")
|
||||
except RuntimeError:
|
||||
logger.warn(f"No UI found at {web_root_path}/dist, skipping UI mount")
|
||||
logger.warning(f"No UI found at {web_root_path}/dist, skipping UI mount")
|
||||
app.mount(
|
||||
"/static", NoCacheStaticFiles(directory=Path(web_root_path, "static/")), name="static"
|
||||
) # docs favicon is in here
|
||||
|
||||
@@ -5,6 +5,8 @@ from __future__ import annotations
|
||||
import inspect
|
||||
import re
|
||||
import sys
|
||||
import types
|
||||
import typing
|
||||
import warnings
|
||||
from abc import ABC, abstractmethod
|
||||
from enum import Enum
|
||||
@@ -20,12 +22,14 @@ from typing import (
|
||||
Literal,
|
||||
Optional,
|
||||
Type,
|
||||
TypedDict,
|
||||
TypeVar,
|
||||
Union,
|
||||
cast,
|
||||
)
|
||||
|
||||
import semver
|
||||
from pydantic import BaseModel, ConfigDict, Field, TypeAdapter, create_model
|
||||
from pydantic import BaseModel, ConfigDict, Field, JsonValue, TypeAdapter, create_model
|
||||
from pydantic.fields import FieldInfo
|
||||
from pydantic_core import PydanticUndefined
|
||||
|
||||
@@ -72,13 +76,24 @@ class Classification(str, Enum, metaclass=MetaEnum):
|
||||
Special = "special"
|
||||
|
||||
|
||||
class Bottleneck(str, Enum, metaclass=MetaEnum):
|
||||
"""
|
||||
The bottleneck of an invocation.
|
||||
- `Network`: The invocation's execution is network-bound.
|
||||
- `GPU`: The invocation's execution is GPU-bound.
|
||||
"""
|
||||
|
||||
Network = "network"
|
||||
GPU = "gpu"
|
||||
|
||||
|
||||
class UIConfigBase(BaseModel):
|
||||
"""
|
||||
Provides additional node configuration to the UI.
|
||||
This is used internally by the @invocation decorator logic. Do not use this directly.
|
||||
"""
|
||||
|
||||
tags: Optional[list[str]] = Field(default_factory=None, description="The node's tags")
|
||||
tags: Optional[list[str]] = Field(default=None, description="The node's tags")
|
||||
title: Optional[str] = Field(default=None, description="The node's display name")
|
||||
category: Optional[str] = Field(default=None, description="The node's category")
|
||||
version: str = Field(
|
||||
@@ -93,6 +108,11 @@ class UIConfigBase(BaseModel):
|
||||
)
|
||||
|
||||
|
||||
class OriginalModelField(TypedDict):
|
||||
annotation: Any
|
||||
field_info: FieldInfo
|
||||
|
||||
|
||||
class BaseInvocationOutput(BaseModel):
|
||||
"""
|
||||
Base class for all invocation outputs.
|
||||
@@ -100,6 +120,12 @@ class BaseInvocationOutput(BaseModel):
|
||||
All invocation outputs must use the `@invocation_output` decorator to provide their unique type.
|
||||
"""
|
||||
|
||||
output_meta: Optional[dict[str, JsonValue]] = Field(
|
||||
default=None,
|
||||
description="Optional dictionary of metadata for the invocation output, unrelated to the invocation's actual output value. This is not exposed as an output field.",
|
||||
json_schema_extra={"field_kind": FieldKind.NodeAttribute},
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseInvocationOutput]) -> None:
|
||||
"""Adds various UI-facing attributes to the invocation output's OpenAPI schema."""
|
||||
@@ -115,6 +141,9 @@ class BaseInvocationOutput(BaseModel):
|
||||
"""Gets the invocation output's type, as provided by the `@invocation_output` decorator."""
|
||||
return cls.model_fields["type"].default
|
||||
|
||||
_original_model_fields: ClassVar[dict[str, OriginalModelField]] = {}
|
||||
"""The original model fields, before any modifications were made by the @invocation_output decorator."""
|
||||
|
||||
model_config = ConfigDict(
|
||||
protected_namespaces=(),
|
||||
validate_assignment=True,
|
||||
@@ -148,7 +177,7 @@ class BaseInvocation(ABC, BaseModel):
|
||||
return cls.model_fields["type"].default
|
||||
|
||||
@classmethod
|
||||
def get_output_annotation(cls) -> BaseInvocationOutput:
|
||||
def get_output_annotation(cls) -> Type[BaseInvocationOutput]:
|
||||
"""Gets the invocation's output annotation (i.e. the return annotation of its `invoke()` method)."""
|
||||
return signature(cls.invoke).return_annotation
|
||||
|
||||
@@ -180,7 +209,7 @@ class BaseInvocation(ABC, BaseModel):
|
||||
Internal invoke method, calls `invoke()` after some prep.
|
||||
Handles optional fields that are required to call `invoke()` and invocation cache.
|
||||
"""
|
||||
for field_name, field in self.model_fields.items():
|
||||
for field_name, field in type(self).model_fields.items():
|
||||
if not field.json_schema_extra or callable(field.json_schema_extra):
|
||||
# something has gone terribly awry, we should always have this and it should be a dict
|
||||
continue
|
||||
@@ -195,9 +224,9 @@ class BaseInvocation(ABC, BaseModel):
|
||||
setattr(self, field_name, orig_default)
|
||||
if orig_required and orig_default is PydanticUndefined and getattr(self, field_name) is None:
|
||||
if input_ == Input.Connection:
|
||||
raise RequiredConnectionException(self.model_fields["type"].default, field_name)
|
||||
raise RequiredConnectionException(type(self).model_fields["type"].default, field_name)
|
||||
elif input_ == Input.Any:
|
||||
raise MissingInputException(self.model_fields["type"].default, field_name)
|
||||
raise MissingInputException(type(self).model_fields["type"].default, field_name)
|
||||
|
||||
# skip node cache codepath if it's disabled
|
||||
if services.configuration.node_cache_size == 0:
|
||||
@@ -235,6 +264,8 @@ class BaseInvocation(ABC, BaseModel):
|
||||
json_schema_extra={"field_kind": FieldKind.NodeAttribute},
|
||||
)
|
||||
|
||||
bottleneck: ClassVar[Bottleneck]
|
||||
|
||||
UIConfig: ClassVar[UIConfigBase]
|
||||
|
||||
model_config = ConfigDict(
|
||||
@@ -245,6 +276,9 @@ class BaseInvocation(ABC, BaseModel):
|
||||
coerce_numbers_to_str=True,
|
||||
)
|
||||
|
||||
_original_model_fields: ClassVar[dict[str, OriginalModelField]] = {}
|
||||
"""The original model fields, before any modifications were made by the @invocation decorator."""
|
||||
|
||||
|
||||
TBaseInvocation = TypeVar("TBaseInvocation", bound=BaseInvocation)
|
||||
|
||||
@@ -256,6 +290,26 @@ class InvocationRegistry:
|
||||
@classmethod
|
||||
def register_invocation(cls, invocation: type[BaseInvocation]) -> None:
|
||||
"""Registers an invocation."""
|
||||
|
||||
invocation_type = invocation.get_type()
|
||||
node_pack = invocation.UIConfig.node_pack
|
||||
|
||||
# Log a warning when an existing invocation is being clobbered by the one we are registering
|
||||
clobbered_invocation = InvocationRegistry.get_invocation_for_type(invocation_type)
|
||||
if clobbered_invocation is not None:
|
||||
# This should always be true - we just checked if the invocation type was in the set
|
||||
clobbered_node_pack = clobbered_invocation.UIConfig.node_pack
|
||||
|
||||
if clobbered_node_pack == "invokeai":
|
||||
# The invocation being clobbered is a core invocation
|
||||
logger.warning(f'Overriding core node "{invocation_type}" with node from "{node_pack}"')
|
||||
else:
|
||||
# The invocation being clobbered is a custom invocation
|
||||
logger.warning(
|
||||
f'Overriding node "{invocation_type}" from "{node_pack}" with node from "{clobbered_node_pack}"'
|
||||
)
|
||||
cls._invocation_classes.remove(clobbered_invocation)
|
||||
|
||||
cls._invocation_classes.add(invocation)
|
||||
cls.invalidate_invocation_typeadapter()
|
||||
|
||||
@@ -314,6 +368,15 @@ class InvocationRegistry:
|
||||
@classmethod
|
||||
def register_output(cls, output: "type[TBaseInvocationOutput]") -> None:
|
||||
"""Registers an invocation output."""
|
||||
output_type = output.get_type()
|
||||
|
||||
# Log a warning when an existing invocation is being clobbered by the one we are registering
|
||||
clobbered_output = InvocationRegistry.get_output_for_type(output_type)
|
||||
if clobbered_output is not None:
|
||||
# TODO(psyche): We do not record the node pack of the output, so we cannot log it here
|
||||
logger.warning(f'Overriding invocation output "{output_type}"')
|
||||
cls._output_classes.remove(clobbered_output)
|
||||
|
||||
cls._output_classes.add(output)
|
||||
cls.invalidate_output_typeadapter()
|
||||
|
||||
@@ -322,6 +385,11 @@ class InvocationRegistry:
|
||||
"""Gets all invocation outputs."""
|
||||
return cls._output_classes
|
||||
|
||||
@classmethod
|
||||
def get_outputs_map(cls) -> dict[str, type[BaseInvocationOutput]]:
|
||||
"""Gets a map of all output types to their output classes."""
|
||||
return {i.get_type(): i for i in cls.get_output_classes()}
|
||||
|
||||
@classmethod
|
||||
@lru_cache(maxsize=1)
|
||||
def get_output_typeadapter(cls) -> TypeAdapter[Any]:
|
||||
@@ -347,6 +415,11 @@ class InvocationRegistry:
|
||||
"""Gets all invocation output types."""
|
||||
return (i.get_type() for i in cls.get_output_classes())
|
||||
|
||||
@classmethod
|
||||
def get_output_for_type(cls, output_type: str) -> type[BaseInvocationOutput] | None:
|
||||
"""Gets the output class for a given output type."""
|
||||
return cls.get_outputs_map().get(output_type)
|
||||
|
||||
|
||||
RESERVED_NODE_ATTRIBUTE_FIELD_NAMES = {
|
||||
"id",
|
||||
@@ -354,11 +427,12 @@ RESERVED_NODE_ATTRIBUTE_FIELD_NAMES = {
|
||||
"use_cache",
|
||||
"type",
|
||||
"workflow",
|
||||
"bottleneck",
|
||||
}
|
||||
|
||||
RESERVED_INPUT_FIELD_NAMES = {"metadata", "board"}
|
||||
|
||||
RESERVED_OUTPUT_FIELD_NAMES = {"type"}
|
||||
RESERVED_OUTPUT_FIELD_NAMES = {"type", "output_meta"}
|
||||
|
||||
|
||||
class _Model(BaseModel):
|
||||
@@ -425,11 +499,53 @@ def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None
|
||||
|
||||
ui_type = field.json_schema_extra.get("ui_type", None)
|
||||
if isinstance(ui_type, str) and ui_type.startswith("DEPRECATED_"):
|
||||
logger.warn(f'"UIType.{ui_type.split("_")[-1]}" is deprecated, ignoring')
|
||||
logger.warning(f'"UIType.{ui_type.split("_")[-1]}" is deprecated, ignoring')
|
||||
field.json_schema_extra.pop("ui_type")
|
||||
return None
|
||||
|
||||
|
||||
class NoDefaultSentinel:
|
||||
pass
|
||||
|
||||
|
||||
def validate_field_default(
|
||||
cls_name: str, field_name: str, invocation_type: str, annotation: Any, field_info: FieldInfo
|
||||
) -> None:
|
||||
"""Validates the default value of a field against its pydantic field definition."""
|
||||
|
||||
assert isinstance(field_info.json_schema_extra, dict), "json_schema_extra is not a dict"
|
||||
|
||||
# By the time we are doing this, we've already done some pydantic magic by overriding the original default value.
|
||||
# We store the original default value in the json_schema_extra dict, so we can validate it here.
|
||||
orig_default = field_info.json_schema_extra.get("orig_default", NoDefaultSentinel)
|
||||
|
||||
if orig_default is NoDefaultSentinel:
|
||||
return
|
||||
|
||||
# To validate the default value, we can create a temporary pydantic model with the field we are validating as its
|
||||
# only field. Then validate the default value against this temporary model.
|
||||
TempDefaultValidator = cast(BaseModel, create_model(cls_name, **{field_name: (annotation, field_info)}))
|
||||
|
||||
try:
|
||||
TempDefaultValidator.model_validate({field_name: orig_default})
|
||||
except Exception as e:
|
||||
raise InvalidFieldError(
|
||||
f'Default value for field "{field_name}" on invocation "{invocation_type}" is invalid, {e}'
|
||||
) from e
|
||||
|
||||
|
||||
def is_optional(annotation: Any) -> bool:
|
||||
"""
|
||||
Checks if the given annotation is optional (i.e. Optional[X], Union[X, None] or X | None).
|
||||
"""
|
||||
origin = typing.get_origin(annotation)
|
||||
# PEP 604 unions (int|None) have origin types.UnionType
|
||||
is_union = origin is typing.Union or origin is types.UnionType
|
||||
if not is_union:
|
||||
return False
|
||||
return any(arg is type(None) for arg in typing.get_args(annotation))
|
||||
|
||||
|
||||
def invocation(
|
||||
invocation_type: str,
|
||||
title: Optional[str] = None,
|
||||
@@ -438,6 +554,7 @@ def invocation(
|
||||
version: Optional[str] = None,
|
||||
use_cache: Optional[bool] = True,
|
||||
classification: Classification = Classification.Stable,
|
||||
bottleneck: Bottleneck = Bottleneck.GPU,
|
||||
) -> Callable[[Type[TBaseInvocation]], Type[TBaseInvocation]]:
|
||||
"""
|
||||
Registers an invocation.
|
||||
@@ -449,6 +566,7 @@ def invocation(
|
||||
:param Optional[str] version: Adds a version to the invocation. Must be a valid semver string. Defaults to None.
|
||||
:param Optional[bool] use_cache: Whether or not to use the invocation cache. Defaults to True. The user may override this in the workflow editor.
|
||||
:param Classification classification: The classification of the invocation. Defaults to FeatureClassification.Stable. Use Beta or Prototype if the invocation is unstable.
|
||||
:param Bottleneck bottleneck: The bottleneck of the invocation. Defaults to Bottleneck.GPU. Use Network if the invocation is network-bound.
|
||||
"""
|
||||
|
||||
def wrapper(cls: Type[TBaseInvocation]) -> Type[TBaseInvocation]:
|
||||
@@ -460,27 +578,26 @@ def invocation(
|
||||
# The node pack is the module name - will be "invokeai" for built-in nodes
|
||||
node_pack = cls.__module__.split(".")[0]
|
||||
|
||||
# Handle the case where an existing node is being clobbered by the one we are registering
|
||||
if invocation_type in InvocationRegistry.get_invocation_types():
|
||||
clobbered_invocation = InvocationRegistry.get_invocation_for_type(invocation_type)
|
||||
# This should always be true - we just checked if the invocation type was in the set
|
||||
assert clobbered_invocation is not None
|
||||
|
||||
clobbered_node_pack = clobbered_invocation.UIConfig.node_pack
|
||||
|
||||
if clobbered_node_pack == "invokeai":
|
||||
# The node being clobbered is a core node
|
||||
raise ValueError(
|
||||
f'Cannot load node "{invocation_type}" from node pack "{node_pack}" - a core node with the same type already exists'
|
||||
)
|
||||
else:
|
||||
# The node being clobbered is a custom node
|
||||
raise ValueError(
|
||||
f'Cannot load node "{invocation_type}" from node pack "{node_pack}" - a node with the same type already exists in node pack "{clobbered_node_pack}"'
|
||||
)
|
||||
|
||||
validate_fields(cls.model_fields, invocation_type)
|
||||
|
||||
fields: dict[str, tuple[Any, FieldInfo]] = {}
|
||||
|
||||
for field_name, field_info in cls.model_fields.items():
|
||||
annotation = field_info.annotation
|
||||
assert annotation is not None, f"{field_name} on invocation {invocation_type} has no type annotation."
|
||||
assert isinstance(field_info.json_schema_extra, dict), (
|
||||
f"{field_name} on invocation {invocation_type} has a non-dict json_schema_extra, did you forget to use InputField?"
|
||||
)
|
||||
|
||||
cls._original_model_fields[field_name] = OriginalModelField(annotation=annotation, field_info=field_info)
|
||||
|
||||
validate_field_default(cls.__name__, field_name, invocation_type, annotation, field_info)
|
||||
|
||||
if field_info.default is None and not is_optional(annotation):
|
||||
annotation = annotation | None
|
||||
|
||||
fields[field_name] = (annotation, field_info)
|
||||
|
||||
# Add OpenAPI schema extras
|
||||
uiconfig: dict[str, Any] = {}
|
||||
uiconfig["title"] = title
|
||||
@@ -496,7 +613,7 @@ def invocation(
|
||||
raise InvalidVersionError(f'Invalid version string for node "{invocation_type}": "{version}"') from e
|
||||
uiconfig["version"] = version
|
||||
else:
|
||||
logger.warn(f'No version specified for node "{invocation_type}", using "1.0.0"')
|
||||
logger.warning(f'No version specified for node "{invocation_type}", using "1.0.0"')
|
||||
uiconfig["version"] = "1.0.0"
|
||||
|
||||
cls.UIConfig = UIConfigBase(**uiconfig)
|
||||
@@ -504,6 +621,8 @@ def invocation(
|
||||
if use_cache is not None:
|
||||
cls.model_fields["use_cache"].default = use_cache
|
||||
|
||||
cls.bottleneck = bottleneck
|
||||
|
||||
# Add the invocation type to the model.
|
||||
|
||||
# You'd be tempted to just add the type field and rebuild the model, like this:
|
||||
@@ -513,11 +632,27 @@ def invocation(
|
||||
# Unfortunately, because the `GraphInvocation` uses a forward ref in its `graph` field's annotation, this does
|
||||
# not work. Instead, we have to create a new class with the type field and patch the original class with it.
|
||||
|
||||
invocation_type_annotation = Literal[invocation_type] # type: ignore
|
||||
invocation_type_field = Field(
|
||||
title="type", default=invocation_type, json_schema_extra={"field_kind": FieldKind.NodeAttribute}
|
||||
invocation_type_annotation = Literal[invocation_type]
|
||||
|
||||
# Field() returns an instance of FieldInfo, but thanks to a pydantic implementation detail, it is _typed_ as Any.
|
||||
# This cast makes the type annotation match the class's true type.
|
||||
invocation_type_field_info = cast(
|
||||
FieldInfo,
|
||||
Field(title="type", default=invocation_type, json_schema_extra={"field_kind": FieldKind.NodeAttribute}),
|
||||
)
|
||||
|
||||
fields["type"] = (invocation_type_annotation, invocation_type_field_info)
|
||||
|
||||
# Invocation outputs must be registered using the @invocation_output decorator, but it is possible that the
|
||||
# output is registered _after_ this invocation is registered. It depends on module import ordering.
|
||||
#
|
||||
# We can only confirm the output for an invocation is registered after all modules are imported. There's
|
||||
# only really one good time to do that - during application startup, in `run_app.py`, after loading all
|
||||
# custom nodes.
|
||||
#
|
||||
# We can still do some basic validation here - ensure the invoke method is defined and returns an instance
|
||||
# of BaseInvocationOutput.
|
||||
|
||||
# Validate the `invoke()` method is implemented
|
||||
if "invoke" in cls.__abstractmethods__:
|
||||
raise ValueError(f'Invocation "{invocation_type}" must implement the "invoke" method')
|
||||
@@ -539,17 +674,12 @@ def invocation(
)

docstring = cls.__doc__
cls = create_model(
cls.__qualname__,
__base__=cls,
__module__=cls.__module__,
type=(invocation_type_annotation, invocation_type_field),
)
cls.__doc__ = docstring
new_class = create_model(cls.__qualname__, __base__=cls, __module__=cls.__module__, **fields) # type: ignore
new_class.__doc__ = docstring

InvocationRegistry.register_invocation(cls)
InvocationRegistry.register_invocation(new_class)

return cls
return new_class

return wrapper
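
The patched class above is rebuilt with pydantic's create_model so that the literal "type" field becomes a real, validated model field rather than a plain class attribute. A minimal sketch of the same pattern outside InvokeAI (the class and node type names below are invented for illustration):

from typing import Literal, cast

from pydantic import BaseModel, Field, create_model
from pydantic.fields import FieldInfo


class ExampleBase(BaseModel):
    """Stand-in for BaseInvocation in this sketch."""


# Hypothetical node type, used only for illustration.
node_type = "example_add"
type_annotation = Literal[node_type]  # narrows "type" to a single literal value
# Field() is typed as Any; the cast mirrors the workaround used above.
type_field = cast(FieldInfo, Field(title="type", default=node_type))

# Rebuild the class so the new "type" field participates in validation and serialization.
ExampleAdd = create_model("ExampleAdd", __base__=ExampleBase, type=(type_annotation, type_field))

print(ExampleAdd().type)  # -> "example_add"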
@@ -572,29 +702,41 @@ def invocation_output(
|
||||
if re.compile(r"^\S+$").match(output_type) is None:
|
||||
raise ValueError(f'"output_type" must consist of non-whitespace characters, got "{output_type}"')
|
||||
|
||||
if output_type in InvocationRegistry.get_output_types():
|
||||
raise ValueError(f'Invocation type "{output_type}" already exists')
|
||||
|
||||
validate_fields(cls.model_fields, output_type)
|
||||
|
||||
# Add the output type to the model.
|
||||
fields: dict[str, tuple[Any, FieldInfo]] = {}
|
||||
|
||||
output_type_annotation = Literal[output_type] # type: ignore
|
||||
output_type_field = Field(
|
||||
title="type", default=output_type, json_schema_extra={"field_kind": FieldKind.NodeAttribute}
|
||||
for field_name, field_info in cls.model_fields.items():
|
||||
annotation = field_info.annotation
|
||||
assert annotation is not None, f"{field_name} on invocation output {output_type} has no type annotation."
|
||||
assert isinstance(field_info.json_schema_extra, dict), (
|
||||
f"{field_name} on invocation output {output_type} has a non-dict json_schema_extra, did you forget to use InputField?"
|
||||
)
|
||||
|
||||
cls._original_model_fields[field_name] = OriginalModelField(annotation=annotation, field_info=field_info)
|
||||
|
||||
if field_info.default is not PydanticUndefined and is_optional(annotation):
|
||||
annotation = annotation | None
|
||||
fields[field_name] = (annotation, field_info)
|
||||
|
||||
# Add the output type to the model.
|
||||
output_type_annotation = Literal[output_type]
|
||||
|
||||
# Field() returns an instance of FieldInfo, but thanks to a pydantic implementation detail, it is _typed_ as Any.
|
||||
# This cast makes the type annotation match the class's true type.
|
||||
output_type_field_info = cast(
|
||||
FieldInfo,
|
||||
Field(title="type", default=output_type, json_schema_extra={"field_kind": FieldKind.NodeAttribute}),
|
||||
)
|
||||
|
||||
fields["type"] = (output_type_annotation, output_type_field_info)
|
||||
|
||||
docstring = cls.__doc__
|
||||
cls = create_model(
|
||||
cls.__qualname__,
|
||||
__base__=cls,
|
||||
__module__=cls.__module__,
|
||||
type=(output_type_annotation, output_type_field),
|
||||
)
cls.__doc__ = docstring
new_class = create_model(cls.__qualname__, __base__=cls, __module__=cls.__module__, **fields)
new_class.__doc__ = docstring

InvocationRegistry.register_output(cls)
InvocationRegistry.register_output(new_class)

return cls
return new_class

return wrapper
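
For custom-node authors, the two decorators are used together: @invocation_output registers the output schema and @invocation registers the node itself, and invoke() must return an instance of the registered output. A hedged sketch of a custom node (the node below is invented for illustration and assumes the usual InputField/OutputField helpers from invokeai.app.invocations.fields):

from invokeai.app.invocations.baseinvocation import (
    BaseInvocation,
    BaseInvocationOutput,
    invocation,
    invocation_output,
)
from invokeai.app.invocations.fields import InputField, OutputField
from invokeai.app.services.shared.invocation_context import InvocationContext


@invocation_output("example_scale_output")
class ExampleScaleOutput(BaseInvocationOutput):
    value: float = OutputField(description="The scaled value")


@invocation("example_scale", title="Example Scale", tags=["math"], category="math", version="1.0.0")
class ExampleScaleInvocation(BaseInvocation):
    """Multiplies a value by a factor."""

    value: float = InputField(default=1.0, description="The value to scale")
    factor: float = InputField(default=2.0, description="The scale factor")

    def invoke(self, context: InvocationContext) -> ExampleScaleOutput:
        return ExampleScaleOutput(value=self.value * self.factor)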
@@ -64,7 +64,6 @@ class ImageBatchInvocation(BaseBatchInvocation):
|
||||
"""Create a batched generation, where the workflow is executed once for each image in the batch."""
|
||||
|
||||
images: list[ImageField] = InputField(
|
||||
default=[],
|
||||
min_length=1,
|
||||
description="The images to batch over",
|
||||
)
|
||||
@@ -120,7 +119,6 @@ class StringBatchInvocation(BaseBatchInvocation):
|
||||
"""Create a batched generation, where the workflow is executed once for each string in the batch."""
|
||||
|
||||
strings: list[str] = InputField(
|
||||
default=[],
|
||||
min_length=1,
|
||||
description="The strings to batch over",
|
||||
)
|
||||
@@ -176,7 +174,6 @@ class IntegerBatchInvocation(BaseBatchInvocation):
|
||||
"""Create a batched generation, where the workflow is executed once for each integer in the batch."""
|
||||
|
||||
integers: list[int] = InputField(
|
||||
default=[],
|
||||
min_length=1,
|
||||
description="The integers to batch over",
|
||||
)
|
||||
@@ -230,7 +227,6 @@ class FloatBatchInvocation(BaseBatchInvocation):
|
||||
"""Create a batched generation, where the workflow is executed once for each float in the batch."""
|
||||
|
||||
floats: list[float] = InputField(
|
||||
default=[],
|
||||
min_length=1,
|
||||
description="The floats to batch over",
|
||||
)
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
from typing import Iterator, List, Optional, Tuple, Union, cast
|
||||
|
||||
import torch
|
||||
from compel import Compel, ReturnedEmbeddingsType
|
||||
from compel import Compel, ReturnedEmbeddingsType, SplitLongTextMode
|
||||
from compel.prompt_parser import Blend, Conjunction, CrossAttentionControlSubstitute, FlattenedPrompt, Fragment
|
||||
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
|
||||
|
||||
@@ -104,6 +104,7 @@ class CompelInvocation(BaseInvocation):
|
||||
dtype_for_device_getter=TorchDevice.choose_torch_dtype,
|
||||
truncate_long_prompts=False,
|
||||
device=TorchDevice.choose_torch_device(),
|
||||
split_long_text_mode=SplitLongTextMode.SENTENCES,
|
||||
)
|
||||
|
||||
conjunction = Compel.parse_prompt_string(self.prompt)
|
||||
@@ -113,6 +114,13 @@ class CompelInvocation(BaseInvocation):
|
||||
|
||||
c, _options = compel.build_conditioning_tensor_for_conjunction(conjunction)
|
||||
|
||||
del compel
|
||||
del patched_tokenizer
|
||||
del tokenizer
|
||||
del ti_manager
|
||||
del text_encoder
|
||||
del text_encoder_info
|
||||
|
||||
c = c.detach().to("cpu")
|
||||
|
||||
conditioning_data = ConditioningFieldData(conditionings=[BasicConditioningInfo(embeds=c)])
|
||||
@@ -205,6 +213,7 @@ class SDXLPromptInvocationBase:
|
||||
returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED, # TODO: clip skip
|
||||
requires_pooled=get_pooled,
|
||||
device=TorchDevice.choose_torch_device(),
|
||||
split_long_text_mode=SplitLongTextMode.SENTENCES,
|
||||
)
|
||||
|
||||
conjunction = Compel.parse_prompt_string(prompt)
|
||||
@@ -220,7 +229,10 @@ class SDXLPromptInvocationBase:
|
||||
else:
|
||||
c_pooled = None
|
||||
|
||||
del compel
|
||||
del patched_tokenizer
|
||||
del tokenizer
|
||||
del ti_manager
|
||||
del text_encoder
|
||||
del text_encoder_info
|
||||
|
||||
|
||||
@@ -274,12 +274,12 @@ class InvokeAdjustImageHuePlusInvocation(BaseInvocation, WithMetadata, WithBoard
|
||||
title="Enhance Image",
|
||||
tags=["enhance", "image"],
|
||||
category="image",
|
||||
version="1.2.0",
|
||||
version="1.2.1",
|
||||
)
|
||||
class InvokeImageEnhanceInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Applies processing from PIL's ImageEnhance module. Originally created by @dwringer"""
|
||||
|
||||
image: ImageField = InputField(default=None, description="The image for which to apply processing")
|
||||
image: ImageField = InputField(description="The image for which to apply processing")
|
||||
invert: bool = InputField(default=False, description="Whether to invert the image colors")
|
||||
color: float = InputField(ge=0, default=1.0, description="Color enhancement factor")
|
||||
contrast: float = InputField(ge=0, default=1.0, description="Contrast enhancement factor")
|
||||
|
||||
@@ -22,7 +22,11 @@ from invokeai.app.invocations.model import ModelIdentifierField
|
||||
from invokeai.app.invocations.primitives import ImageOutput
|
||||
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.app.util.controlnet_utils import CONTROLNET_MODE_VALUES, CONTROLNET_RESIZE_VALUES, heuristic_resize
|
||||
from invokeai.app.util.controlnet_utils import (
|
||||
CONTROLNET_MODE_VALUES,
|
||||
CONTROLNET_RESIZE_VALUES,
|
||||
heuristic_resize_fast,
|
||||
)
|
||||
from invokeai.backend.image_util.util import np_to_pil, pil_to_np
|
||||
|
||||
|
||||
@@ -109,7 +113,7 @@ class ControlNetInvocation(BaseInvocation):
|
||||
title="Heuristic Resize",
|
||||
tags=["image, controlnet"],
|
||||
category="image",
|
||||
version="1.0.1",
|
||||
version="1.1.1",
|
||||
classification=Classification.Prototype,
|
||||
)
|
||||
class HeuristicResizeInvocation(BaseInvocation):
|
||||
@@ -122,7 +126,7 @@ class HeuristicResizeInvocation(BaseInvocation):
|
||||
def invoke(self, context: InvocationContext) -> ImageOutput:
|
||||
image = context.images.get_pil(self.image.image_name, "RGB")
|
||||
np_img = pil_to_np(image)
|
||||
np_resized = heuristic_resize(np_img, (self.width, self.height))
|
||||
np_resized = heuristic_resize_fast(np_img, (self.width, self.height))
|
||||
resized = np_to_pil(np_resized)
|
||||
image_dto = context.images.save(image=resized)
|
||||
return ImageOutput.build(image_dto)
|
||||
|
||||
@@ -1,12 +1,14 @@
|
||||
from typing import Literal, Optional
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
import torch
|
||||
import torchvision.transforms as T
|
||||
from PIL import Image, ImageFilter
|
||||
from PIL import Image
|
||||
from torchvision.transforms.functional import resize as tv_resize
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
|
||||
from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
|
||||
from invokeai.app.invocations.fields import (
|
||||
DenoiseMaskField,
|
||||
FieldDescriptions,
|
||||
@@ -42,15 +44,13 @@ class GradientMaskOutput(BaseInvocationOutput):
|
||||
title="Create Gradient Mask",
|
||||
tags=["mask", "denoise"],
|
||||
category="latents",
|
||||
version="1.2.0",
|
||||
version="1.3.0",
|
||||
)
|
||||
class CreateGradientMaskInvocation(BaseInvocation):
|
||||
"""Creates mask for denoising model run."""
|
||||
"""Creates mask for denoising."""
|
||||
|
||||
mask: ImageField = InputField(default=None, description="Image which will be masked", ui_order=1)
|
||||
edge_radius: int = InputField(
|
||||
default=16, ge=0, description="How far to blur/expand the edges of the mask", ui_order=2
|
||||
)
|
||||
mask: ImageField = InputField(description="Image which will be masked", ui_order=1)
|
||||
edge_radius: int = InputField(default=16, ge=0, description="How far to expand the edges of the mask", ui_order=2)
|
||||
coherence_mode: Literal["Gaussian Blur", "Box Blur", "Staged"] = InputField(default="Gaussian Blur", ui_order=3)
|
||||
minimum_denoise: float = InputField(
|
||||
default=0.0, ge=0, le=1, description="Minimum denoise level for the coherence region", ui_order=4
|
||||
@@ -81,45 +81,110 @@ class CreateGradientMaskInvocation(BaseInvocation):
|
||||
@torch.no_grad()
|
||||
def invoke(self, context: InvocationContext) -> GradientMaskOutput:
|
||||
mask_image = context.images.get_pil(self.mask.image_name, mode="L")
|
||||
|
||||
# Resize the mask_image. Makes the filter 64x faster and doesn't hurt quality in latent scale anyway
|
||||
mask_image = mask_image.resize(
|
||||
(
|
||||
mask_image.width // LATENT_SCALE_FACTOR,
|
||||
mask_image.height // LATENT_SCALE_FACTOR,
|
||||
),
|
||||
resample=Image.Resampling.BILINEAR,
|
||||
)
|
||||
|
||||
mask_np_orig = np.array(mask_image, dtype=np.float32)
|
||||
|
||||
self.edge_radius = self.edge_radius // LATENT_SCALE_FACTOR # scale the edge radius to match the mask size
|
||||
|
||||
if self.edge_radius > 0:
|
||||
mask_np = 255 - mask_np_orig # invert so 0 is unmasked (higher values = higher denoise strength)
|
||||
dilated_mask = mask_np.copy()
|
||||
|
||||
# Create kernel based on coherence mode
|
||||
if self.coherence_mode == "Box Blur":
|
||||
blur_mask = mask_image.filter(ImageFilter.BoxBlur(self.edge_radius))
|
||||
else: # Gaussian Blur OR Staged
|
||||
# Gaussian Blur uses standard deviation. 1/2 radius is a good approximation
|
||||
blur_mask = mask_image.filter(ImageFilter.GaussianBlur(self.edge_radius / 2))
|
||||
# Create a circular distance kernel that fades from center outward
|
||||
kernel_size = self.edge_radius * 2 + 1
|
||||
center = self.edge_radius
|
||||
kernel = np.zeros((kernel_size, kernel_size), dtype=np.float32)
|
||||
for i in range(kernel_size):
|
||||
for j in range(kernel_size):
|
||||
dist = np.sqrt((i - center) ** 2 + (j - center) ** 2)
|
||||
if dist <= self.edge_radius:
|
||||
kernel[i, j] = 1.0 - (dist / self.edge_radius)
|
||||
else: # Gaussian Blur or Staged
|
||||
# Create a Gaussian kernel
|
||||
kernel_size = self.edge_radius * 2 + 1
|
||||
kernel = cv2.getGaussianKernel(
|
||||
kernel_size, self.edge_radius / 2.5
|
||||
) # 2.5 is a magic number (standard deviation capturing)
|
||||
kernel = kernel * kernel.T # Make 2D gaussian kernel
|
||||
kernel = kernel / np.max(kernel) # Normalize center to 1.0
|
||||
|
||||
blur_tensor: torch.Tensor = image_resized_to_grid_as_tensor(blur_mask, normalize=False)
|
||||
# Ensure values outside radius are 0
|
||||
center = self.edge_radius
|
||||
for i in range(kernel_size):
|
||||
for j in range(kernel_size):
|
||||
dist = np.sqrt((i - center) ** 2 + (j - center) ** 2)
|
||||
if dist > self.edge_radius:
|
||||
kernel[i, j] = 0
|
||||
|
||||
# redistribute blur so that the original edges are 0 and blur outwards to 1
|
||||
blur_tensor = (blur_tensor - 0.5) * 2
|
||||
blur_tensor[blur_tensor < 0] = 0.0
|
||||
# 2D max filter
|
||||
mask_tensor = torch.tensor(mask_np)
|
||||
kernel_tensor = torch.tensor(kernel)
|
||||
dilated_mask = 255 - self.max_filter2D_torch(mask_tensor, kernel_tensor).cpu()
|
||||
dilated_mask = dilated_mask.numpy()
|
||||
|
||||
threshold = 1 - self.minimum_denoise
|
||||
threshold = (1 - self.minimum_denoise) * 255
|
||||
|
||||
if self.coherence_mode == "Staged":
|
||||
# wherever the blur_tensor is less than fully masked, convert it to threshold
|
||||
blur_tensor = torch.where((blur_tensor < 1) & (blur_tensor > 0), threshold, blur_tensor)
|
||||
else:
|
||||
# wherever the blur_tensor is above threshold but less than 1, drop it to threshold
blur_tensor = torch.where((blur_tensor > threshold) & (blur_tensor < 1), threshold, blur_tensor)
# wherever the expanded mask is darker than the original mask but the original was above the threshold, set it to the threshold
# makes any expansion areas drop to the threshold. Raising the minimum across the whole image happens outside of this if
threshold_mask = (dilated_mask < mask_np_orig) & (mask_np_orig > threshold)
dilated_mask = np.where(threshold_mask, threshold, mask_np_orig)

# wherever expanded mask is less than 255 but greater than threshold, drop it to threshold (minimum denoise)
threshold_mask = (dilated_mask > threshold) & (dilated_mask < 255)
dilated_mask = np.where(threshold_mask, threshold, dilated_mask)

else:
|
||||
blur_tensor: torch.Tensor = image_resized_to_grid_as_tensor(mask_image, normalize=False)
|
||||
dilated_mask = mask_np_orig.copy()
|
||||
|
||||
mask_name = context.tensors.save(tensor=blur_tensor.unsqueeze(1))
|
||||
# convert to tensor
|
||||
dilated_mask = np.clip(dilated_mask, 0, 255).astype(np.uint8)
|
||||
mask_tensor = torch.tensor(dilated_mask, device=torch.device("cpu"))
|
||||
|
||||
# compute a [0, 1] mask from the blur_tensor
|
||||
expanded_mask = torch.where((blur_tensor < 1), 0, 1)
|
||||
expanded_mask_image = Image.fromarray((expanded_mask.squeeze(0).numpy() * 255).astype(np.uint8), mode="L")
|
||||
# binary mask for compositing
|
||||
expanded_mask = np.where((dilated_mask < 255), 0, 255)
|
||||
expanded_mask_image = Image.fromarray(expanded_mask.astype(np.uint8), mode="L")
|
||||
expanded_mask_image = expanded_mask_image.resize(
|
||||
(
|
||||
mask_image.width * LATENT_SCALE_FACTOR,
|
||||
mask_image.height * LATENT_SCALE_FACTOR,
|
||||
),
|
||||
resample=Image.Resampling.NEAREST,
|
||||
)
|
||||
expanded_image_dto = context.images.save(expanded_mask_image)
|
||||
|
||||
# restore the original mask size
|
||||
dilated_mask = Image.fromarray(dilated_mask.astype(np.uint8))
|
||||
dilated_mask = dilated_mask.resize(
|
||||
(
|
||||
mask_image.width * LATENT_SCALE_FACTOR,
|
||||
mask_image.height * LATENT_SCALE_FACTOR,
|
||||
),
|
||||
resample=Image.Resampling.NEAREST,
|
||||
)
|
||||
|
||||
# stack the mask as a tensor, repeating 4 times on dimension 1
dilated_mask_tensor = image_resized_to_grid_as_tensor(dilated_mask, normalize=False)
mask_name = context.tensors.save(tensor=dilated_mask_tensor.unsqueeze(0))

masked_latents_name = None
if self.unet is not None and self.vae is not None and self.image is not None:
# all three fields must be present at the same time
main_model_config = context.models.get_config(self.unet.unet.key)
assert isinstance(main_model_config, MainConfigBase)
if main_model_config.variant is ModelVariantType.Inpaint:
mask = blur_tensor
mask = dilated_mask_tensor
vae_info: LoadedModel = context.models.load(self.vae.vae)
image = context.images.get_pil(self.image.image_name)
image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
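
A note on the resizing in this hunk: the mask is shrunk by LATENT_SCALE_FACTOR before the morphology and grown back with nearest-neighbour resampling afterwards, which is where the "64x faster" remark comes from (8 x 8 fewer pixels for the usual factor of 8). A small PIL-only sketch of that round trip, with illustrative sizes:

from PIL import Image

LATENT_SCALE_FACTOR = 8  # SD-style latents are 8x smaller than the image

mask = Image.new("L", (1024, 1024), 255)

# Work at latent resolution: 64x fewer pixels for an 8x factor.
small = mask.resize(
    (mask.width // LATENT_SCALE_FACTOR, mask.height // LATENT_SCALE_FACTOR),
    resample=Image.Resampling.BILINEAR,
)

# ...run the (now much cheaper) blur/dilation on the small mask here...

# Restore the original size; NEAREST keeps the mask edges hard.
restored = small.resize(
    (small.width * LATENT_SCALE_FACTOR, small.height * LATENT_SCALE_FACTOR),
    resample=Image.Resampling.NEAREST,
)
print(restored.size)  # (1024, 1024)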
@@ -137,3 +202,29 @@ class CreateGradientMaskInvocation(BaseInvocation):
|
||||
denoise_mask=DenoiseMaskField(mask_name=mask_name, masked_latents_name=masked_latents_name, gradient=True),
|
||||
expanded_mask_area=ImageField(image_name=expanded_image_dto.image_name),
|
||||
)
|
||||
|
||||
def max_filter2D_torch(self, image: torch.Tensor, kernel: torch.Tensor) -> torch.Tensor:
|
||||
"""
|
||||
This morphological operation is much faster in torch than numpy or opencv
|
||||
For reasonable kernel sizes, the overhead of copying the data to the GPU is not worth it.
|
||||
"""
|
||||
h, w = kernel.shape
|
||||
pad_h, pad_w = h // 2, w // 2
|
||||
|
||||
padded = torch.nn.functional.pad(image, (pad_w, pad_w, pad_h, pad_h), mode="constant", value=0)
|
||||
result = torch.zeros_like(image)
|
||||
|
||||
# This looks like it's inside out, but it does the same thing and is more efficient
|
||||
for i in range(h):
|
||||
for j in range(w):
|
||||
weight = kernel[i, j]
|
||||
if weight <= 0:
|
||||
continue
|
||||
|
||||
# Extract the region from padded tensor
|
||||
region = padded[i : i + image.shape[0], j : j + image.shape[1]]
|
||||
|
||||
# Apply weight and update max
|
||||
result = torch.maximum(result, region * weight)
|
||||
|
||||
return result
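
The weighted max filter above is effectively a grayscale dilation: every output pixel becomes the maximum of its kernel-weighted neighbours, so a mask edge grows outward with intensity falling off according to the kernel. A tiny self-contained check of that behaviour (the kernel and sizes here are arbitrary):

import torch


def max_filter2d(image: torch.Tensor, kernel: torch.Tensor) -> torch.Tensor:
    # Same idea as max_filter2D_torch above: slide the kernel and keep the
    # weighted maximum at every position (a grayscale dilation).
    h, w = kernel.shape
    pad_h, pad_w = h // 2, w // 2
    padded = torch.nn.functional.pad(image, (pad_w, pad_w, pad_h, pad_h), value=0)
    result = torch.zeros_like(image)
    for i in range(h):
        for j in range(w):
            if kernel[i, j] <= 0:
                continue
            region = padded[i : i + image.shape[0], j : j + image.shape[1]]
            result = torch.maximum(result, region * kernel[i, j])
    return result


# A single bright pixel spreads to its neighbours under a 3x3 kernel.
img = torch.zeros(5, 5)
img[2, 2] = 1.0
kernel = torch.tensor([[0.0, 0.5, 0.0], [0.5, 1.0, 0.5], [0.0, 0.5, 0.0]])
print(max_filter2d(img, kernel))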
@@ -608,6 +608,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
|
||||
end_step_percent=single_ip_adapter.end_step_percent,
|
||||
ip_adapter_conditioning=IPAdapterConditioningInfo(image_prompt_embeds, uncond_image_prompt_embeds),
|
||||
mask=mask,
|
||||
method=single_ip_adapter.method,
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@@ -61,6 +61,9 @@ class UIType(str, Enum, metaclass=MetaEnum):
|
||||
SigLipModel = "SigLipModelField"
|
||||
FluxReduxModel = "FluxReduxModelField"
|
||||
LlavaOnevisionModel = "LLaVAModelField"
|
||||
Imagen3Model = "Imagen3ModelField"
|
||||
Imagen4Model = "Imagen4ModelField"
|
||||
ChatGPT4oModel = "ChatGPT4oModelField"
|
||||
# endregion
|
||||
|
||||
# region Misc Field Types
|
||||
@@ -398,8 +401,8 @@ class InputFieldJSONSchemaExtra(BaseModel):
|
||||
"""
|
||||
|
||||
input: Input
|
||||
orig_required: bool
|
||||
field_kind: FieldKind
|
||||
orig_required: bool = True
|
||||
default: Optional[Any] = None
|
||||
orig_default: Optional[Any] = None
|
||||
ui_hidden: bool = False
|
||||
@@ -434,7 +437,7 @@ class WithWorkflow:
|
||||
workflow = None
|
||||
|
||||
def __init_subclass__(cls) -> None:
|
||||
logger.warn(
|
||||
logger.warning(
|
||||
f"{cls.__module__.split('.')[0]}.{cls.__name__}: WithWorkflow is deprecated. Use `context.workflow` to access the workflow."
|
||||
)
|
||||
super().__init_subclass__()
|
||||
@@ -496,7 +499,7 @@ def InputField(
|
||||
input: Input = Input.Any,
|
||||
ui_type: Optional[UIType] = None,
|
||||
ui_component: Optional[UIComponent] = None,
|
||||
ui_hidden: bool = False,
|
||||
ui_hidden: Optional[bool] = None,
|
||||
ui_order: Optional[int] = None,
|
||||
ui_choice_labels: Optional[dict[str, str]] = None,
|
||||
) -> Any:
|
||||
@@ -532,15 +535,20 @@ def InputField(
|
||||
|
||||
json_schema_extra_ = InputFieldJSONSchemaExtra(
|
||||
input=input,
|
||||
ui_type=ui_type,
|
||||
ui_component=ui_component,
|
||||
ui_hidden=ui_hidden,
|
||||
ui_order=ui_order,
|
||||
ui_choice_labels=ui_choice_labels,
|
||||
field_kind=FieldKind.Input,
|
||||
orig_required=True,
|
||||
)
|
||||
|
||||
if ui_type is not None:
|
||||
json_schema_extra_.ui_type = ui_type
|
||||
if ui_component is not None:
|
||||
json_schema_extra_.ui_component = ui_component
|
||||
if ui_hidden is not None:
|
||||
json_schema_extra_.ui_hidden = ui_hidden
|
||||
if ui_order is not None:
|
||||
json_schema_extra_.ui_order = ui_order
|
||||
if ui_choice_labels is not None:
|
||||
json_schema_extra_.ui_choice_labels = ui_choice_labels
|
||||
|
||||
"""
|
||||
There is a conflict between the typing of invocation definitions and the typing of an invocation's
|
||||
`invoke()` function.
|
||||
@@ -570,7 +578,7 @@ def InputField(
|
||||
|
||||
if default_factory is not _Unset and default_factory is not None:
|
||||
default = default_factory()
|
||||
logger.warn('"default_factory" is not supported, calling it now to set "default"')
|
||||
logger.warning('"default_factory" is not supported, calling it now to set "default"')
|
||||
|
||||
# These are the args we may wish pass to the pydantic `Field()` function
|
||||
field_args = {
|
||||
@@ -612,7 +620,7 @@ def InputField(
|
||||
|
||||
return Field(
|
||||
**provided_args,
|
||||
json_schema_extra=json_schema_extra_.model_dump(exclude_none=True),
|
||||
json_schema_extra=json_schema_extra_.model_dump(exclude_unset=True),
|
||||
)
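
The switch from exclude_none to exclude_unset changes which keys land in json_schema_extra: only values that were explicitly provided (at construction, or assigned afterwards as the ui_* hints are above) get serialized, so declared defaults like orig_required survive while untouched optional hints stay out of the schema. A small pydantic sketch of the difference, using a stand-in model rather than the real InputFieldJSONSchemaExtra:

from typing import Optional

from pydantic import BaseModel


class Extra(BaseModel):
    field_kind: str
    orig_required: bool = True          # default that callers rarely pass explicitly
    ui_hidden: Optional[bool] = None    # optional UI hint, often left unset


provided = Extra(field_kind="input", orig_required=True)
provided.ui_hidden = True  # assignment marks the field as "set" in pydantic v2
print(provided.model_dump(exclude_unset=True))
# {'field_kind': 'input', 'orig_required': True, 'ui_hidden': True}

untouched = Extra(field_kind="input")
print(untouched.model_dump(exclude_none=True))   # keeps orig_required, drops the None hint
print(untouched.model_dump(exclude_unset=True))  # keeps only what the caller provided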
@@ -21,14 +21,14 @@ class IdealSizeOutput(BaseInvocationOutput):
|
||||
"ideal_size",
|
||||
title="Ideal Size - SD1.5, SDXL",
|
||||
tags=["latents", "math", "ideal_size"],
|
||||
version="1.0.5",
|
||||
version="1.0.6",
|
||||
)
|
||||
class IdealSizeInvocation(BaseInvocation):
|
||||
"""Calculates the ideal size for generation to avoid duplication"""
|
||||
|
||||
width: int = InputField(default=1024, description="Final image width")
|
||||
height: int = InputField(default=576, description="Final image height")
|
||||
unet: UNetField = InputField(default=None, description=FieldDescriptions.unet)
|
||||
unet: UNetField = InputField(description=FieldDescriptions.unet)
|
||||
multiplier: float = InputField(
|
||||
default=1.0,
|
||||
description="Amount to multiply the model's dimensions by when calculating the ideal size (may result in "
|
||||
|
||||
@@ -975,13 +975,13 @@ class SaveImageInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
title="Canvas Paste Back",
|
||||
tags=["image", "combine"],
|
||||
category="image",
|
||||
version="1.0.0",
|
||||
version="1.0.1",
|
||||
)
|
||||
class CanvasPasteBackInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Combines two images by using the mask provided. Intended for use on the Unified Canvas."""
|
||||
|
||||
source_image: ImageField = InputField(description="The source image")
|
||||
target_image: ImageField = InputField(default=None, description="The target image")
|
||||
target_image: ImageField = InputField(description="The target image")
|
||||
mask: ImageField = InputField(
|
||||
description="The mask to use when pasting",
|
||||
)
|
||||
@@ -1218,12 +1218,15 @@ class ApplyMaskToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
title="Add Image Noise",
|
||||
tags=["image", "noise"],
|
||||
category="image",
|
||||
version="1.0.1",
|
||||
version="1.1.0",
|
||||
)
|
||||
class ImageNoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Add noise to an image"""
|
||||
|
||||
image: ImageField = InputField(description="The image to add noise to")
|
||||
mask: Optional[ImageField] = InputField(
|
||||
default=None, description="Optional mask determining where to apply noise (black=noise, white=no noise)"
|
||||
)
|
||||
seed: int = InputField(
|
||||
default=0,
|
||||
ge=0,
|
||||
@@ -1267,12 +1270,27 @@ class ImageNoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
noise = Image.fromarray(noise.astype(numpy.uint8), mode="RGB").resize(
|
||||
(image.width, image.height), Image.Resampling.NEAREST
|
||||
)
|
||||
|
||||
# Create a noisy version of the input image
|
||||
noisy_image = Image.blend(image.convert("RGB"), noise, self.amount).convert("RGBA")
|
||||
|
||||
# Paste back the alpha channel
|
||||
noisy_image.putalpha(alpha)
|
||||
# Apply mask if provided
|
||||
if self.mask is not None:
|
||||
mask_image = context.images.get_pil(self.mask.image_name, mode="L")
|
||||
|
||||
image_dto = context.images.save(image=noisy_image)
|
||||
if mask_image.size != image.size:
|
||||
mask_image = mask_image.resize(image.size, Image.Resampling.LANCZOS)
|
||||
|
||||
result_image = image.copy()
|
||||
mask_image = ImageOps.invert(mask_image)
|
||||
result_image.paste(noisy_image, (0, 0), mask=mask_image)
|
||||
else:
|
||||
result_image = noisy_image
|
||||
|
||||
# Paste back the alpha channel from the original image
|
||||
result_image.putalpha(alpha)
|
||||
|
||||
image_dto = context.images.save(image=result_image)
|
||||
|
||||
return ImageOutput.build(image_dto)
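
The masking branch above composites the noisy image back onto the original only where the (inverted) mask allows it, then restores the source alpha channel. A minimal PIL sketch of the same compositing step, independent of InvokeAI and using synthetic images:

from PIL import Image, ImageOps

base = Image.new("RGBA", (64, 64), (200, 200, 200, 255))
noisy = Image.new("RGBA", (64, 64), (30, 30, 30, 255))

# Black = apply noise, white = keep the original, matching the field description above.
mask = Image.new("L", (64, 64), 255)
mask.paste(0, (0, 0, 32, 64))  # noise only the left half

result = base.copy()
result.paste(noisy, (0, 0), mask=ImageOps.invert(mask))  # paste where the inverted mask is white
result.putalpha(base.getchannel("A"))  # restore the original alpha channel
print(result.getpixel((0, 0)), result.getpixel((63, 0)))  # (30, 30, 30, 255) (200, 200, 200, 255)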
@@ -31,6 +31,7 @@ class IPAdapterField(BaseModel):
|
||||
image_encoder_model: ModelIdentifierField = Field(description="The name of the CLIP image encoder model.")
|
||||
weight: Union[float, List[float]] = Field(default=1, description="The weight given to the IP-Adapter.")
|
||||
target_blocks: List[str] = Field(default=[], description="The IP Adapter blocks to apply")
|
||||
method: str = Field(default="full", description="Weight apply method")
|
||||
begin_step_percent: float = Field(
|
||||
default=0, ge=0, le=1, description="When the IP-Adapter is first applied (% of total steps)"
|
||||
)
|
||||
@@ -94,7 +95,7 @@ class IPAdapterInvocation(BaseInvocation):
|
||||
weight: Union[float, List[float]] = InputField(
|
||||
default=1, description="The weight given to the IP-Adapter", title="Weight"
|
||||
)
|
||||
method: Literal["full", "style", "composition"] = InputField(
|
||||
method: Literal["full", "style", "composition", "style_strong", "style_precise"] = InputField(
|
||||
default="full", description="The method to apply the IP-Adapter"
|
||||
)
|
||||
begin_step_percent: float = InputField(
|
||||
@@ -147,6 +148,38 @@ class IPAdapterInvocation(BaseInvocation):
|
||||
target_blocks = ["down_blocks.2.attentions.1"]
|
||||
else:
|
||||
raise ValueError(f"Unsupported IP-Adapter base type: '{ip_adapter_info.base}'.")
|
||||
elif self.method == "style_precise":
|
||||
if ip_adapter_info.base == "sd-1":
|
||||
target_blocks = ["up_blocks.1", "down_blocks.2", "mid_block"]
|
||||
elif ip_adapter_info.base == "sdxl":
|
||||
target_blocks = ["up_blocks.0.attentions.1", "down_blocks.2.attentions.1"]
|
||||
else:
|
||||
raise ValueError(f"Unsupported IP-Adapter base type: '{ip_adapter_info.base}'.")
|
||||
elif self.method == "style_strong":
|
||||
if ip_adapter_info.base == "sd-1":
|
||||
target_blocks = ["up_blocks.0", "up_blocks.1", "up_blocks.2", "down_blocks.0", "down_blocks.1"]
|
||||
elif ip_adapter_info.base == "sdxl":
|
||||
target_blocks = [
|
||||
"up_blocks.0.attentions.1",
|
||||
"up_blocks.1.attentions.1",
|
||||
"up_blocks.2.attentions.1",
|
||||
"up_blocks.0.attentions.2",
|
||||
"up_blocks.1.attentions.2",
|
||||
"up_blocks.2.attentions.2",
|
||||
"up_blocks.0.attentions.0",
|
||||
"up_blocks.1.attentions.0",
|
||||
"up_blocks.2.attentions.0",
|
||||
"down_blocks.0.attentions.0",
|
||||
"down_blocks.0.attentions.1",
|
||||
"down_blocks.0.attentions.2",
|
||||
"down_blocks.1.attentions.0",
|
||||
"down_blocks.1.attentions.1",
|
||||
"down_blocks.1.attentions.2",
|
||||
"down_blocks.2.attentions.0",
|
||||
"down_blocks.2.attentions.2",
|
||||
]
|
||||
else:
|
||||
raise ValueError(f"Unsupported IP-Adapter base type: '{ip_adapter_info.base}'.")
|
||||
elif self.method == "full":
|
||||
target_blocks = ["block"]
|
||||
else:
|
||||
@@ -162,6 +195,7 @@ class IPAdapterInvocation(BaseInvocation):
|
||||
begin_step_percent=self.begin_step_percent,
|
||||
end_step_percent=self.end_step_percent,
|
||||
mask=self.mask,
|
||||
method=self.method,
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
@@ -42,7 +42,9 @@ class IPAdapterMetadataField(BaseModel):
|
||||
image: ImageField = Field(description="The IP-Adapter image prompt.")
|
||||
ip_adapter_model: ModelIdentifierField = Field(description="The IP-Adapter model.")
|
||||
clip_vision_model: Literal["ViT-L", "ViT-H", "ViT-G"] = Field(description="The CLIP Vision model")
|
||||
method: Literal["full", "style", "composition"] = Field(description="Method to apply IP Weights with")
|
||||
method: Literal["full", "style", "composition", "style_strong", "style_precise"] = Field(
|
||||
description="Method to apply IP Weights with"
|
||||
)
|
||||
weight: Union[float, list[float]] = Field(description="The weight given to the IP-Adapter")
|
||||
begin_step_percent: float = Field(description="When the IP-Adapter is first applied (% of total steps)")
|
||||
end_step_percent: float = Field(description="When the IP-Adapter is last applied (% of total steps)")
|
||||
|
||||
@@ -6,7 +6,7 @@ import numpy as np
|
||||
import torch
|
||||
from PIL import Image
|
||||
from pydantic import BaseModel, Field
|
||||
from transformers import AutoModelForMaskGeneration, AutoProcessor
|
||||
from transformers import AutoProcessor
|
||||
from transformers.models.sam import SamModel
|
||||
from transformers.models.sam.processing_sam import SamProcessor
|
||||
|
||||
@@ -104,14 +104,13 @@ class SegmentAnythingInvocation(BaseInvocation):
|
||||
|
||||
@staticmethod
|
||||
def _load_sam_model(model_path: Path):
|
||||
sam_model = AutoModelForMaskGeneration.from_pretrained(
|
||||
sam_model = SamModel.from_pretrained(
|
||||
model_path,
|
||||
local_files_only=True,
|
||||
# TODO(ryand): Setting the torch_dtype here doesn't work. Investigate whether fp16 is supported by the
|
||||
# model, and figure out how to make it work in the pipeline.
|
||||
# torch_dtype=TorchDevice.choose_torch_dtype(),
|
||||
)
|
||||
assert isinstance(sam_model, SamModel)
|
||||
|
||||
sam_processor = AutoProcessor.from_pretrained(model_path, local_files_only=True)
|
||||
assert isinstance(sam_processor, SamProcessor)
|
||||
|
||||
@@ -1,12 +1,3 @@
|
||||
import uvicorn
|
||||
|
||||
from invokeai.app.invocations.load_custom_nodes import load_custom_nodes
|
||||
from invokeai.app.services.config.config_default import get_config
|
||||
from invokeai.app.util.torch_cuda_allocator import configure_torch_cuda_allocator
|
||||
from invokeai.backend.util.logging import InvokeAILogger
|
||||
from invokeai.frontend.cli.arg_parser import InvokeAIArgs
|
||||
|
||||
|
||||
def get_app():
|
||||
"""Import the app and event loop. We wrap this in a function to more explicitly control when it happens, because
|
||||
importing from api_app does a bunch of stuff - it's more like calling a function than importing a module.
|
||||
@@ -18,9 +9,18 @@ def get_app():
|
||||
|
||||
def run_app() -> None:
|
||||
"""The main entrypoint for the app."""
|
||||
# Parse the CLI arguments.
|
||||
from invokeai.frontend.cli.arg_parser import InvokeAIArgs
|
||||
|
||||
# Parse the CLI arguments before doing anything else, which ensures CLI args correctly override settings from other
|
||||
# sources like `invokeai.yaml` or env vars.
|
||||
InvokeAIArgs.parse_args()
|
||||
|
||||
import uvicorn
|
||||
|
||||
from invokeai.app.services.config.config_default import get_config
|
||||
from invokeai.app.util.torch_cuda_allocator import configure_torch_cuda_allocator
|
||||
from invokeai.backend.util.logging import InvokeAILogger
|
||||
|
||||
# Load config.
|
||||
app_config = get_config()
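
The reshuffling in this hunk exists so that CLI arguments are parsed before the config is read (letting flags override invokeai.yaml and environment settings) and so that the CUDA allocator is configured before anything imports torch. A minimal sketch of the allocator half of that ordering; the environment variable is the standard PyTorch one, and the value shown is only illustrative:

import os


def run() -> None:
    # Must be set before torch's CUDA allocator initializes, which is why the
    # torch-importing modules above are imported only after this point.
    os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "backend:cudaMallocAsync")

    import torch  # deferred import, mirroring the pattern in run_app()

    print(torch.__version__)


if __name__ == "__main__":
    run()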
@@ -32,6 +32,8 @@ def run_app() -> None:
|
||||
configure_torch_cuda_allocator(app_config.pytorch_cuda_alloc_conf, logger)
|
||||
|
||||
# This import must happen after configure_torch_cuda_allocator() is called, because the module imports torch.
|
||||
from invokeai.app.invocations.baseinvocation import InvocationRegistry
|
||||
from invokeai.app.invocations.load_custom_nodes import load_custom_nodes
|
||||
from invokeai.backend.util.devices import TorchDevice
|
||||
|
||||
torch_device_name = TorchDevice.get_torch_device_name()
|
||||
@@ -66,6 +68,15 @@ def run_app() -> None:
|
||||
# core nodes have been imported so that we can catch when a custom node clobbers a core node.
|
||||
load_custom_nodes(custom_nodes_path=app_config.custom_nodes_path, logger=logger)
|
||||
|
||||
# Check all invocations and ensure their outputs are registered.
|
||||
for invocation in InvocationRegistry.get_invocation_classes():
|
||||
invocation_type = invocation.get_type()
|
||||
output_annotation = invocation.get_output_annotation()
|
||||
if output_annotation not in InvocationRegistry.get_output_classes():
|
||||
logger.warning(
|
||||
f'Invocation "{invocation_type}" has unregistered output class "{output_annotation.__name__}"'
|
||||
)
|
||||
|
||||
if app_config.dev_reload:
|
||||
# load_custom_nodes seems to bypass jurrigged's import sniffer, so be sure to call it *after* they're already
|
||||
# imported.
|
||||
|
||||
@@ -98,9 +98,18 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
|
||||
FROM images
|
||||
LEFT JOIN board_images ON board_images.image_name = images.image_name
|
||||
WHERE 1=1
|
||||
"""
|
||||
|
||||
# Handle board_id filter
|
||||
if board_id == "none":
|
||||
stmt += """--sql
|
||||
AND board_images.board_id IS NULL
|
||||
"""
|
||||
else:
|
||||
stmt += """--sql
|
||||
AND board_images.board_id = ?
|
||||
"""
|
||||
params.append(board_id)
|
||||
params.append(board_id)
|
||||
|
||||
# Add the category filter
|
||||
if categories is not None:
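
The WHERE 1=1 idiom in this hunk lets each optional filter simply append an AND clause without tracking whether it is the first condition, with positional parameters collected alongside. A small standalone sqlite3 sketch of the same composition (the table and data are invented for the example):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE images (image_name TEXT, board_id TEXT)")
conn.execute("INSERT INTO images VALUES ('a.png', 'b1'), ('b.png', NULL)")


def list_images(board_id: str) -> list[str]:
    stmt = "SELECT image_name FROM images WHERE 1=1"
    params: list[str] = []
    if board_id == "none":
        stmt += " AND board_id IS NULL"  # "none" means images not assigned to any board
    else:
        stmt += " AND board_id = ?"
        params.append(board_id)
    return [row[0] for row in conn.execute(stmt, params)]


print(list_images("b1"))    # ['a.png']
print(list_images("none"))  # ['b.png']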
@@ -24,7 +24,6 @@ from invokeai.frontend.cli.arg_parser import InvokeAIArgs
|
||||
INIT_FILE = Path("invokeai.yaml")
|
||||
DB_FILE = Path("invokeai.db")
|
||||
LEGACY_INIT_FILE = Path("invokeai.init")
|
||||
DEVICE = Literal["auto", "cpu", "cuda", "cuda:1", "mps"]
|
||||
PRECISION = Literal["auto", "float16", "bfloat16", "float32"]
|
||||
ATTENTION_TYPE = Literal["auto", "normal", "xformers", "sliced", "torch-sdp"]
|
||||
ATTENTION_SLICE_SIZE = Literal["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8]
|
||||
@@ -93,7 +92,7 @@ class InvokeAIAppConfig(BaseSettings):
|
||||
vram: DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_vram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.
|
||||
lazy_offload: DEPRECATED: This setting is no longer used. Lazy-offloading is enabled by default. This config setting will be removed once the new model cache behavior is stable.
|
||||
pytorch_cuda_alloc_conf: Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to "backend:cudaMallocAsync" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally.
|
||||
device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `cuda:1`, `mps`
|
||||
device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `mps`, `cuda:N` (where N is a device number)
|
||||
precision: Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.<br>Valid values: `auto`, `float16`, `bfloat16`, `float32`
|
||||
sequential_guidance: Whether to calculate guidance in serial instead of in parallel, lowering memory requirements.
|
||||
attention_type: Attention type.<br>Valid values: `auto`, `normal`, `xformers`, `sliced`, `torch-sdp`
|
||||
@@ -176,7 +175,7 @@ class InvokeAIAppConfig(BaseSettings):
|
||||
pytorch_cuda_alloc_conf: Optional[str] = Field(default=None, description="Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to \"backend:cudaMallocAsync\" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally.")
|
||||
|
||||
# DEVICE
|
||||
device: DEVICE = Field(default="auto", description="Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.")
|
||||
device: str = Field(default="auto", description="Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `mps`, `cuda:N` (where N is a device number)", pattern=r"^(auto|cpu|mps|cuda(:\d+)?)$")
|
||||
precision: PRECISION = Field(default="auto", description="Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.")
|
||||
|
||||
# GENERATION
|
||||
|
||||
@@ -241,6 +241,7 @@ class QueueItemStatusChangedEvent(QueueItemEventBase):
|
||||
batch_status: BatchStatus = Field(description="The status of the batch")
|
||||
queue_status: SessionQueueStatus = Field(description="The status of the queue")
|
||||
session_id: str = Field(description="The ID of the session (aka graph execution state)")
|
||||
credits: Optional[float] = Field(default=None, description="The total credits used for this queue item")
|
||||
|
||||
@classmethod
|
||||
def build(
|
||||
@@ -263,6 +264,7 @@ class QueueItemStatusChangedEvent(QueueItemEventBase):
|
||||
completed_at=str(queue_item.completed_at) if queue_item.completed_at else None,
|
||||
batch_status=batch_status,
|
||||
queue_status=queue_status,
|
||||
credits=queue_item.credits,
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -196,9 +196,13 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
|
||||
# Search term condition
|
||||
if search_term:
|
||||
query_conditions += """--sql
|
||||
AND images.metadata LIKE ?
|
||||
AND (
|
||||
images.metadata LIKE ?
|
||||
OR images.created_at LIKE ?
|
||||
)
|
||||
"""
|
||||
query_params.append(f"%{search_term.lower()}%")
|
||||
query_params.append(f"%{search_term.lower()}%")
|
||||
|
||||
if starred_first:
|
||||
query_pagination = f"""--sql
|
||||
|
||||
@@ -78,7 +78,7 @@ class ImageService(ImageServiceABC):
|
||||
board_id=board_id, image_name=image_name
|
||||
)
|
||||
except Exception as e:
|
||||
self.__invoker.services.logger.warn(f"Failed to add image to board {board_id}: {str(e)}")
|
||||
self.__invoker.services.logger.warning(f"Failed to add image to board {board_id}: {str(e)}")
|
||||
self.__invoker.services.image_files.save(
|
||||
image_name=image_name, image=image, metadata=metadata, workflow=workflow, graph=graph
|
||||
)
|
||||
|
||||
@@ -27,6 +27,10 @@ if TYPE_CHECKING:
|
||||
from invokeai.app.services.invocation_stats.invocation_stats_base import InvocationStatsServiceBase
|
||||
from invokeai.app.services.model_images.model_images_base import ModelImageFileStorageBase
|
||||
from invokeai.app.services.model_manager.model_manager_base import ModelManagerServiceBase
|
||||
from invokeai.app.services.model_relationship_records.model_relationship_records_base import (
|
||||
ModelRelationshipRecordStorageBase,
|
||||
)
|
||||
from invokeai.app.services.model_relationships.model_relationships_base import ModelRelationshipsServiceABC
|
||||
from invokeai.app.services.names.names_base import NameServiceBase
|
||||
from invokeai.app.services.session_processor.session_processor_base import SessionProcessorBase
|
||||
from invokeai.app.services.session_queue.session_queue_base import SessionQueueBase
|
||||
@@ -54,6 +58,8 @@ class InvocationServices:
|
||||
logger: "Logger",
|
||||
model_images: "ModelImageFileStorageBase",
|
||||
model_manager: "ModelManagerServiceBase",
|
||||
model_relationships: "ModelRelationshipsServiceABC",
|
||||
model_relationship_records: "ModelRelationshipRecordStorageBase",
|
||||
download_queue: "DownloadQueueServiceBase",
|
||||
performance_statistics: "InvocationStatsServiceBase",
|
||||
session_queue: "SessionQueueBase",
|
||||
@@ -81,6 +87,8 @@ class InvocationServices:
|
||||
self.logger = logger
|
||||
self.model_images = model_images
|
||||
self.model_manager = model_manager
|
||||
self.model_relationships = model_relationships
|
||||
self.model_relationship_records = model_relationship_records
|
||||
self.download_queue = download_queue
|
||||
self.performance_statistics = performance_statistics
|
||||
self.session_queue = session_queue
|
||||
|
||||
@@ -60,7 +60,7 @@ class InvocationStatsServiceBase(ABC):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def reset_stats(self):
|
||||
def reset_stats(self, graph_execution_state_id: str) -> None:
|
||||
"""Reset all stored statistics."""
|
||||
pass
|
||||
|
||||
|
||||
@@ -73,9 +73,9 @@ class InvocationStatsService(InvocationStatsServiceBase):
|
||||
)
|
||||
self._stats[graph_execution_state_id].add_node_execution_stats(node_stats)
|
||||
|
||||
def reset_stats(self):
|
||||
self._stats = {}
|
||||
self._cache_stats = {}
|
||||
def reset_stats(self, graph_execution_state_id: str) -> None:
|
||||
self._stats.pop(graph_execution_state_id, None)
|
||||
self._cache_stats.pop(graph_execution_state_id, None)
|
||||
|
||||
def get_stats(self, graph_execution_state_id: str) -> InvocationStatsSummary:
|
||||
graph_stats_summary = self._get_graph_summary(graph_execution_state_id)
|
||||
|
||||
@@ -38,6 +38,7 @@ from invokeai.backend.model_manager.config import (
|
||||
AnyModelConfig,
|
||||
CheckpointConfigBase,
|
||||
InvalidModelConfigException,
|
||||
ModelConfigBase,
|
||||
)
|
||||
from invokeai.backend.model_manager.legacy_probe import ModelProbe
|
||||
from invokeai.backend.model_manager.metadata import (
|
||||
@@ -147,7 +148,7 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
def _clear_pending_jobs(self) -> None:
|
||||
for job in self.list_jobs():
|
||||
if not job.in_terminal_state:
|
||||
self._logger.warning("Cancelling job {job.id}")
|
||||
self._logger.warning(f"Cancelling job {job.id}")
|
||||
self.cancel_job(job)
|
||||
while True:
|
||||
try:
|
||||
@@ -646,14 +647,18 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
hash_algo = self._app_config.hashing_algorithm
|
||||
fields = config.model_dump()
|
||||
|
||||
return ModelProbe.probe(model_path=model_path, fields=fields, hash_algo=hash_algo)
|
||||
|
||||
# New model probe API is disabled pending resolution of issue caused by a change of the ordering of checks.
|
||||
# See commit message for details.
|
||||
# try:
|
||||
# return ModelConfigBase.classify(model_path=model_path, hash_algo=hash_algo, **fields)
|
||||
# except InvalidModelConfigException:
|
||||
# return ModelProbe.probe(model_path=model_path, fields=fields, hash_algo=hash_algo) # type: ignore
|
||||
# WARNING!
|
||||
# The legacy probe relies on the implicit order of tests to determine model classification.
|
||||
# This can lead to regressions between the legacy and new probes.
|
||||
# Do NOT change the order of `probe` and `classify` without implementing one of the following fixes:
|
||||
# Short-term fix: `classify` tests `matches` in the same order as the legacy probe.
|
||||
# Long-term fix: Improve `matches` to be more specific so that only one config matches
|
||||
# any given model - eliminating ambiguity and removing reliance on order.
|
||||
# After implementing either of these fixes, remove @pytest.mark.xfail from `test_regression_against_model_probe`
|
||||
try:
|
||||
return ModelProbe.probe(model_path=model_path, fields=fields, hash_algo=hash_algo) # type: ignore
|
||||
except InvalidModelConfigException:
|
||||
return ModelConfigBase.classify(model_path, hash_algo, **fields)
|
||||
|
||||
def _register(
|
||||
self, model_path: Path, config: Optional[ModelRecordChanges] = None, info: Optional[AnyModelConfig] = None
|
||||
|
||||
@@ -0,0 +1,25 @@
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
|
||||
class ModelRelationshipRecordStorageBase(ABC):
|
||||
"""Abstract base class for model-to-model relationship record storage."""
|
||||
|
||||
@abstractmethod
|
||||
def add_model_relationship(self, model_key_1: str, model_key_2: str) -> None:
|
||||
"""Creates a relationship between two models by keys."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def remove_model_relationship(self, model_key_1: str, model_key_2: str) -> None:
|
||||
"""Removes a relationship between two models by keys."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_related_model_keys(self, model_key: str) -> list[str]:
|
||||
"""Gets all models keys related to a given model key."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_related_model_keys_batch(self, model_keys: list[str]) -> list[str]:
|
||||
"""Get related model keys for multiple models given a list of keys."""
|
||||
pass
|
||||
@@ -0,0 +1,66 @@
|
||||
import sqlite3
|
||||
|
||||
from invokeai.app.services.model_relationship_records.model_relationship_records_base import (
|
||||
ModelRelationshipRecordStorageBase,
|
||||
)
|
||||
from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
|
||||
|
||||
|
||||
class SqliteModelRelationshipRecordStorage(ModelRelationshipRecordStorageBase):
|
||||
def __init__(self, db: SqliteDatabase) -> None:
|
||||
super().__init__()
|
||||
self._conn = db.conn
|
||||
|
||||
def add_model_relationship(self, model_key_1: str, model_key_2: str) -> None:
|
||||
if model_key_1 == model_key_2:
|
||||
raise ValueError("Cannot relate a model to itself.")
|
||||
a, b = sorted([model_key_1, model_key_2])
|
||||
try:
|
||||
cursor = self._conn.cursor()
|
||||
cursor.execute(
|
||||
"INSERT OR IGNORE INTO model_relationships (model_key_1, model_key_2) VALUES (?, ?)",
|
||||
(a, b),
|
||||
)
|
||||
self._conn.commit()
|
||||
except sqlite3.Error as e:
|
||||
self._conn.rollback()
|
||||
raise e
|
||||
|
||||
def remove_model_relationship(self, model_key_1: str, model_key_2: str) -> None:
|
||||
a, b = sorted([model_key_1, model_key_2])
|
||||
try:
|
||||
cursor = self._conn.cursor()
|
||||
cursor.execute(
|
||||
"DELETE FROM model_relationships WHERE model_key_1 = ? AND model_key_2 = ?",
|
||||
(a, b),
|
||||
)
|
||||
self._conn.commit()
|
||||
except sqlite3.Error as e:
|
||||
self._conn.rollback()
|
||||
raise e
|
||||
|
||||
def get_related_model_keys(self, model_key: str) -> list[str]:
|
||||
cursor = self._conn.cursor()
|
||||
cursor.execute(
|
||||
"""
|
||||
SELECT model_key_2 FROM model_relationships WHERE model_key_1 = ?
|
||||
UNION
|
||||
SELECT model_key_1 FROM model_relationships WHERE model_key_2 = ?
|
||||
""",
|
||||
(model_key, model_key),
|
||||
)
|
||||
return [row[0] for row in cursor.fetchall()]
|
||||
|
||||
def get_related_model_keys_batch(self, model_keys: list[str]) -> list[str]:
|
||||
cursor = self._conn.cursor()
|
||||
|
||||
key_list = ",".join("?" for _ in model_keys)
|
||||
cursor.execute(
|
||||
f"""
|
||||
SELECT model_key_2 FROM model_relationships WHERE model_key_1 IN ({key_list})
|
||||
UNION
|
||||
SELECT model_key_1 FROM model_relationships WHERE model_key_2 IN ({key_list})
|
||||
""",
|
||||
model_keys + model_keys,
|
||||
)
|
||||
return [row[0] for row in cursor.fetchall()]
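
Because each pair is stored exactly once with model_key_1 < model_key_2, lookups must check both columns, which is what the UNION above does; the sort at insert time is what makes (a, b) and (b, a) the same row. A small usage sketch against an in-memory database, with the schema reduced to just the relationships table:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE model_relationships ("
    " model_key_1 TEXT NOT NULL, model_key_2 TEXT NOT NULL,"
    " PRIMARY KEY (model_key_1, model_key_2))"
)


def add_relationship(key_1: str, key_2: str) -> None:
    a, b = sorted([key_1, key_2])  # canonical order prevents (a, b)/(b, a) duplicates
    conn.execute(
        "INSERT OR IGNORE INTO model_relationships (model_key_1, model_key_2) VALUES (?, ?)",
        (a, b),
    )


def related(key: str) -> list[str]:
    rows = conn.execute(
        "SELECT model_key_2 FROM model_relationships WHERE model_key_1 = ?"
        " UNION "
        "SELECT model_key_1 FROM model_relationships WHERE model_key_2 = ?",
        (key, key),
    )
    return [row[0] for row in rows]


add_relationship("lora-b", "main-a")
add_relationship("main-a", "lora-b")  # ignored: same canonical pair
print(related("main-a"))  # ['lora-b']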
@@ -0,0 +1,25 @@
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
|
||||
class ModelRelationshipsServiceABC(ABC):
|
||||
"""High-level service for managing model-to-model relationships."""
|
||||
|
||||
@abstractmethod
|
||||
def add_model_relationship(self, model_key_1: str, model_key_2: str) -> None:
|
||||
"""Creates a relationship between two models keys."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def remove_model_relationship(self, model_key_1: str, model_key_2: str) -> None:
|
||||
"""Removes a relationship between two models keys."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_related_model_keys(self, model_key: str) -> list[str]:
|
||||
"""Gets all models keys related to a given model key."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_related_model_keys_batch(self, model_keys: list[str]) -> list[str]:
|
||||
"""Get related model keys for multiple models."""
|
||||
pass
|
||||
@@ -0,0 +1,9 @@
|
||||
from datetime import datetime
|
||||
|
||||
from invokeai.app.util.model_exclude_null import BaseModelExcludeNull
|
||||
|
||||
|
||||
class ModelRelationship(BaseModelExcludeNull):
|
||||
model_key_1: str
|
||||
model_key_2: str
|
||||
created_at: datetime
|
||||
@@ -0,0 +1,31 @@
|
||||
from invokeai.app.services.invoker import Invoker
|
||||
from invokeai.app.services.model_relationships.model_relationships_base import ModelRelationshipsServiceABC
|
||||
from invokeai.backend.model_manager.config import AnyModelConfig
|
||||
|
||||
|
||||
class ModelRelationshipsService(ModelRelationshipsServiceABC):
|
||||
__invoker: Invoker
|
||||
|
||||
def start(self, invoker: Invoker) -> None:
|
||||
self.__invoker = invoker
|
||||
|
||||
def add_model_relationship(self, model_key_1: str, model_key_2: str) -> None:
|
||||
self.__invoker.services.model_relationship_records.add_model_relationship(model_key_1, model_key_2)
|
||||
|
||||
def remove_model_relationship(self, model_key_1: str, model_key_2: str) -> None:
|
||||
self.__invoker.services.model_relationship_records.remove_model_relationship(model_key_1, model_key_2)
|
||||
|
||||
def get_related_model_keys(self, model_key: str) -> list[str]:
|
||||
return self.__invoker.services.model_relationship_records.get_related_model_keys(model_key)
|
||||
|
||||
def add_relationship_from_models(self, model_1: AnyModelConfig, model_2: AnyModelConfig) -> None:
|
||||
self.add_model_relationship(model_1.key, model_2.key)
|
||||
|
||||
def remove_relationship_from_models(self, model_1: AnyModelConfig, model_2: AnyModelConfig) -> None:
|
||||
self.remove_model_relationship(model_1.key, model_2.key)
|
||||
|
||||
def get_related_keys_from_model(self, model: AnyModelConfig) -> list[str]:
|
||||
return self.get_related_model_keys(model.key)
|
||||
|
||||
def get_related_model_keys_batch(self, model_keys: list[str]) -> list[str]:
|
||||
return self.__invoker.services.model_relationship_records.get_related_model_keys_batch(model_keys)
|
||||
@@ -1,3 +1,4 @@
|
||||
import gc
|
||||
import traceback
|
||||
from contextlib import suppress
|
||||
from threading import BoundedSemaphore, Thread
|
||||
@@ -210,7 +211,7 @@ class DefaultSessionRunner(SessionRunnerBase):
|
||||
# we don't care about that - suppress the error.
|
||||
with suppress(GESStatsNotFoundError):
|
||||
self._services.performance_statistics.log_stats(queue_item.session.id)
|
||||
self._services.performance_statistics.reset_stats()
|
||||
self._services.performance_statistics.reset_stats(queue_item.session.id)
|
||||
|
||||
for callback in self._on_after_run_session_callbacks:
|
||||
callback(queue_item=queue_item)
|
||||
@@ -439,6 +440,12 @@ class DefaultSessionProcessor(SessionProcessorBase):
|
||||
poll_now_event.wait(self._polling_interval)
|
||||
continue
|
||||
|
||||
# GC-ing here can reduce peak memory usage of the invoke process by freeing allocated memory blocks.
|
||||
# Most queue items take seconds to execute, so the relative cost of a GC is very small.
|
||||
# Python will never cede allocated memory back to the OS, so anything we can do to reduce the peak
|
||||
# allocation is well worth it.
|
||||
gc.collect()
|
||||
|
||||
self._invoker.services.logger.info(
|
||||
f"Executing queue item {self._queue_item.item_id}, session {self._queue_item.session_id}"
|
||||
)
|
||||
|
||||
@@ -148,7 +148,7 @@ class Batch(BaseModel):
|
||||
node = cast(BaseInvocation, graph.get_node(batch_data.node_path))
|
||||
except NodeNotFoundError:
|
||||
raise NodeNotFoundError(f"Node {batch_data.node_path} not found in graph")
|
||||
if batch_data.field_name not in node.model_fields:
|
||||
if batch_data.field_name not in type(node).model_fields:
|
||||
raise NodeNotFoundError(f"Field {batch_data.field_name} not found in node {batch_data.node_path}")
|
||||
return values
|
||||
|
||||
@@ -257,6 +257,7 @@ class SessionQueueItemWithoutGraph(BaseModel):
|
||||
api_output_fields: Optional[list[FieldIdentifier]] = Field(
|
||||
default=None, description="The nodes that were used as output from the API"
|
||||
)
|
||||
credits: Optional[float] = Field(default=None, description="The total credits used for this queue item")
|
||||
|
||||
@classmethod
|
||||
def queue_item_dto_from_dict(cls, queue_item_dict: dict) -> "SessionQueueItemDTO":
|
||||
|
||||
@@ -104,11 +104,7 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
return cast(Union[int, None], cursor.fetchone()[0]) or 0
|
||||
|
||||
async def enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult:
|
||||
return await asyncio.to_thread(self._enqueue_batch, queue_id, batch, prepend)
|
||||
|
||||
def _enqueue_batch(self, queue_id: str, batch: Batch, prepend: bool) -> EnqueueBatchResult:
|
||||
try:
|
||||
cursor = self._conn.cursor()
|
||||
# TODO: how does this work in a multi-user scenario?
|
||||
current_queue_size = self._get_current_queue_size(queue_id)
|
||||
max_queue_size = self.__invoker.services.configuration.max_queue_size
|
||||
@@ -118,8 +114,12 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
if prepend:
|
||||
priority = self._get_highest_priority(queue_id) + 1
|
||||
|
||||
requested_count = calc_session_count(batch)
|
||||
values_to_insert = prepare_values_to_insert(
|
||||
requested_count = await asyncio.to_thread(
|
||||
calc_session_count,
|
||||
batch=batch,
|
||||
)
|
||||
values_to_insert = await asyncio.to_thread(
|
||||
prepare_values_to_insert,
|
||||
queue_id=queue_id,
|
||||
batch=batch,
|
||||
priority=priority,
|
||||
@@ -127,19 +127,16 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
)
|
||||
enqueued_count = len(values_to_insert)
|
||||
|
||||
if requested_count > enqueued_count:
|
||||
values_to_insert = values_to_insert[:max_new_queue_items]
|
||||
|
||||
cursor.executemany(
|
||||
"""--sql
|
||||
INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority, workflow, origin, destination, retried_from_item_id)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
""",
|
||||
values_to_insert,
|
||||
)
|
||||
self._conn.commit()
|
||||
with self._conn:
|
||||
cursor = self._conn.cursor()
|
||||
cursor.executemany(
|
||||
"""--sql
|
||||
INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority, workflow, origin, destination, retried_from_item_id)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
""",
|
||||
values_to_insert,
|
||||
)
|
||||
except Exception:
|
||||
self._conn.rollback()
|
||||
raise
|
||||
enqueue_result = EnqueueBatchResult(
|
||||
queue_id=queue_id,
|
||||
@@ -424,7 +424,7 @@ class Graph(BaseModel):
|
||||
)
|
||||
|
||||
# input fields are on the node
|
||||
if edge.destination.field not in destination_node.model_fields:
|
||||
if edge.destination.field not in type(destination_node).model_fields:
|
||||
raise NodeFieldNotFoundError(
|
||||
f"Edge destination field {edge.destination.field} does not exist in node {edge.destination.node_id}"
|
||||
)
|
||||
|
||||
@@ -22,6 +22,7 @@ from invokeai.app.services.shared.sqlite_migrator.migrations.migration_16 import
|
||||
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_17 import build_migration_17
|
||||
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_18 import build_migration_18
|
||||
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_19 import build_migration_19
|
||||
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_20 import build_migration_20
|
||||
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_impl import SqliteMigrator
|
||||
|
||||
|
||||
@@ -61,6 +62,7 @@ def init_db(config: InvokeAIAppConfig, logger: Logger, image_files: ImageFileSto
|
||||
migrator.register_migration(build_migration_17())
|
||||
migrator.register_migration(build_migration_18())
|
||||
migrator.register_migration(build_migration_19(app_config=config))
|
||||
migrator.register_migration(build_migration_20())
|
||||
migrator.run_migrations()
|
||||
|
||||
return db
|
||||
|
||||
@@ -0,0 +1,37 @@
|
||||
import sqlite3
|
||||
|
||||
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration
|
||||
|
||||
|
||||
class Migration20Callback:
|
||||
def __call__(self, cursor: sqlite3.Cursor) -> None:
|
||||
cursor.execute(
|
||||
"""
|
||||
-- many-to-many relationship table for models
|
||||
CREATE TABLE IF NOT EXISTS model_relationships (
|
||||
-- model_key_1 and model_key_2 are the same as the key(primary key) in the models table
|
||||
model_key_1 TEXT NOT NULL,
|
||||
model_key_2 TEXT NOT NULL,
|
||||
created_at TEXT DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
PRIMARY KEY (model_key_1, model_key_2),
|
||||
-- model_key_1 < model_key_2, to ensure uniqueness and prevent duplicates
|
||||
FOREIGN KEY (model_key_1) REFERENCES models(id) ON DELETE CASCADE,
|
||||
FOREIGN KEY (model_key_2) REFERENCES models(id) ON DELETE CASCADE
|
||||
);
|
||||
"""
|
||||
)
|
||||
cursor.execute(
|
||||
"""
|
||||
-- Creates an index to keep performance equal when searching for model_key_1 or model_key_2
|
||||
CREATE INDEX IF NOT EXISTS keyx_model_relationships_model_key_2
|
||||
ON model_relationships(model_key_2)
|
||||
"""
|
||||
)
|
||||
|
||||
|
||||
def build_migration_20() -> Migration:
|
||||
return Migration(
|
||||
from_version=19,
|
||||
to_version=20,
|
||||
callback=Migration20Callback(),
|
||||
)
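A hedged usage sketch for the model_relationships table created by this migration. The helper functions below are hypothetical; they only illustrate the stated convention that the smaller key is stored in model_key_1 so each unordered pair appears exactly once, and that lookups on either column stay indexed (the primary key covers model_key_1, the new index covers model_key_2):

import sqlite3

def add_relationship(conn: sqlite3.Connection, key_a: str, key_b: str) -> None:
    key_1, key_2 = sorted((key_a, key_b))  # enforce model_key_1 < model_key_2
    with conn:
        conn.execute(
            "INSERT OR IGNORE INTO model_relationships (model_key_1, model_key_2) VALUES (?, ?)",
            (key_1, key_2),
        )

def related_model_keys(conn: sqlite3.Connection, key: str) -> list[str]:
    rows = conn.execute(
        "SELECT model_key_2 FROM model_relationships WHERE model_key_1 = ? "
        "UNION SELECT model_key_1 FROM model_relationships WHERE model_key_2 = ?",
        (key, key),
    ).fetchall()
    return [row[0] for row in rows]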
|
||||
@@ -230,6 +230,86 @@ def heuristic_resize(np_img: np.ndarray[Any, Any], size: tuple[int, int]) -> np.
|
||||
return resized
|
||||
|
||||
|
||||
# precompute common kernels
|
||||
_KERNEL3 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
|
||||
# directional masks for NMS
|
||||
_DIRS = [
|
||||
np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], np.uint8),
|
||||
np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], np.uint8),
|
||||
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], np.uint8),
|
||||
np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], np.uint8),
|
||||
]
|
||||
|
||||
|
||||
def heuristic_resize_fast(np_img: np.ndarray, size: tuple[int, int]) -> np.ndarray:
|
||||
h, w = np_img.shape[:2]
|
||||
# early exit
|
||||
if (w, h) == size:
|
||||
return np_img
|
||||
|
||||
# separate alpha channel
|
||||
img = np_img
|
||||
alpha = None
|
||||
if img.ndim == 3 and img.shape[2] == 4:
|
||||
alpha, img = img[:, :, 3], img[:, :, :3]
|
||||
|
||||
# build small sample for unique‐color & binary detection
|
||||
flat = img.reshape(-1, img.shape[-1])
|
||||
N = flat.shape[0]
|
||||
# include four corners to avoid missing extreme values
|
||||
corners = np.vstack([img[0, 0], img[0, w - 1], img[h - 1, 0], img[h - 1, w - 1]])
|
||||
cnt = min(N, 100_000)
|
||||
samp = np.vstack([corners, flat[np.random.choice(N, cnt, replace=False)]])
|
||||
uc = np.unique(samp, axis=0).shape[0]
|
||||
vmin, vmax = samp.min(), samp.max()
|
||||
|
||||
# detect binary edge map & one‐pixel‐edge case
|
||||
is_binary = uc == 2 and vmin < 16 and vmax > 240
|
||||
one_pixel_edge = False
|
||||
if is_binary:
|
||||
# single gray conversion
|
||||
gray0 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
|
||||
grad = cv2.morphologyEx(gray0, cv2.MORPH_GRADIENT, _KERNEL3)
|
||||
cnt_edge = cv2.countNonZero(grad)
|
||||
cnt_all = cv2.countNonZero((gray0 > 127).astype(np.uint8))
|
||||
one_pixel_edge = (2 * cnt_edge) > cnt_all
|
||||
|
||||
# choose interp for color/seg/grayscale
|
||||
area_new, area_old = size[0] * size[1], w * h
|
||||
if 2 < uc < 200: # segmentation map
|
||||
interp = cv2.INTER_NEAREST
|
||||
elif area_new < area_old:
|
||||
interp = cv2.INTER_AREA
|
||||
else:
|
||||
interp = cv2.INTER_CUBIC
|
||||
|
||||
# single resize pass on RGB
|
||||
resized = cv2.resize(img, size, interpolation=interp)
|
||||
|
||||
if is_binary:
|
||||
# convert to gray & apply NMS via C++ dilate
|
||||
gray_r = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
|
||||
nms = np.zeros_like(gray_r)
|
||||
for K in _DIRS:
|
||||
d = cv2.dilate(gray_r, K)
|
||||
mask = d == gray_r
|
||||
nms[mask] = gray_r[mask]
|
||||
|
||||
# threshold + thinning if needed
|
||||
_, bw = cv2.threshold(nms, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
|
||||
out_bin = cv2.ximgproc.thinning(bw) if one_pixel_edge else bw
|
||||
# restore 3 channels
|
||||
resized = np.stack([out_bin] * 3, axis=2)
|
||||
|
||||
# restore alpha with same interp as RGB for consistency
|
||||
if alpha is not None:
|
||||
am = cv2.resize(alpha, size, interpolation=interp)
|
||||
am = (am > 127).astype(np.uint8) * 255
|
||||
resized = np.dstack((resized, am))
|
||||
|
||||
return resized
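A small, hedged usage sketch for heuristic_resize_fast. The inputs are synthetic and only meant to exercise two of the interpolation paths (many-colour photo versus few-colour segmentation map); the binary edge-map branch is not shown because its thinning step relies on cv2.ximgproc from opencv-contrib:

import numpy as np

photo = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)         # many colours: AREA/CUBIC path
seg_map = np.random.choice([0, 37, 80], (480, 640, 3)).astype(np.uint8)  # few colours: NEAREST path

for img in (photo, seg_map):
    out = heuristic_resize_fast(img, (512, 512))
    assert out.shape[:2] == (512, 512)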
|
||||
|
||||
|
||||
###########################################################################
|
||||
# Copied from detectmap_proc method in scripts/detectmap_proc.py in Mikubill/sd-webui-controlnet
|
||||
# modified for InvokeAI
|
||||
@@ -244,7 +324,7 @@ def np_img_resize(
|
||||
np_img = normalize_image_channel_count(np_img)
|
||||
|
||||
if resize_mode == "just_resize": # RESIZE
|
||||
np_img = heuristic_resize(np_img, (w, h))
|
||||
np_img = heuristic_resize_fast(np_img, (w, h))
|
||||
np_img = clone_contiguous(np_img)
|
||||
return np_img_to_torch(np_img, device), np_img
|
||||
|
||||
@@ -265,7 +345,7 @@ def np_img_resize(
|
||||
# Inpaint hijack
|
||||
high_quality_border_color[3] = 255
|
||||
high_quality_background = np.tile(high_quality_border_color[None, None], [h, w, 1])
|
||||
np_img = heuristic_resize(np_img, (safeint(old_w * k), safeint(old_h * k)))
|
||||
np_img = heuristic_resize_fast(np_img, (safeint(old_w * k), safeint(old_h * k)))
|
||||
new_h, new_w, _ = np_img.shape
|
||||
pad_h = max(0, (h - new_h) // 2)
|
||||
pad_w = max(0, (w - new_w) // 2)
|
||||
@@ -275,7 +355,7 @@ def np_img_resize(
|
||||
return np_img_to_torch(np_img, device), np_img
|
||||
else: # resize_mode == "crop_resize" (INNER_FIT)
|
||||
k = max(k0, k1)
|
||||
np_img = heuristic_resize(np_img, (safeint(old_w * k), safeint(old_h * k)))
|
||||
np_img = heuristic_resize_fast(np_img, (safeint(old_w * k), safeint(old_h * k)))
|
||||
new_h, new_w, _ = np_img.shape
|
||||
pad_h = max(0, (new_h - h) // 2)
|
||||
pad_w = max(0, (new_w - w) // 2)
|
||||
|
||||
@@ -12,6 +12,9 @@ from invokeai.app.invocations.fields import InputFieldJSONSchemaExtra, OutputFie
|
||||
from invokeai.app.invocations.model import ModelIdentifierField
|
||||
from invokeai.app.services.events.events_common import EventBase
|
||||
from invokeai.app.services.session_processor.session_processor_common import ProgressImage
|
||||
from invokeai.backend.util.logging import InvokeAILogger
|
||||
|
||||
logger = InvokeAILogger.get_logger()
|
||||
|
||||
|
||||
def move_defs_to_top_level(openapi_schema: dict[str, Any], component_schema: dict[str, Any]) -> None:
|
||||
@@ -61,6 +64,10 @@ def get_openapi_func(
|
||||
# We need to manually add all outputs to the schema - pydantic doesn't add them because they aren't used directly.
|
||||
for output in InvocationRegistry.get_output_classes():
|
||||
json_schema = output.model_json_schema(mode="serialization", ref_template="#/components/schemas/{model}")
|
||||
# Remove output_meta, which is only used on the back-end, from the schema
|
||||
if "output_meta" in json_schema["properties"]:
|
||||
json_schema["properties"].pop("output_meta")
|
||||
|
||||
move_defs_to_top_level(openapi_schema, json_schema)
|
||||
openapi_schema["components"]["schemas"][output.__name__] = json_schema
|
||||
|
||||
|
||||
@@ -10,7 +10,7 @@ def get_timestamp() -> int:
|
||||
|
||||
|
||||
def get_iso_timestamp() -> str:
|
||||
return datetime.datetime.utcnow().isoformat()
|
||||
return datetime.datetime.now(datetime.timezone.utc).isoformat()
|
||||
|
||||
|
||||
def get_datetime_from_iso_timestamp(iso_timestamp: str) -> datetime.datetime:
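For context on the change above: utcnow() returns a naive datetime (and is deprecated as of Python 3.12), while now(timezone.utc) returns an aware one, so the ISO string carries an explicit offset. A quick illustration, with example outputs in the comments:

import datetime

naive = datetime.datetime.utcnow().isoformat()                    # e.g. "2025-01-01T12:00:00.000000"
aware = datetime.datetime.now(datetime.timezone.utc).isoformat()  # e.g. "2025-01-01T12:00:00.000000+00:00"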
|
||||
|
||||
@@ -42,4 +42,5 @@ IP-Adapters:
|
||||
- [InvokeAI/ip_adapter_plus_sd15](https://huggingface.co/InvokeAI/ip_adapter_plus_sd15)
|
||||
- [InvokeAI/ip_adapter_plus_face_sd15](https://huggingface.co/InvokeAI/ip_adapter_plus_face_sd15)
|
||||
- [InvokeAI/ip_adapter_sdxl](https://huggingface.co/InvokeAI/ip_adapter_sdxl)
|
||||
- [InvokeAI/ip_adapter_sdxl_vit_h](https://huggingface.co/InvokeAI/ip_adapter_sdxl_vit_h)
|
||||
- [InvokeAI/ip_adapter_sdxl_vit_h](https://huggingface.co/InvokeAI/ip_adapter_sdxl_vit_h)
|
||||
- [InvokeAI/ip-adapter-plus_sdxl_vit-h](https://huggingface.co/InvokeAI/ip-adapter-plus_sdxl_vit-h)
|
||||
@@ -144,34 +144,37 @@ class ModelConfigBase(ABC, BaseModel):
|
||||
submodels: Optional[Dict[SubModelType, SubmodelDefinition]] = Field(
|
||||
description="Loadable submodels in this model", default=None
|
||||
)
|
||||
usage_info: Optional[str] = Field(default=None, description="Usage information for this model")
|
||||
|
||||
_USING_LEGACY_PROBE: ClassVar[set] = set()
|
||||
_USING_CLASSIFY_API: ClassVar[set] = set()
|
||||
USING_LEGACY_PROBE: ClassVar[set] = set()
|
||||
USING_CLASSIFY_API: ClassVar[set] = set()
|
||||
_MATCH_SPEED: ClassVar[MatchSpeed] = MatchSpeed.MED
|
||||
|
||||
def __init_subclass__(cls, **kwargs):
|
||||
super().__init_subclass__(**kwargs)
|
||||
if issubclass(cls, LegacyProbeMixin):
|
||||
ModelConfigBase._USING_LEGACY_PROBE.add(cls)
|
||||
ModelConfigBase.USING_LEGACY_PROBE.add(cls)
|
||||
else:
|
||||
ModelConfigBase._USING_CLASSIFY_API.add(cls)
|
||||
ModelConfigBase.USING_CLASSIFY_API.add(cls)
|
||||
|
||||
@staticmethod
|
||||
def all_config_classes():
|
||||
subclasses = ModelConfigBase._USING_LEGACY_PROBE | ModelConfigBase._USING_CLASSIFY_API
|
||||
subclasses = ModelConfigBase.USING_LEGACY_PROBE | ModelConfigBase.USING_CLASSIFY_API
|
||||
concrete = {cls for cls in subclasses if not isabstract(cls)}
|
||||
return concrete
|
||||
|
||||
@staticmethod
|
||||
def classify(model_path: Path, hash_algo: HASHING_ALGORITHMS = "blake3_single", **overrides):
|
||||
def classify(mod: str | Path | ModelOnDisk, hash_algo: HASHING_ALGORITHMS = "blake3_single", **overrides):
|
||||
"""
|
||||
Returns the best matching ModelConfig instance from a model's file/folder path.
|
||||
Raises InvalidModelConfigException if no valid configuration is found.
|
||||
Created to deprecate ModelProbe.probe
|
||||
"""
|
||||
candidates = ModelConfigBase._USING_CLASSIFY_API
|
||||
if isinstance(mod, Path | str):
|
||||
mod = ModelOnDisk(mod, hash_algo)
|
||||
|
||||
candidates = ModelConfigBase.USING_CLASSIFY_API
|
||||
sorted_by_match_speed = sorted(candidates, key=lambda cls: (cls._MATCH_SPEED, cls.__name__))
|
||||
mod = ModelOnDisk(model_path, hash_algo)
|
||||
|
||||
for config_cls in sorted_by_match_speed:
|
||||
try:
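A hedged usage sketch for the reworked classify() entry point, which now accepts a string, a Path, or an already-constructed ModelOnDisk; the file path below is a placeholder:

from pathlib import Path

# From a path (a ModelOnDisk is built internally):
config = ModelConfigBase.classify(Path("/path/to/model.safetensors"), hash_algo="blake3_single")

# Or reuse an existing ModelOnDisk instance:
mod = ModelOnDisk(Path("/path/to/model.safetensors"), "blake3_single")
config = ModelConfigBase.classify(mod)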
|
||||
@@ -293,7 +296,7 @@ class LoRAConfigBase(ABC, BaseModel):
|
||||
from invokeai.backend.patches.lora_conversions.formats import flux_format_from_state_dict
|
||||
|
||||
sd = mod.load_state_dict(mod.path)
|
||||
value = flux_format_from_state_dict(sd)
|
||||
value = flux_format_from_state_dict(sd, mod.metadata())
|
||||
mod.cache[key] = value
|
||||
return value
|
||||
|
||||
@@ -600,6 +603,21 @@ class LlavaOnevisionConfig(DiffusersConfigBase, ModelConfigBase):
|
||||
}
|
||||
|
||||
|
||||
class ApiModelConfig(MainConfigBase, ModelConfigBase):
|
||||
"""Model config for API-based models."""
|
||||
|
||||
format: Literal[ModelFormat.Api] = ModelFormat.Api
|
||||
|
||||
@classmethod
|
||||
def matches(cls, mod: ModelOnDisk) -> bool:
|
||||
# API models are not stored on disk, so we can't match them.
|
||||
return False
|
||||
|
||||
@classmethod
|
||||
def parse(cls, mod: ModelOnDisk) -> dict[str, Any]:
|
||||
raise NotImplementedError("API models are not parsed from disk.")
|
||||
|
||||
|
||||
def get_model_discriminator_value(v: Any) -> str:
|
||||
"""
|
||||
Computes the discriminator value for a model config.
|
||||
@@ -667,6 +685,7 @@ AnyModelConfig = Annotated[
|
||||
Annotated[SigLIPConfig, SigLIPConfig.get_tag()],
|
||||
Annotated[FluxReduxConfig, FluxReduxConfig.get_tag()],
|
||||
Annotated[LlavaOnevisionConfig, LlavaOnevisionConfig.get_tag()],
|
||||
Annotated[ApiModelConfig, ApiModelConfig.get_tag()],
|
||||
],
|
||||
Discriminator(get_model_discriminator_value),
|
||||
]
|
||||
|
||||
@@ -2,6 +2,8 @@ from typing import Any
|
||||
|
||||
import torch
|
||||
|
||||
from invokeai.backend.quantization.gguf.ggml_tensor import GGMLTensor
|
||||
|
||||
|
||||
class CachedModelOnlyFullLoad:
|
||||
"""A wrapper around a PyTorch model to handle full loads and unloads between the CPU and the compute device.
|
||||
@@ -76,7 +78,15 @@ class CachedModelOnlyFullLoad:
|
||||
for k, v in self._cpu_state_dict.items():
|
||||
new_state_dict[k] = v.to(self._compute_device, copy=True)
|
||||
self._model.load_state_dict(new_state_dict, assign=True)
|
||||
self._model.to(self._compute_device)
|
||||
|
||||
check_for_gguf = hasattr(self._model, "state_dict") and self._model.state_dict().get("img_in.weight")
|
||||
if isinstance(check_for_gguf, GGMLTensor):
|
||||
old_value = torch.__future__.get_overwrite_module_params_on_conversion()
|
||||
torch.__future__.set_overwrite_module_params_on_conversion(True)
|
||||
self._model.to(self._compute_device)
|
||||
torch.__future__.set_overwrite_module_params_on_conversion(old_value)
|
||||
else:
|
||||
self._model.to(self._compute_device)
|
||||
|
||||
self._is_in_vram = True
|
||||
return self._total_bytes
|
||||
@@ -92,7 +102,15 @@ class CachedModelOnlyFullLoad:
|
||||
|
||||
if self._cpu_state_dict is not None:
|
||||
self._model.load_state_dict(self._cpu_state_dict, assign=True)
|
||||
self._model.to(self._offload_device)
|
||||
|
||||
check_for_gguf = hasattr(self._model, "state_dict") and self._model.state_dict().get("img_in.weight")
|
||||
if isinstance(check_for_gguf, GGMLTensor):
|
||||
old_value = torch.__future__.get_overwrite_module_params_on_conversion()
|
||||
torch.__future__.set_overwrite_module_params_on_conversion(True)
|
||||
self._model.to(self._offload_device)
|
||||
torch.__future__.set_overwrite_module_params_on_conversion(old_value)
|
||||
else:
|
||||
self._model.to(self._offload_device)
|
||||
|
||||
self._is_in_vram = False
|
||||
return self._total_bytes
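Both hunks above wrap model.to(device) in the same save/set/restore handling of torch.__future__'s overwrite-params flag when GGML-quantized tensors are present. A hedged sketch of that pattern factored into a context manager (illustrative only; the cached-model code above inlines it):

import contextlib
import torch

@contextlib.contextmanager
def overwrite_params_on_conversion(enabled: bool = True):
    old_value = torch.__future__.get_overwrite_module_params_on_conversion()
    torch.__future__.set_overwrite_module_params_on_conversion(enabled)
    try:
        yield
    finally:
        # Always restore the previous global setting, even if .to() raises.
        torch.__future__.set_overwrite_module_params_on_conversion(old_value)

# with overwrite_params_on_conversion():
#     model.to(compute_device)  # quantized params are replaced rather than copied in place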
|
||||
|
||||
@@ -2,9 +2,10 @@ import gc
|
||||
import logging
|
||||
import threading
|
||||
import time
|
||||
from dataclasses import dataclass
|
||||
from functools import wraps
|
||||
from logging import Logger
|
||||
from typing import Any, Callable, Dict, List, Optional
|
||||
from typing import Any, Callable, Dict, List, Optional, Protocol
|
||||
|
||||
import psutil
|
||||
import torch
|
||||
@@ -54,6 +55,39 @@ def synchronized(method: Callable[..., Any]) -> Callable[..., Any]:
|
||||
return wrapper
|
||||
|
||||
|
||||
@dataclass
|
||||
class CacheEntrySnapshot:
|
||||
cache_key: str
|
||||
total_bytes: int
|
||||
current_vram_bytes: int
|
||||
|
||||
|
||||
class CacheMissCallback(Protocol):
|
||||
def __call__(
|
||||
self,
|
||||
model_key: str,
|
||||
cache_snapshot: dict[str, CacheEntrySnapshot],
|
||||
) -> None: ...
|
||||
|
||||
|
||||
class CacheHitCallback(Protocol):
|
||||
def __call__(
|
||||
self,
|
||||
model_key: str,
|
||||
cache_snapshot: dict[str, CacheEntrySnapshot],
|
||||
) -> None: ...
|
||||
|
||||
|
||||
class CacheModelsClearedCallback(Protocol):
|
||||
def __call__(
|
||||
self,
|
||||
models_cleared: int,
|
||||
bytes_requested: int,
|
||||
bytes_freed: int,
|
||||
cache_snapshot: dict[str, CacheEntrySnapshot],
|
||||
) -> None: ...
|
||||
|
||||
|
||||
class ModelCache:
|
||||
"""A cache for managing models in memory.
|
||||
|
||||
@@ -144,6 +178,34 @@ class ModelCache:
|
||||
# - Requests to empty the cache from a separate thread
|
||||
self._lock = threading.RLock()
|
||||
|
||||
self._on_cache_hit_callbacks: set[CacheHitCallback] = set()
|
||||
self._on_cache_miss_callbacks: set[CacheMissCallback] = set()
|
||||
self._on_cache_models_cleared_callbacks: set[CacheModelsClearedCallback] = set()
|
||||
|
||||
def on_cache_hit(self, cb: CacheHitCallback) -> Callable[[], None]:
|
||||
self._on_cache_hit_callbacks.add(cb)
|
||||
|
||||
def unsubscribe() -> None:
|
||||
self._on_cache_hit_callbacks.discard(cb)
|
||||
|
||||
return unsubscribe
|
||||
|
||||
def on_cache_miss(self, cb: CacheHitCallback) -> Callable[[], None]:
|
||||
self._on_cache_miss_callbacks.add(cb)
|
||||
|
||||
def unsubscribe() -> None:
|
||||
self._on_cache_miss_callbacks.discard(cb)
|
||||
|
||||
return unsubscribe
|
||||
|
||||
def on_cache_models_cleared(self, cb: CacheModelsClearedCallback) -> Callable[[], None]:
|
||||
self._on_cache_models_cleared_callbacks.add(cb)
|
||||
|
||||
def unsubscribe() -> None:
|
||||
self._on_cache_models_cleared_callbacks.discard(cb)
|
||||
|
||||
return unsubscribe
|
||||
|
||||
@property
|
||||
@synchronized
|
||||
def stats(self) -> Optional[CacheStats]:
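A hedged subscription sketch for the new cache callbacks; log_miss and the model_cache instance are hypothetical, and only the subscribe/unsubscribe contract and the CacheEntrySnapshot fields are taken from the code above:

def log_miss(model_key: str, cache_snapshot: dict[str, CacheEntrySnapshot]) -> None:
    total = sum(entry.total_bytes for entry in cache_snapshot.values())
    print(f"cache miss: {model_key}; {len(cache_snapshot)} cached models, {total / 1e6:.1f} MB total")

unsubscribe = model_cache.on_cache_miss(log_miss)
# ... later, when the listener is no longer needed:
unsubscribe()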
|
||||
@@ -195,6 +257,20 @@ class ModelCache:
|
||||
f"Added model {key} (Type: {model.__class__.__name__}, Wrap mode: {wrapped_model.__class__.__name__}, Model size: {size / MB:.2f}MB)"
|
||||
)
|
||||
|
||||
@synchronized
|
||||
def _get_cache_snapshot(self) -> dict[str, CacheEntrySnapshot]:
|
||||
overview: dict[str, CacheEntrySnapshot] = {}
|
||||
for cache_key, cache_entry in self._cached_models.items():
|
||||
total_bytes = cache_entry.cached_model.total_bytes()
|
||||
current_vram_bytes = cache_entry.cached_model.cur_vram_bytes()
|
||||
overview[cache_key] = CacheEntrySnapshot(
|
||||
cache_key=cache_key,
|
||||
total_bytes=total_bytes,
|
||||
current_vram_bytes=current_vram_bytes,
|
||||
)
|
||||
|
||||
return overview
|
||||
|
||||
@synchronized
|
||||
def get(self, key: str, stats_name: Optional[str] = None) -> CacheRecord:
|
||||
"""Retrieve a model from the cache.
|
||||
@@ -208,6 +284,8 @@ class ModelCache:
|
||||
if self.stats:
|
||||
self.stats.hits += 1
|
||||
else:
|
||||
for cb in self._on_cache_miss_callbacks:
|
||||
cb(model_key=key, cache_snapshot=self._get_cache_snapshot())
|
||||
if self.stats:
|
||||
self.stats.misses += 1
|
||||
self._logger.debug(f"Cache miss: {key}")
|
||||
@@ -229,6 +307,8 @@ class ModelCache:
|
||||
self._cache_stack.append(key)
|
||||
|
||||
self._logger.debug(f"Cache hit: {key} (Type: {cache_entry.cached_model.model.__class__.__name__})")
|
||||
for cb in self._on_cache_hit_callbacks:
|
||||
cb(model_key=key, cache_snapshot=self._get_cache_snapshot())
|
||||
return cache_entry
|
||||
|
||||
@synchronized
|
||||
@@ -649,6 +729,13 @@ class ModelCache:
|
||||
# immediately when their reference count hits 0.
|
||||
if self.stats:
|
||||
self.stats.cleared = models_cleared
|
||||
for cb in self._on_cache_models_cleared_callbacks:
|
||||
cb(
|
||||
models_cleared=models_cleared,
|
||||
bytes_requested=bytes_needed,
|
||||
bytes_freed=ram_bytes_freed,
|
||||
cache_snapshot=self._get_cache_snapshot(),
|
||||
)
|
||||
gc.collect()
|
||||
|
||||
TorchDevice.empty_cache()
|
||||
|
||||
@@ -13,6 +13,12 @@ from invokeai.backend.patches.layers.lora_layer import LoRALayer
|
||||
|
||||
def linear_lora_forward(input: torch.Tensor, lora_layer: LoRALayer, lora_weight: float) -> torch.Tensor:
|
||||
"""An optimized implementation of the residual calculation for a sidecar linear LoRALayer."""
|
||||
# up matrix and down matrix have different ranks so we can't simply multiply them
|
||||
if lora_layer.up.shape[1] != lora_layer.down.shape[0]:
|
||||
x = torch.nn.functional.linear(input, lora_layer.get_weight(lora_weight), bias=lora_layer.bias)
|
||||
x *= lora_weight * lora_layer.scale()
|
||||
return x
|
||||
|
||||
x = torch.nn.functional.linear(input, lora_layer.down)
|
||||
if lora_layer.mid is not None:
|
||||
x = torch.nn.functional.linear(x, lora_layer.mid)
|
||||
|
||||
@@ -20,6 +20,10 @@ from invokeai.backend.model_manager.taxonomy import (
|
||||
ModelType,
|
||||
SubModelType,
|
||||
)
|
||||
from invokeai.backend.patches.lora_conversions.flux_aitoolkit_lora_conversion_utils import (
|
||||
is_state_dict_likely_in_flux_aitoolkit_format,
|
||||
lora_model_from_flux_aitoolkit_state_dict,
|
||||
)
|
||||
from invokeai.backend.patches.lora_conversions.flux_control_lora_utils import (
|
||||
is_state_dict_likely_flux_control,
|
||||
lora_model_from_flux_control_state_dict,
|
||||
@@ -92,6 +96,8 @@ class LoRALoader(ModelLoader):
|
||||
model = lora_model_from_flux_onetrainer_state_dict(state_dict=state_dict)
|
||||
elif is_state_dict_likely_flux_control(state_dict=state_dict):
|
||||
model = lora_model_from_flux_control_state_dict(state_dict=state_dict)
|
||||
elif is_state_dict_likely_in_flux_aitoolkit_format(state_dict=state_dict):
|
||||
model = lora_model_from_flux_aitoolkit_state_dict(state_dict=state_dict)
|
||||
else:
|
||||
raise ValueError(f"LoRA model is in unsupported FLUX format: {config.format}")
|
||||
else:
|
||||
|
||||
@@ -62,11 +62,14 @@ class HuggingFaceMetadataFetch(ModelMetadataFetchBase):
|
||||
# If this too fails, raise exception.
|
||||
|
||||
model_info = None
|
||||
|
||||
# Handling for our special syntax - we only want the base HF `org/repo` here.
|
||||
repo_id = id.split("::")[0] or id
|
||||
while not model_info:
|
||||
try:
|
||||
model_info = HfApi().model_info(repo_id=id, files_metadata=True, revision=variant)
|
||||
model_info = HfApi().model_info(repo_id=repo_id, files_metadata=True, revision=variant)
|
||||
except RepositoryNotFoundError as excp:
|
||||
raise UnknownMetadataException(f"'{id}' not found. See trace for details.") from excp
|
||||
raise UnknownMetadataException(f"'{repo_id}' not found. See trace for details.") from excp
|
||||
except RevisionNotFoundError:
|
||||
if variant is None:
|
||||
raise
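The normalization above only strips whatever follows the special "::" marker before calling the HF API; a tiny hedged illustration (the suffix is made up):

id = "InvokeAI/ip_adapter_sdxl_vit_h::some-suffix"
repo_id = id.split("::")[0] or id
assert repo_id == "InvokeAI/ip_adapter_sdxl_vit_h"

# A plain repo id passes through unchanged:
assert "org/repo".split("::")[0] == "org/repo"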
|
||||
@@ -75,14 +78,14 @@ class HuggingFaceMetadataFetch(ModelMetadataFetchBase):
|
||||
|
||||
files: list[RemoteModelFile] = []
|
||||
|
||||
_, name = id.split("/")
|
||||
_, name = repo_id.split("/")
|
||||
|
||||
for s in model_info.siblings or []:
|
||||
assert s.rfilename is not None
|
||||
assert s.size is not None
|
||||
files.append(
|
||||
RemoteModelFile(
|
||||
url=hf_hub_url(id, s.rfilename, revision=variant or "main"),
|
||||
url=hf_hub_url(repo_id, s.rfilename, revision=variant or "main"),
|
||||
path=Path(name, s.rfilename),
|
||||
size=s.size,
|
||||
sha256=s.lfs.get("sha256") if s.lfs else None,
|
||||
|
||||
@@ -4,6 +4,7 @@ from typing import Any, Optional, TypeAlias
|
||||
import safetensors.torch
|
||||
import torch
|
||||
from picklescan.scanner import scan_file_path
|
||||
from safetensors import safe_open
|
||||
|
||||
from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS, ModelHash
|
||||
from invokeai.backend.model_manager.taxonomy import ModelRepoVariant
|
||||
@@ -35,12 +36,21 @@ class ModelOnDisk:
|
||||
return self.path.stat().st_size
|
||||
return sum(file.stat().st_size for file in self.path.rglob("*"))
|
||||
|
||||
def component_paths(self) -> set[Path]:
|
||||
def weight_files(self) -> set[Path]:
|
||||
if self.path.is_file():
|
||||
return {self.path}
|
||||
extensions = {".safetensors", ".pt", ".pth", ".ckpt", ".bin", ".gguf"}
|
||||
return {f for f in self.path.rglob("*") if f.suffix in extensions}
|
||||
|
||||
def metadata(self, path: Optional[Path] = None) -> dict[str, str]:
|
||||
try:
|
||||
with safe_open(self.path, framework="pt", device="cpu") as f:
|
||||
metadata = f.metadata()
|
||||
assert isinstance(metadata, dict)
|
||||
return metadata
|
||||
except Exception:
|
||||
return {}
|
||||
|
||||
def repo_variant(self) -> Optional[ModelRepoVariant]:
|
||||
if self.path.is_file():
|
||||
return None
|
||||
@@ -64,18 +74,7 @@ class ModelOnDisk:
|
||||
if path in sd_cache:
|
||||
return sd_cache[path]
|
||||
|
||||
if not path:
|
||||
components = list(self.component_paths())
|
||||
match components:
|
||||
case []:
|
||||
raise ValueError("No weight files found for this model")
|
||||
case [p]:
|
||||
path = p
|
||||
case ps if len(ps) >= 2:
|
||||
raise ValueError(
|
||||
f"Multiple weight files found for this model: {ps}. "
|
||||
f"Please specify the intended file using the 'path' argument"
|
||||
)
|
||||
path = self.resolve_weight_file(path)
|
||||
|
||||
with SilenceWarnings():
|
||||
if path.suffix.endswith((".ckpt", ".pt", ".pth", ".bin")):
|
||||
@@ -94,3 +93,18 @@ class ModelOnDisk:
|
||||
state_dict = checkpoint.get("state_dict", checkpoint)
|
||||
sd_cache[path] = state_dict
|
||||
return state_dict
|
||||
|
||||
def resolve_weight_file(self, path: Optional[Path] = None) -> Path:
|
||||
if not path:
|
||||
weight_files = list(self.weight_files())
|
||||
match weight_files:
|
||||
case []:
|
||||
raise ValueError("No weight files found for this model")
|
||||
case [p]:
|
||||
return p
|
||||
case ps if len(ps) >= 2:
|
||||
raise ValueError(
|
||||
f"Multiple weight files found for this model: {ps}. "
|
||||
f"Please specify the intended file using the 'path' argument"
|
||||
)
|
||||
return path
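A hedged usage sketch tying the refactored ModelOnDisk helpers together; the file path is a placeholder:

from pathlib import Path

mod = ModelOnDisk(Path("/path/to/lora.safetensors"), "blake3_single")
weight_path = mod.resolve_weight_file()   # raises if zero or multiple candidates and no path is given
header_metadata = mod.metadata()          # safetensors header metadata, {} for other formats
state_dict = mod.load_state_dict(weight_path)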
|
||||
|
||||
@@ -297,6 +297,15 @@ ip_adapter_sdxl = StarterModel(
|
||||
dependencies=[ip_adapter_sdxl_image_encoder],
|
||||
previous_names=["IP Adapter SDXL"],
|
||||
)
|
||||
ip_adapter_plus_sdxl = StarterModel(
|
||||
name="Precise Reference (IP Adapter Plus ViT-H)",
|
||||
base=BaseModelType.StableDiffusionXL,
|
||||
source="https://huggingface.co/InvokeAI/ip-adapter-plus_sdxl_vit-h/resolve/main/ip-adapter-plus_sdxl_vit-h.safetensors",
|
||||
description="References images with a higher degree of precision.",
|
||||
type=ModelType.IPAdapter,
|
||||
dependencies=[ip_adapter_sdxl_image_encoder],
|
||||
previous_names=["IP Adapter Plus SDXL"],
|
||||
)
|
||||
ip_adapter_flux = StarterModel(
|
||||
name="Standard Reference (XLabs FLUX IP-Adapter v2)",
|
||||
base=BaseModelType.Flux,
|
||||
@@ -672,6 +681,7 @@ STARTER_MODELS: list[StarterModel] = [
|
||||
ip_adapter_plus_sd1,
|
||||
ip_adapter_plus_face_sd1,
|
||||
ip_adapter_sdxl,
|
||||
ip_adapter_plus_sdxl,
|
||||
ip_adapter_flux,
|
||||
qr_code_cnet_sd1,
|
||||
qr_code_cnet_sdxl,
|
||||
@@ -744,6 +754,7 @@ sdxl_bundle: list[StarterModel] = [
|
||||
juggernaut_sdxl,
|
||||
sdxl_fp16_vae_fix,
|
||||
ip_adapter_sdxl,
|
||||
ip_adapter_plus_sdxl,
|
||||
canny_sdxl,
|
||||
depth_sdxl,
|
||||
softedge_sdxl,
|
||||
|
||||
@@ -26,7 +26,9 @@ class BaseModelType(str, Enum):
|
||||
StableDiffusionXLRefiner = "sdxl-refiner"
|
||||
Flux = "flux"
|
||||
CogView4 = "cogview4"
|
||||
# Kandinsky2_1 = "kandinsky-2.1"
|
||||
Imagen3 = "imagen3"
|
||||
Imagen4 = "imagen4"
|
||||
ChatGPT4o = "chatgpt-4o"
|
||||
|
||||
|
||||
class ModelType(str, Enum):
|
||||
@@ -98,6 +100,7 @@ class ModelFormat(str, Enum):
|
||||
BnbQuantizedLlmInt8b = "bnb_quantized_int8b"
|
||||
BnbQuantizednf4b = "bnb_quantized_nf4b"
|
||||
GGUFQuantized = "gguf_quantized"
|
||||
Api = "api"
|
||||
|
||||
|
||||
class SchedulerPredictionType(str, Enum):
|
||||
@@ -134,6 +137,7 @@ class FluxLoRAFormat(str, Enum):
|
||||
Kohya = "flux.kohya"
|
||||
OneTrainer = "flux.onetrainer"
|
||||
Control = "flux.control"
|
||||
AIToolkit = "flux.aitoolkit"
|
||||
|
||||
|
||||
AnyVariant: TypeAlias = Union[ModelVariantType, ClipVariantType, None]
|
||||
|
||||
@@ -46,6 +46,10 @@ class ModelPatcher:
|
||||
text_encoder: Union[CLIPTextModel, CLIPTextModelWithProjection],
|
||||
ti_list: List[Tuple[str, TextualInversionModelRaw]],
|
||||
) -> Iterator[Tuple[CLIPTokenizer, TextualInversionManager]]:
|
||||
if len(ti_list) == 0:
|
||||
yield tokenizer, TextualInversionManager(tokenizer)
|
||||
return
|
||||
|
||||
init_tokens_count = None
|
||||
new_tokens_added = None
|
||||
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import re
|
||||
from contextlib import contextmanager
|
||||
from typing import Dict, Iterable, Optional, Tuple
|
||||
|
||||
@@ -7,6 +8,7 @@ from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
|
||||
from invokeai.backend.patches.layers.flux_control_lora_layer import FluxControlLoRALayer
|
||||
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
|
||||
from invokeai.backend.patches.pad_with_zeros import pad_with_zeros
|
||||
from invokeai.backend.util import InvokeAILogger
|
||||
from invokeai.backend.util.devices import TorchDevice
|
||||
from invokeai.backend.util.original_weights_storage import OriginalWeightsStorage
|
||||
|
||||
@@ -23,6 +25,7 @@ class LayerPatcher:
|
||||
cached_weights: Optional[Dict[str, torch.Tensor]] = None,
|
||||
force_direct_patching: bool = False,
|
||||
force_sidecar_patching: bool = False,
|
||||
suppress_warning_layers: Optional[re.Pattern] = None,
|
||||
):
|
||||
"""Apply 'smart' model patching that chooses whether to use direct patching or a sidecar wrapper for each
|
||||
module.
|
||||
@@ -44,6 +47,7 @@ class LayerPatcher:
|
||||
dtype=dtype,
|
||||
force_direct_patching=force_direct_patching,
|
||||
force_sidecar_patching=force_sidecar_patching,
|
||||
suppress_warning_layers=suppress_warning_layers,
|
||||
)
|
||||
|
||||
yield
|
||||
@@ -70,6 +74,7 @@ class LayerPatcher:
|
||||
dtype: torch.dtype,
|
||||
force_direct_patching: bool,
|
||||
force_sidecar_patching: bool,
|
||||
suppress_warning_layers: Optional[re.Pattern] = None,
|
||||
):
|
||||
"""Apply a single LoRA patch to a model using the 'smart' patching strategy that chooses whether to use direct
|
||||
patching or a sidecar wrapper for each module.
|
||||
@@ -89,9 +94,17 @@ class LayerPatcher:
|
||||
if not layer_key.startswith(prefix):
|
||||
continue
|
||||
|
||||
module_key, module = LayerPatcher._get_submodule(
|
||||
model, layer_key[prefix_len:], layer_key_is_flattened=layer_keys_are_flattened
|
||||
)
|
||||
try:
|
||||
module_key, module = LayerPatcher._get_submodule(
|
||||
model, layer_key[prefix_len:], layer_key_is_flattened=layer_keys_are_flattened
|
||||
)
|
||||
except AttributeError:
|
||||
if suppress_warning_layers and suppress_warning_layers.search(layer_key):
|
||||
pass
|
||||
else:
|
||||
logger = InvokeAILogger.get_logger(LayerPatcher.__name__)
|
||||
logger.warning("Failed to find module for LoRA layer key: %s", layer_key)
|
||||
continue
|
||||
|
||||
# Decide whether to use direct patching or a sidecar patch.
|
||||
# Direct patching is preferred, because it results in better runtime speed.
|
||||
|
||||
@@ -19,6 +19,7 @@ class LoRALayer(LoRALayerBase):
|
||||
self.up = up
|
||||
self.mid = mid
|
||||
self.down = down
|
||||
self.are_ranks_equal = up.shape[1] == down.shape[0]
|
||||
|
||||
@classmethod
|
||||
def from_state_dict_values(
|
||||
@@ -58,12 +59,42 @@ class LoRALayer(LoRALayerBase):
|
||||
def _rank(self) -> int:
|
||||
return self.down.shape[0]
|
||||
|
||||
def fuse_weights(self, up: torch.Tensor, down: torch.Tensor) -> torch.Tensor:
|
||||
"""
|
||||
Fuse the weights of the up and down matrices of a LoRA layer with different ranks.
|
||||
|
||||
Since the Huggingface implementation of KQV projections are fused, when we convert to Kohya format
|
||||
the LoRA weights have different ranks. This function handles the fusion of these differently sized
|
||||
matrices.
|
||||
"""
|
||||
|
||||
fused_lora = torch.zeros((up.shape[0], down.shape[1]), device=down.device, dtype=down.dtype)
|
||||
rank_diff = down.shape[0] / up.shape[1]
|
||||
|
||||
if rank_diff > 1:
|
||||
rank_diff = down.shape[0] / up.shape[1]
|
||||
w_down = down.chunk(int(rank_diff), dim=0)
|
||||
for w_down_chunk in w_down:
|
||||
fused_lora = fused_lora + (torch.mm(up, w_down_chunk))
|
||||
else:
|
||||
rank_diff = up.shape[1] / down.shape[0]
|
||||
w_up = up.chunk(int(rank_diff), dim=0)
|
||||
for w_up_chunk in w_up:
|
||||
fused_lora = fused_lora + (torch.mm(w_up_chunk, down))
|
||||
|
||||
return fused_lora
|
||||
|
||||
def get_weight(self, orig_weight: torch.Tensor) -> torch.Tensor:
|
||||
if self.mid is not None:
|
||||
up = self.up.reshape(self.up.shape[0], self.up.shape[1])
|
||||
down = self.down.reshape(self.down.shape[0], self.down.shape[1])
|
||||
weight = torch.einsum("m n w h, i m, n j -> i j w h", self.mid, up, down)
|
||||
else:
|
||||
# up matrix and down matrix have different ranks so we can't simply multiply them
|
||||
if not self.are_ranks_equal:
|
||||
weight = self.fuse_weights(self.up, self.down)
|
||||
return weight
|
||||
|
||||
weight = self.up.reshape(self.up.shape[0], -1) @ self.down.reshape(self.down.shape[0], -1)
|
||||
|
||||
return weight
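A standalone shape sketch of the fused-QKV case that fuse_weights() targets: the down matrix carries several rank-r blocks stacked along dim 0, so up (out x r) cannot be matmul'd against down directly, and summing up @ block over the stacked blocks yields the (out x in) weight that get_weight() expects. The sizes below are made up:

import torch

out_features, in_features, rank, num_blocks = 96, 32, 16, 3
up = torch.randn(out_features, rank)                # (out, r)
down = torch.randn(num_blocks * rank, in_features)  # (k * r, in): k rank-r blocks stacked

fused = torch.zeros(out_features, in_features)
for block in down.chunk(num_blocks, dim=0):         # each block is (r, in)
    fused += up @ block
assert fused.shape == (out_features, in_features)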
|
||||
|
||||
@@ -0,0 +1,63 @@
|
||||
import json
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any
|
||||
|
||||
import torch
|
||||
|
||||
from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
|
||||
from invokeai.backend.patches.layers.utils import any_lora_layer_from_state_dict
|
||||
from invokeai.backend.patches.lora_conversions.flux_diffusers_lora_conversion_utils import _group_by_layer
|
||||
from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_TRANSFORMER_PREFIX
|
||||
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
|
||||
from invokeai.backend.util import InvokeAILogger
|
||||
|
||||
|
||||
def is_state_dict_likely_in_flux_aitoolkit_format(state_dict: dict[str, Any], metadata: dict[str, Any] = None) -> bool:
|
||||
if metadata:
|
||||
try:
|
||||
software = json.loads(metadata.get("software", "{}"))
|
||||
except json.JSONDecodeError:
|
||||
return False
|
||||
return software.get("name") == "ai-toolkit"
|
||||
# metadata got lost somewhere
|
||||
return any("diffusion_model" == k.split(".", 1)[0] for k in state_dict.keys())
|
||||
|
||||
|
||||
@dataclass
|
||||
class GroupedStateDict:
|
||||
transformer: dict[str, Any] = field(default_factory=dict)
|
||||
# might also grow CLIP and T5 submodels
|
||||
|
||||
|
||||
def _group_state_by_submodel(state_dict: dict[str, Any]) -> GroupedStateDict:
|
||||
logger = InvokeAILogger.get_logger()
|
||||
grouped = GroupedStateDict()
|
||||
for key, value in state_dict.items():
|
||||
submodel_name, param_name = key.split(".", 1)
|
||||
match submodel_name:
|
||||
case "diffusion_model":
|
||||
grouped.transformer[param_name] = value
|
||||
case _:
|
||||
logger.warning(f"Unexpected submodel name: {submodel_name}")
|
||||
return grouped
|
||||
|
||||
|
||||
def _rename_peft_lora_keys(state_dict: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
|
||||
"""Renames keys from the PEFT LoRA format to the InvokeAI format."""
|
||||
renamed_state_dict = {}
|
||||
for key, value in state_dict.items():
|
||||
renamed_key = key.replace(".lora_A.", ".lora_down.").replace(".lora_B.", ".lora_up.")
|
||||
renamed_state_dict[renamed_key] = value
|
||||
return renamed_state_dict
|
||||
|
||||
|
||||
def lora_model_from_flux_aitoolkit_state_dict(state_dict: dict[str, torch.Tensor]) -> ModelPatchRaw:
|
||||
state_dict = _rename_peft_lora_keys(state_dict)
|
||||
by_layer = _group_by_layer(state_dict)
|
||||
by_model = _group_state_by_submodel(by_layer)
|
||||
|
||||
layers: dict[str, BaseLayerPatch] = {}
|
||||
for layer_key, layer_state_dict in by_model.transformer.items():
|
||||
layers[FLUX_LORA_TRANSFORMER_PREFIX + layer_key] = any_lora_layer_from_state_dict(layer_state_dict)
|
||||
|
||||
return ModelPatchRaw(layers=layers)
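A hedged illustration of the detection entry point at the top of this new module, using a tiny synthetic state dict and metadata blob (the key name and version string are made up):

import torch

metadata = {"software": '{"name": "ai-toolkit", "version": "0.1"}'}
state_dict = {"diffusion_model.double_blocks.0.img_attn.qkv.lora_A.weight": torch.zeros(16, 3072)}

assert is_state_dict_likely_in_flux_aitoolkit_format(state_dict, metadata)
assert is_state_dict_likely_in_flux_aitoolkit_format(state_dict)  # no metadata: falls back to key inspection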
|
||||
@@ -20,6 +20,14 @@ from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
|
||||
FLUX_KOHYA_TRANSFORMER_KEY_REGEX = (
|
||||
r"lora_unet_(\w+_blocks)_(\d+)_(img_attn|img_mlp|img_mod|txt_attn|txt_mlp|txt_mod|linear1|linear2|modulation)_?(.*)"
|
||||
)
|
||||
|
||||
# A regex pattern that matches all of the last layer keys in the Kohya FLUX LoRA format.
|
||||
# Example keys:
|
||||
# lora_unet_final_layer_linear.alpha
|
||||
# lora_unet_final_layer_linear.lora_down.weight
|
||||
# lora_unet_final_layer_linear.lora_up.weight
|
||||
FLUX_KOHYA_LAST_LAYER_KEY_REGEX = r"lora_unet_final_layer_(linear|linear1|linear2)_?(.*)"
|
||||
|
||||
# A regex pattern that matches all of the CLIP keys in the Kohya FLUX LoRA format.
|
||||
# Example keys:
|
||||
# lora_te1_text_model_encoder_layers_0_mlp_fc1.alpha
|
||||
@@ -44,6 +52,7 @@ def is_state_dict_likely_in_flux_kohya_format(state_dict: Dict[str, Any]) -> boo
|
||||
"""
|
||||
return all(
|
||||
re.match(FLUX_KOHYA_TRANSFORMER_KEY_REGEX, k)
|
||||
or re.match(FLUX_KOHYA_LAST_LAYER_KEY_REGEX, k)
|
||||
or re.match(FLUX_KOHYA_CLIP_KEY_REGEX, k)
|
||||
or re.match(FLUX_KOHYA_T5_KEY_REGEX, k)
|
||||
for k in state_dict.keys()
|
||||
@@ -65,6 +74,9 @@ def lora_model_from_flux_kohya_state_dict(state_dict: Dict[str, torch.Tensor]) -
|
||||
t5_grouped_sd: dict[str, dict[str, torch.Tensor]] = {}
|
||||
for layer_name, layer_state_dict in grouped_state_dict.items():
|
||||
if layer_name.startswith("lora_unet"):
|
||||
# Skip the final layer. This is incompatible with current model definition.
|
||||
if layer_name.startswith("lora_unet_final_layer"):
|
||||
continue
|
||||
transformer_grouped_sd[layer_name] = layer_state_dict
|
||||
elif layer_name.startswith("lora_te1"):
|
||||
clip_grouped_sd[layer_name] = layer_state_dict
|
||||
|
||||
@@ -1,4 +1,7 @@
|
||||
from invokeai.backend.model_manager.taxonomy import FluxLoRAFormat
|
||||
from invokeai.backend.patches.lora_conversions.flux_aitoolkit_lora_conversion_utils import (
|
||||
is_state_dict_likely_in_flux_aitoolkit_format,
|
||||
)
|
||||
from invokeai.backend.patches.lora_conversions.flux_control_lora_utils import is_state_dict_likely_flux_control
|
||||
from invokeai.backend.patches.lora_conversions.flux_diffusers_lora_conversion_utils import (
|
||||
is_state_dict_likely_in_flux_diffusers_format,
|
||||
@@ -11,7 +14,7 @@ from invokeai.backend.patches.lora_conversions.flux_onetrainer_lora_conversion_u
|
||||
)
|
||||
|
||||
|
||||
def flux_format_from_state_dict(state_dict):
|
||||
def flux_format_from_state_dict(state_dict: dict, metadata: dict | None = None) -> FluxLoRAFormat | None:
|
||||
if is_state_dict_likely_in_flux_kohya_format(state_dict):
|
||||
return FluxLoRAFormat.Kohya
|
||||
elif is_state_dict_likely_in_flux_onetrainer_format(state_dict):
|
||||
@@ -20,5 +23,7 @@ def flux_format_from_state_dict(state_dict):
|
||||
return FluxLoRAFormat.Diffusers
|
||||
elif is_state_dict_likely_flux_control(state_dict):
|
||||
return FluxLoRAFormat.Control
|
||||
elif is_state_dict_likely_in_flux_aitoolkit_format(state_dict, metadata):
|
||||
return FluxLoRAFormat.AIToolkit
|
||||
else:
|
||||
return None
|
||||
|
||||
@@ -5,7 +5,8 @@ from typing import Callable, Optional, Union
|
||||
import gguf
|
||||
import torch
|
||||
|
||||
TORCH_COMPATIBLE_QTYPES = {None, gguf.GGMLQuantizationType.F32, gguf.GGMLQuantizationType.F16}
|
||||
# should not be a Set until this is resolved: https://github.com/pytorch/pytorch/issues/145761
|
||||
TORCH_COMPATIBLE_QTYPES = [None, gguf.GGMLQuantizationType.F32, gguf.GGMLQuantizationType.F16]
|
||||
|
||||
# K Quants #
|
||||
QK_K = 256
|
||||
|
||||
@@ -30,18 +30,13 @@ class RectifiedFlowInpaintExtension:
|
||||
def _apply_mask_gradient_adjustment(self, t_prev: float) -> torch.Tensor:
|
||||
"""Applies inpaint mask gradient adjustment and returns the inpaint mask to be used at the current timestep."""
|
||||
# As we progress through the denoising process, we promote gradient regions of the mask to have a full weight of
|
||||
# 1.0. This helps to produce more coherent seams around the inpainted region. We experimented with a (small)
|
||||
# number of promotion strategies (e.g. gradual promotion based on timestep), but found that a simple cutoff
|
||||
# threshold worked well.
|
||||
# 1.0. This helps to produce more coherent seams around the inpainted region.
|
||||
|
||||
# We use a small epsilon to avoid any potential issues with floating point precision.
|
||||
eps = 1e-4
|
||||
mask_gradient_t_cutoff = 0.5
|
||||
if t_prev > mask_gradient_t_cutoff:
|
||||
# Early in the denoising process, use the inpaint mask as-is.
|
||||
return self._inpaint_mask
|
||||
else:
|
||||
# After the cut-off, promote all non-zero mask values to 1.0.
|
||||
mask = self._inpaint_mask.where(self._inpaint_mask <= (0.0 + eps), 1.0)
|
||||
mask = torch.where(self._inpaint_mask >= t_prev + eps, 1.0, 0.0).to(
|
||||
dtype=self._inpaint_mask.dtype, device=self._inpaint_mask.device
|
||||
)
|
||||
|
||||
return mask
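A hedged numeric illustration of the new rule: mask values are now compared against the current timestep t_prev, so soft gradient regions appear to be promoted to 1.0 progressively as denoising advances rather than all at once at a fixed 0.5 cutoff:

import torch

inpaint_mask = torch.tensor([0.0, 0.2, 0.6, 1.0])
eps = 1e-4
for t_prev in (0.9, 0.5, 0.1):
    mask = torch.where(inpaint_mask >= t_prev + eps, 1.0, 0.0)
    print(t_prev, mask.tolist())
# 0.9 -> [0.0, 0.0, 0.0, 1.0]; 0.5 -> [0.0, 0.0, 1.0, 1.0]; 0.1 -> [0.0, 1.0, 1.0, 1.0]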
|
||||
|
||||
|
||||
@@ -371,7 +371,10 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
|
||||
|
||||
if use_ip_adapter or use_regional_prompting:
|
||||
ip_adapters: Optional[List[UNetIPAdapterData]] = (
|
||||
[{"ip_adapter": ipa.ip_adapter_model, "target_blocks": ipa.target_blocks} for ipa in ip_adapter_data]
|
||||
[
|
||||
{"ip_adapter": ipa.ip_adapter_model, "target_blocks": ipa.target_blocks, "method": ipa.method}
|
||||
for ipa in ip_adapter_data
|
||||
]
|
||||
if use_ip_adapter
|
||||
else None
|
||||
)
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import math
|
||||
from dataclasses import dataclass
|
||||
from dataclasses import dataclass, field
|
||||
from enum import Enum
|
||||
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
|
||||
|
||||
@@ -104,15 +104,29 @@ class IPAdapterConditioningInfo:
|
||||
|
||||
@dataclass
|
||||
class IPAdapterData:
|
||||
"""Data class for IP-Adapter configuration.
|
||||
|
||||
Attributes:
|
||||
ip_adapter_model: The IP-Adapter model to use.
|
||||
ip_adapter_conditioning: The IP-Adapter conditioning data.
|
||||
mask: The mask to apply to the IP-Adapter conditioning.
|
||||
target_blocks: List of target attention block names to apply IP-Adapter to.
|
||||
negative_blocks: List of target attention block names that should use negative attention.
|
||||
weight: The weight to apply to the IP-Adapter conditioning.
|
||||
begin_step_percent: The percentage of steps at which to start applying the IP-Adapter.
|
||||
end_step_percent: The percentage of steps at which to stop applying the IP-Adapter.
|
||||
method: The method to use for applying the IP-Adapter ('full', 'style', 'composition').
|
||||
"""
|
||||
|
||||
ip_adapter_model: IPAdapter
|
||||
ip_adapter_conditioning: IPAdapterConditioningInfo
|
||||
mask: torch.Tensor
|
||||
target_blocks: List[str]
|
||||
|
||||
# Either a single weight applied to all steps, or a list of weights for each step.
|
||||
negative_blocks: List[str] = field(default_factory=list)
|
||||
weight: Union[float, List[float]] = 1.0
|
||||
begin_step_percent: float = 0.0
|
||||
end_step_percent: float = 1.0
|
||||
method: str = "full"
|
||||
|
||||
def scale_for_step(self, step_index: int, total_steps: int) -> float:
|
||||
first_adapter_step = math.floor(self.begin_step_percent * total_steps)
|
||||
|
||||
@@ -14,6 +14,7 @@ from invokeai.backend.stable_diffusion.diffusion.regional_prompt_data import Reg
|
||||
class IPAdapterAttentionWeights:
|
||||
ip_adapter_weights: IPAttentionProcessorWeights
|
||||
skip: bool
|
||||
negative: bool
|
||||
|
||||
|
||||
class CustomAttnProcessor2_0(AttnProcessor2_0):
|
||||
@@ -162,6 +163,10 @@ class CustomAttnProcessor2_0(AttnProcessor2_0):
|
||||
# Expected ip_hidden_state shape: (batch_size, num_ip_images, ip_seq_len, ip_image_embedding)
|
||||
|
||||
if not self._ip_adapter_attention_weights[ipa_index].skip:
|
||||
# apply the IP-Adapter weights to the negative embeds
|
||||
if self._ip_adapter_attention_weights[ipa_index].negative:
|
||||
ip_hidden_states = torch.cat([ip_hidden_states[1], ip_hidden_states[0] * 0], dim=0)
|
||||
|
||||
ip_key = ipa_weights.to_k_ip(ip_hidden_states)
|
||||
ip_value = ipa_weights.to_v_ip(ip_hidden_states)
|
||||
|
||||
|
||||
@@ -12,7 +12,8 @@ from invokeai.backend.stable_diffusion.diffusion.custom_atttention import (
|
||||
|
||||
class UNetIPAdapterData(TypedDict):
|
||||
ip_adapter: IPAdapter
|
||||
target_blocks: List[str]
|
||||
target_blocks: List[str] # Blocks where IP-Adapter should be applied
|
||||
method: str # Style or other method type
|
||||
|
||||
|
||||
class UNetAttentionPatcher:
|
||||
@@ -39,12 +40,18 @@ class UNetAttentionPatcher:
|
||||
for ip_adapter in self._ip_adapters:
|
||||
ip_adapter_weights = ip_adapter["ip_adapter"].attn_weights.get_attention_processor_weights(idx)
|
||||
skip = True
|
||||
negative = False
|
||||
for block in ip_adapter["target_blocks"]:
|
||||
if block in name:
|
||||
skip = False
|
||||
negative = ip_adapter["method"] == "style_precise" and (
|
||||
block == "down_blocks.2.attentions.1"
|
||||
or block == "down_blocks.2"
|
||||
or block == "mid_block"
|
||||
)
|
||||
break
|
||||
ip_adapter_attention_weights: IPAdapterAttentionWeights = IPAdapterAttentionWeights(
|
||||
ip_adapter_weights=ip_adapter_weights, skip=skip
|
||||
ip_adapter_weights=ip_adapter_weights, skip=skip, negative=negative
|
||||
)
|
||||
ip_adapter_attention_weights_collection.append(ip_adapter_attention_weights)
|
||||
|
||||
|
||||
@@ -14,6 +14,8 @@ const config: KnipConfig = {
|
||||
'src/features/controlLayers/konva/util.ts',
|
||||
// TODO(psyche): restore HRF functionality?
|
||||
'src/features/hrf/**',
|
||||
// This feature is (temporarily?) disabled
|
||||
'src/features/controlLayers/components/InpaintMask/InpaintMaskAddButtons.tsx',
|
||||
],
|
||||
ignoreBinaries: ['only-allow'],
|
||||
paths: {
|
||||
|
||||
@@ -52,68 +52,68 @@
|
||||
}
|
||||
},
|
||||
"dependencies": {
|
||||
"@atlaskit/pragmatic-drag-and-drop": "^1.4.0",
|
||||
"@atlaskit/pragmatic-drag-and-drop-auto-scroll": "^1.4.0",
|
||||
"@atlaskit/pragmatic-drag-and-drop": "^1.5.3",
|
||||
"@atlaskit/pragmatic-drag-and-drop-auto-scroll": "^2.1.0",
|
||||
"@atlaskit/pragmatic-drag-and-drop-hitbox": "^1.0.3",
|
||||
"@dagrejs/dagre": "^1.1.4",
|
||||
"@dagrejs/graphlib": "^2.2.4",
|
||||
"@fontsource-variable/inter": "^5.1.0",
|
||||
"@fontsource-variable/inter": "^5.2.5",
|
||||
"@invoke-ai/ui-library": "^0.0.46",
|
||||
"@nanostores/react": "^0.7.3",
|
||||
"@reduxjs/toolkit": "2.6.1",
|
||||
"@nanostores/react": "^1.0.0",
|
||||
"@reduxjs/toolkit": "2.7.0",
|
||||
"@roarr/browser-log-writer": "^1.3.0",
|
||||
"@xyflow/react": "^12.5.3",
|
||||
"@xyflow/react": "^12.6.0",
|
||||
"async-mutex": "^0.5.0",
|
||||
"chakra-react-select": "^4.9.2",
|
||||
"cmdk": "^1.0.0",
|
||||
"cmdk": "^1.1.1",
|
||||
"compare-versions": "^6.1.1",
|
||||
"filesize": "^10.1.6",
|
||||
"fracturedjsonjs": "^4.0.2",
|
||||
"fracturedjsonjs": "^4.1.0",
|
||||
"framer-motion": "^11.10.0",
|
||||
"i18next": "^23.15.1",
|
||||
"i18next-http-backend": "^2.6.1",
|
||||
"i18next": "^25.0.1",
|
||||
"i18next-http-backend": "^3.0.2",
|
||||
"idb-keyval": "^6.2.1",
|
||||
"jsondiffpatch": "^0.6.0",
|
||||
"konva": "^9.3.15",
|
||||
"jsondiffpatch": "^0.7.3",
|
||||
"konva": "^9.3.20",
|
||||
"linkify-react": "^4.2.0",
|
||||
"linkifyjs": "^4.2.0",
|
||||
"lodash-es": "^4.17.21",
|
||||
"lru-cache": "^11.0.1",
|
||||
"lru-cache": "^11.1.0",
|
||||
"mtwist": "^1.0.2",
|
||||
"nanoid": "^5.0.7",
|
||||
"nanostores": "^0.11.3",
|
||||
"new-github-issue-url": "^1.0.0",
|
||||
"overlayscrollbars": "^2.10.0",
|
||||
"nanoid": "^5.1.5",
|
||||
"nanostores": "^1.0.1",
|
||||
"new-github-issue-url": "^1.1.0",
|
||||
"overlayscrollbars": "^2.11.1",
|
||||
"overlayscrollbars-react": "^0.5.6",
|
||||
"perfect-freehand": "^1.2.2",
|
||||
"query-string": "^9.1.0",
|
||||
"query-string": "^9.1.1",
|
||||
"raf-throttle": "^2.0.6",
|
||||
"react": "^18.3.1",
|
||||
"react-colorful": "^5.6.1",
|
||||
"react-dom": "^18.3.1",
|
||||
"react-dropzone": "^14.2.9",
|
||||
"react-error-boundary": "^4.0.13",
|
||||
"react-hook-form": "^7.53.0",
|
||||
"react-dropzone": "^14.3.8",
|
||||
"react-error-boundary": "^5.0.0",
|
||||
"react-hook-form": "^7.56.1",
|
||||
"react-hotkeys-hook": "4.5.0",
|
||||
"react-i18next": "^15.0.2",
|
||||
"react-icons": "^5.3.0",
|
||||
"react-redux": "9.1.2",
|
||||
"react-resizable-panels": "^2.1.4",
|
||||
"react-textarea-autosize": "^8.5.7",
|
||||
"react-use": "^17.5.1",
|
||||
"react-virtuoso": "^4.12.5",
|
||||
"react-i18next": "^15.5.1",
|
||||
"react-icons": "^5.5.0",
|
||||
"react-redux": "9.2.0",
|
||||
"react-resizable-panels": "^2.1.8",
|
||||
"react-textarea-autosize": "^8.5.9",
|
||||
"react-use": "^17.6.0",
|
||||
"react-virtuoso": "^4.12.6",
|
||||
"redux-dynamic-middlewares": "^2.2.0",
|
||||
"redux-remember": "^5.1.0",
|
||||
"redux-remember": "^5.2.0",
|
||||
"redux-undo": "^1.1.0",
|
||||
"rfdc": "^1.4.1",
|
||||
"roarr": "^7.21.1",
|
||||
"serialize-error": "^11.0.3",
|
||||
"socket.io-client": "^4.8.0",
|
||||
"stable-hash": "^0.0.4",
|
||||
"use-debounce": "^10.0.3",
|
||||
"serialize-error": "^12.0.0",
|
||||
"socket.io-client": "^4.8.1",
|
||||
"stable-hash": "^0.0.5",
|
||||
"use-debounce": "^10.0.4",
|
||||
"use-device-pixel-ratio": "^1.1.2",
|
||||
"uuid": "^10.0.0",
|
||||
"zod": "^3.23.8",
|
||||
"uuid": "^11.1.0",
|
||||
"zod": "^3.24.3",
|
||||
"zod-validation-error": "^3.4.0"
|
||||
},
|
||||
"peerDependencies": {
|
||||
@@ -123,43 +123,43 @@
|
||||
"devDependencies": {
|
||||
"@invoke-ai/eslint-config-react": "^0.0.14",
|
||||
"@invoke-ai/prettier-config-react": "^0.0.7",
|
||||
"@storybook/addon-essentials": "^8.3.4",
|
||||
"@storybook/addon-interactions": "^8.3.4",
|
||||
"@storybook/addon-links": "^8.3.4",
|
||||
"@storybook/addon-storysource": "^8.3.4",
|
||||
"@storybook/manager-api": "^8.3.4",
|
||||
"@storybook/react": "^8.3.4",
|
||||
"@storybook/react-vite": "^8.5.5",
|
||||
"@storybook/theming": "^8.3.4",
|
||||
"@storybook/addon-essentials": "^8.6.12",
|
||||
"@storybook/addon-interactions": "^8.6.12",
|
||||
"@storybook/addon-links": "^8.6.12",
|
||||
"@storybook/addon-storysource": "^8.6.12",
|
||||
"@storybook/manager-api": "^8.6.12",
|
||||
"@storybook/react": "^8.6.12",
|
||||
"@storybook/react-vite": "^8.6.12",
|
||||
"@storybook/theming": "^8.6.12",
|
||||
"@types/lodash-es": "^4.17.12",
|
||||
"@types/node": "^20.16.10",
|
||||
"@types/node": "^22.15.1",
|
||||
"@types/react": "^18.3.11",
|
||||
"@types/react-dom": "^18.3.0",
|
||||
"@types/uuid": "^10.0.0",
|
||||
"@vitejs/plugin-react-swc": "^3.8.0",
|
||||
"@vitest/coverage-v8": "^3.0.6",
|
||||
"@vitest/ui": "^3.0.6",
|
||||
"concurrently": "^8.2.2",
|
||||
"@vitejs/plugin-react-swc": "^3.9.0",
|
||||
"@vitest/coverage-v8": "^3.1.2",
|
||||
"@vitest/ui": "^3.1.2",
|
||||
"concurrently": "^9.1.2",
|
||||
"csstype": "^3.1.3",
|
||||
"dpdm": "^3.14.0",
|
||||
"eslint": "^8.57.1",
|
||||
"eslint-plugin-i18next": "^6.1.0",
|
||||
"eslint-plugin-i18next": "^6.1.1",
|
||||
"eslint-plugin-path": "^1.3.0",
|
||||
"knip": "^5.31.0",
|
||||
"knip": "^5.50.5",
|
||||
"openapi-types": "^12.1.3",
|
||||
"openapi-typescript": "^7.4.1",
|
||||
"prettier": "^3.3.3",
|
||||
"rollup-plugin-visualizer": "^5.12.0",
|
||||
"storybook": "^8.3.4",
|
||||
"openapi-typescript": "^7.6.1",
|
||||
"prettier": "^3.5.3",
|
||||
"rollup-plugin-visualizer": "^5.14.0",
|
||||
"storybook": "^8.6.12",
|
||||
"tsafe": "^1.8.5",
|
||||
"type-fest": "^4.26.1",
|
||||
"typescript": "^5.6.2",
|
||||
"vite": "^6.1.0",
|
||||
"type-fest": "^4.40.0",
|
||||
"typescript": "^5.8.3",
|
||||
"vite": "^6.3.3",
|
||||
"vite-plugin-css-injected-by-js": "^3.5.2",
|
||||
"vite-plugin-dts": "^4.5.0",
|
||||
"vite-plugin-dts": "^4.5.3",
|
||||
"vite-plugin-eslint": "^1.8.1",
|
||||
"vite-tsconfig-paths": "^5.1.4",
|
||||
"vitest": "^3.0.6"
|
||||
"vitest": "^3.1.2"
|
||||
},
|
||||
"engines": {
|
||||
"pnpm": "8"
|
||||
|
||||
invokeai/frontend/web/pnpm-lock.yaml (generated, 3721 lines): file diff suppressed because it is too large.
@@ -119,7 +119,17 @@
|
||||
"error_withCount_other": "{{count}} Fehler",
|
||||
"value": "Wert",
|
||||
"label": "Label",
|
||||
"systemInformation": "Systeminformationen"
|
||||
"systemInformation": "Systeminformationen",
|
||||
"search": "Suche",
|
||||
"clear": "Zurücksetzen",
|
||||
"fullView": "Vollansicht",
|
||||
"compactView": "Kompaktansicht",
|
||||
"options_withCount_one": "{{count}} Option",
|
||||
"options_withCount_other": "{{count}} Optionen",
|
||||
"noOptions": "Keine Optionen",
|
||||
"noMatches": "Keine Treffer",
|
||||
"model_withCount_one": "{{count}} Modell",
|
||||
"model_withCount_other": "{{count}} Modelle"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Bildgröße",
|
||||
|
||||
@@ -24,15 +24,18 @@
|
||||
"autoAddBoard": "Auto-Add Board",
|
||||
"boards": "Boards",
|
||||
"selectedForAutoAdd": "Selected for Auto-Add",
|
||||
"bottomMessage": "Deleting this board and its images will reset any features currently using them.",
|
||||
"bottomMessage": "Deleting images will reset any features currently using them.",
|
||||
"cancel": "Cancel",
|
||||
"changeBoard": "Change Board",
|
||||
"clearSearch": "Clear Search",
|
||||
"deleteBoard": "Delete Board",
|
||||
"deleteBoardAndImages": "Delete Board and Images",
|
||||
"deleteBoardOnly": "Delete Board Only",
|
||||
"deletedBoardsCannotbeRestored": "Deleted boards cannot be restored. Selecting 'Delete Board Only' will move images to an uncategorized state.",
|
||||
"deletedPrivateBoardsCannotbeRestored": "Deleted boards cannot be restored. Selecting 'Delete Board Only' will move images to a private uncategorized state for the image's creator.",
|
||||
"deletedBoardsCannotbeRestored": "Deleted boards and images cannot be restored. Selecting 'Delete Board Only' will move images to an uncategorized state.",
|
||||
"deletedPrivateBoardsCannotbeRestored": "Deleted boards and images cannot be restored. Selecting 'Delete Board Only' will move images to a private uncategorized state for the image's creator.",
|
||||
"uncategorizedImages": "Uncategorized Images",
|
||||
"deleteAllUncategorizedImages": "Delete All Uncategorized Images",
|
||||
"deletedImagesCannotBeRestored": "Deleted images cannot be restored.",
|
||||
"hideBoards": "Hide Boards",
|
||||
"loading": "Loading...",
|
||||
"menuItemAutoAdd": "Auto-add to this Board",
|
||||
@@ -46,7 +49,7 @@
|
||||
"searchBoard": "Search Boards...",
|
||||
"selectBoard": "Select a Board",
|
||||
"shared": "Shared Boards",
|
||||
"topMessage": "This board contains images used in the following features:",
|
||||
"topMessage": "This selection contains images used in the following features:",
|
||||
"unarchiveBoard": "Unarchive Board",
|
||||
"uncategorized": "Uncategorized",
|
||||
"viewBoards": "View Boards",
|
||||
@@ -118,6 +121,8 @@
|
||||
"error": "Error",
|
||||
"error_withCount_one": "{{count}} error",
|
||||
"error_withCount_other": "{{count}} errors",
|
||||
"model_withCount_one": "{{count}} model",
|
||||
"model_withCount_other": "{{count}} models",
|
||||
"file": "File",
|
||||
"folder": "Folder",
|
||||
"format": "format",
|
||||
@@ -138,6 +143,8 @@
|
||||
"localSystem": "Local System",
|
||||
"learnMore": "Learn More",
|
||||
"modelManager": "Model Manager",
|
||||
"noMatches": "No matches",
|
||||
"noOptions": "No options",
|
||||
"nodes": "Workflows",
|
||||
"notInstalled": "Not $t(common.installed)",
|
||||
"openInNewTab": "Open in New Tab",
|
||||
@@ -171,6 +178,8 @@
|
||||
"blue": "Blue",
|
||||
"alpha": "Alpha",
|
||||
"selected": "Selected",
|
||||
"search": "Search",
|
||||
"clear": "Clear",
|
||||
"tab": "Tab",
|
||||
"view": "View",
|
||||
"edit": "Edit",
|
||||
@@ -197,7 +206,11 @@
|
||||
"column": "Column",
|
||||
"value": "Value",
|
||||
"label": "Label",
|
||||
"systemInformation": "System Information"
|
||||
"systemInformation": "System Information",
|
||||
"compactView": "Compact View",
|
||||
"fullView": "Full View",
|
||||
"options_withCount_one": "{{count}} option",
|
||||
"options_withCount_other": "{{count}} options"
|
||||
},
|
||||
"hrf": {
|
||||
"hrf": "High Resolution Fix",
|
||||
@@ -258,6 +271,7 @@
|
||||
"status": "Status",
|
||||
"total": "Total",
|
||||
"time": "Time",
|
||||
"credits": "Credits",
|
||||
"pending": "Pending",
|
||||
"in_progress": "In Progress",
|
||||
"completed": "Completed",
|
||||
@@ -768,6 +782,7 @@
|
||||
"description": "Description",
|
||||
"edit": "Edit",
|
||||
"fileSize": "File Size",
|
||||
"filterModels": "Filter models",
|
||||
"fluxRedux": "FLUX Redux",
|
||||
"height": "Height",
|
||||
"huggingFace": "HuggingFace",
|
||||
@@ -787,6 +802,7 @@
|
||||
"hfTokenUnableToVerify": "Unable to Verify HF Token",
|
||||
"hfTokenUnableToVerifyErrorMessage": "Unable to verify HuggingFace token. This is likely due to a network error. Please try again later.",
|
||||
"hfTokenSaved": "HF Token Saved",
|
||||
"hfTokenReset": "HF Token Reset",
|
||||
"urlUnauthorizedErrorMessage": "You may need to configure an API token to access this model.",
|
||||
"urlUnauthorizedErrorMessage2": "Learn how here.",
|
||||
"imageEncoderModelId": "Image Encoder Model ID",
|
||||
@@ -821,16 +837,20 @@
|
||||
"modelUpdated": "Model Updated",
|
||||
"modelUpdateFailed": "Model Update Failed",
|
||||
"name": "Name",
|
||||
"noModelsInstalled": "No Models Installed",
|
||||
"modelPickerFallbackNoModelsInstalled": "No models installed.",
|
||||
"modelPickerFallbackNoModelsInstalled2": "Visit the <LinkComponent>Model Manager</LinkComponent> to install models.",
"noModelsInstalledDesc1": "Install models with the",
"noModelSelected": "No Model Selected",
"noMatchingModels": "No matching Models",
"noMatchingModels": "No matching models",
"noModelsInstalled": "No models installed",
"none": "none",
"path": "Path",
"pathToConfig": "Path To Config",
"predictionType": "Prediction Type",
"prune": "Prune",
"pruneTooltip": "Prune finished imports from queue",
"relatedModels": "Related Models",
"showOnlyRelatedModels": "Related",
"repo_id": "Repo ID",
"repoVariant": "Repo Variant",
"scanFolder": "Scan Folder",
@@ -871,7 +891,8 @@
"installingXModels_one": "Installing {{count}} model",
"installingXModels_other": "Installing {{count}} models",
"skippingXDuplicates_one": ", skipping {{count}} duplicate",
"skippingXDuplicates_other": ", skipping {{count}} duplicates"
"skippingXDuplicates_other": ", skipping {{count}} duplicates",
"manageModels": "Manage Models"
},
"models": {
"addLora": "Add LoRA",
@@ -1093,6 +1114,7 @@
"info": "Info",
"invoke": {
"addingImagesTo": "Adding images to",
"modelDisabledForTrial": "Generating with {{modelName}} is not available on trial accounts. Visit your account settings to upgrade.",
"invoke": "Invoke",
"missingFieldTemplate": "Missing field template",
"missingInputForField": "missing input",
@@ -1173,7 +1195,8 @@
"width": "Width",
"gaussianBlur": "Gaussian Blur",
"boxBlur": "Box Blur",
"staged": "Staged"
"staged": "Staged",
"modelDisabledForTrial": "Generating with {{modelName}} is not available on trial accounts. Visit your <LinkComponent>account settings</LinkComponent> to upgrade."
},
"dynamicPrompts": {
"showDynamicPrompts": "Show Dynamic Prompts",
@@ -1312,6 +1335,8 @@
"unableToCopyDesc": "Your browser does not support clipboard access. Firefox users may be able to fix this by following ",
"unableToCopyDesc_theseSteps": "these steps",
"fluxFillIncompatibleWithT2IAndI2I": "FLUX Fill is not compatible with Text to Image or Image to Image. Use other FLUX models for these tasks.",
"imagenIncompatibleGenerationMode": "Google {{model}} supports Text to Image only. Use other models for Image to Image, Inpainting and Outpainting tasks.",
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o supports Text to Image and Image to Image only. Use other models for Inpainting and Outpainting tasks.",
"problemUnpublishingWorkflow": "Problem Unpublishing Workflow",
"problemUnpublishingWorkflowDescription": "There was a problem unpublishing the workflow. Please try again.",
"workflowUnpublished": "Workflow Unpublished"
@@ -1885,11 +1910,13 @@
"addPositivePrompt": "Add $t(controlLayers.prompt)",
"addNegativePrompt": "Add $t(controlLayers.negativePrompt)",
"addReferenceImage": "Add $t(controlLayers.referenceImage)",
"addImageNoise": "Add $t(controlLayers.imageNoise)",
"addRasterLayer": "Add $t(controlLayers.rasterLayer)",
"addControlLayer": "Add $t(controlLayers.controlLayer)",
"addInpaintMask": "Add $t(controlLayers.inpaintMask)",
"addRegionalGuidance": "Add $t(controlLayers.regionalGuidance)",
"addGlobalReferenceImage": "Add $t(controlLayers.globalReferenceImage)",
"addDenoiseLimit": "Add $t(controlLayers.denoiseLimit)",
"rasterLayer": "Raster Layer",
"controlLayer": "Control Layer",
"inpaintMask": "Inpaint Mask",
@@ -1987,8 +2014,10 @@
"resetCanvasLayers": "Reset Canvas Layers",
"resetGenerationSettings": "Reset Generation Settings",
"replaceCurrent": "Replace Current",
"controlLayerEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, or draw on the canvas to get started.",
"controlLayerEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, <PullBboxButton>pull the bounding box into this layer</PullBboxButton>, or draw on the canvas to get started.",
"referenceImageEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, or <PullBboxButton>pull the bounding box into this layer</PullBboxButton> to get started.",
"imageNoise": "Image Noise",
"denoiseLimit": "Denoise Limit",
"warnings": {
"problemsFound": "Problems found",
"unsupportedModel": "layer not supported for selected base model",
@@ -2020,10 +2049,14 @@
"ipAdapterMethod": "Mode",
"full": "Style and Composition",
"fullDesc": "Applies visual style (colors, textures) & composition (layout, structure).",
"style": "Style Only",
"styleDesc": "Applies visual style (colors, textures) without considering its layout.",
"style": "Style (Simple)",
"styleDesc": "Applies visual style (colors, textures) without considering its layout. Previously called Style Only.",
"composition": "Composition Only",
"compositionDesc": "Replicates layout & structure while ignoring the reference's style."
"compositionDesc": "Replicates layout & structure while ignoring the reference's style.",
"styleStrong": "Style (Strong)",
"styleStrongDesc": "Applies a strong visual style, with a slightly reduced composition influence.",
"stylePrecise": "Style (Precise)",
"stylePreciseDesc": "Applies a precise visual style, eliminating subject influence."
},
"fluxReduxImageInfluence": {
"imageInfluence": "Image Influence",
@@ -2393,8 +2426,8 @@
"whatsNew": {
"whatsNewInInvoke": "What's New in Invoke",
"items": [
"CogView4: Support for CogView4 models in Canvas and Workflows.",
"Updated Dependencies: Invoke now runs on the latest version of its dependencies, including Python 3.12 and Pytorch 2.6.0."
"Inpainting: Per-mask noise levels and denoise limits.",
"Canvas: Smarter aspect ratios for SDXL and improved scroll-to-zoom."
],
"readReleaseNotes": "Read Release Notes",
"watchRecentReleaseVideos": "Watch Recent Release Videos",
@@ -116,7 +116,19 @@
|
||||
"error_withCount_other": "{{count}} errori",
|
||||
"value": "Valore",
|
||||
"label": "Etichetta",
|
||||
"systemInformation": "Informazioni di sistema"
|
||||
"systemInformation": "Informazioni di sistema",
|
||||
"noMatches": "Nessuna corrispondenza",
|
||||
"noOptions": "Nessuna opzione",
|
||||
"model_withCount_one": "{{count}} modello",
|
||||
"model_withCount_many": "{{count}} modelli",
|
||||
"model_withCount_other": "{{count}} modelli",
|
||||
"options_withCount_one": "{{count}} opzione",
|
||||
"options_withCount_many": "{{count}} opzioni",
|
||||
"options_withCount_other": "{{count}} opzioni",
|
||||
"search": "Cerca",
|
||||
"clear": "Cancella",
|
||||
"compactView": "Vista compatta",
|
||||
"fullView": "Vista completa"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Dimensione dell'immagine",
|
||||
@@ -637,7 +649,14 @@
|
||||
"urlForbidden": "Non hai accesso a questo modello",
|
||||
"urlForbiddenErrorMessage": "Potrebbe essere necessario richiedere l'autorizzazione al sito che distribuisce il modello.",
|
||||
"urlUnauthorizedErrorMessage": "Potrebbe essere necessario configurare un gettone API per accedere a questo modello.",
|
||||
"fileSize": "Dimensione del file"
|
||||
"fileSize": "Dimensione del file",
|
||||
"filterModels": "Filtra i modelli",
|
||||
"modelPickerFallbackNoModelsInstalled": "Nessun modello installato.",
|
||||
"modelPickerFallbackNoModelsInstalled2": "Visita <LinkComponent>Gestione modelli</LinkComponent> per installare i modelli.",
|
||||
"manageModels": "Gestione modelli",
|
||||
"hfTokenReset": "Ripristino del gettone HF",
|
||||
"relatedModels": "Modelli correlati",
|
||||
"showOnlyRelatedModels": "Correlati"
|
||||
},
|
||||
"parameters": {
|
||||
"images": "Immagini",
|
||||
@@ -719,7 +738,11 @@
|
||||
"collectionNumberLTExclusiveMin": "{{value}} <= {{exclusiveMinimum}} (excl min)",
|
||||
"collectionEmpty": "raccolta vuota",
|
||||
"batchNodeCollectionSizeMismatchNoGroupId": "Dimensione della raccolta di gruppo nel Lotto non corrisponde",
|
||||
"modelIncompatibleBboxWidth": "La larghezza del riquadro di delimitazione è {{width}} ma {{model}} richiede multipli di {{multiple}}"
|
||||
"modelIncompatibleBboxWidth": "La larghezza del riquadro di delimitazione è {{width}} ma {{model}} richiede multipli di {{multiple}}",
|
||||
"modelIncompatibleBboxHeight": "L'altezza del riquadro è {{height}} ma {{model}} richiede multipli di {{multiple}}",
|
||||
"modelIncompatibleScaledBboxWidth": "La larghezza scalata del riquadro è {{width}} ma {{model}} richiede multipli di {{multiple}}",
|
||||
"modelIncompatibleScaledBboxHeight": "L'altezza scalata del riquadro è {{height}} ma {{model}} richiede multipli di {{multiple}}",
|
||||
"modelDisabledForTrial": "La generazione con {{modelName}} non è disponibile per gli account di prova. Accedi alle impostazioni del tuo account per effettuare l'upgrade."
|
||||
},
|
||||
"useCpuNoise": "Usa la CPU per generare rumore",
|
||||
"iterations": "Iterazioni",
|
||||
@@ -746,7 +769,8 @@
|
||||
"sendToCanvas": "Invia alla Tela",
|
||||
"coherenceMinDenoise": "Min rid. rumore",
|
||||
"recallMetadata": "Richiama i metadati",
|
||||
"disabledNoRasterContent": "Disabilitato (nessun contenuto Raster)"
|
||||
"disabledNoRasterContent": "Disabilitato (nessun contenuto Raster)",
|
||||
"modelDisabledForTrial": "La generazione con {{modelName}} non è disponibile per gli account di prova. Visita le <LinkComponent>impostazioni account</LinkComponent> per effettuare l'upgrade."
|
||||
},
|
||||
"settings": {
|
||||
"models": "Modelli",
|
||||
@@ -855,7 +879,12 @@
|
||||
"unableToCopy": "Impossibile copiare",
|
||||
"unableToCopyDesc": "Il tuo browser non supporta l'accesso agli appunti. Gli utenti di Firefox potrebbero risolvere il problema seguendo ",
|
||||
"unableToCopyDesc_theseSteps": "questi passaggi",
|
||||
"fluxFillIncompatibleWithT2IAndI2I": "FLUX Fill non è compatibile con Testo a Immagine o Immagine a Immagine. Per queste attività, utilizzare altri modelli FLUX."
|
||||
"fluxFillIncompatibleWithT2IAndI2I": "FLUX Fill non è compatibile con Testo a Immagine o Immagine a Immagine. Per queste attività, utilizzare altri modelli FLUX.",
|
||||
"problemUnpublishingWorkflow": "Problema durante l'annullamento della pubblicazione del flusso di lavoro",
|
||||
"problemUnpublishingWorkflowDescription": "Si è verificato un problema durante l'annullamento della pubblicazione del flusso di lavoro. Riprova.",
|
||||
"workflowUnpublished": "Flusso di lavoro non pubblicato",
|
||||
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o supporta solo la conversione da testo a immagine e da immagine a immagine. Utilizza altri modelli per le attività di Inpainting e Outpainting.",
|
||||
"imagenIncompatibleGenerationMode": "Google {{model}} supporta solo la generazione da testo a immagine. Utilizza altri modelli per le attività di conversione da immagine a immagine, inpainting e outpainting."
|
||||
},
|
||||
"accessibility": {
|
||||
"invokeProgressBar": "Barra di avanzamento generazione",
|
||||
@@ -1049,18 +1078,19 @@
|
||||
"unknownField_withName": "Campo \"{{name}}\" sconosciuto",
|
||||
"missingField_withName": "Campo \"{{name}}\" mancante",
|
||||
"unknownFieldEditWorkflowToFix_withName": "Il flusso di lavoro contiene un campo \"{{name}}\" sconosciuto .\nModifica il flusso di lavoro per risolvere il problema.",
|
||||
"unexpectedField_withName": "Campo \"{{name}}\" inaspettato"
|
||||
"unexpectedField_withName": "Campo \"{{name}}\" inaspettato",
|
||||
"missingSourceOrTargetHandle": "Identificatore del nodo sorgente o di destinazione mancante"
|
||||
},
|
||||
"boards": {
|
||||
"autoAddBoard": "Aggiungi automaticamente bacheca",
|
||||
"menuItemAutoAdd": "Aggiungi automaticamente a questa bacheca",
|
||||
"cancel": "Annulla",
|
||||
"addBoard": "Aggiungi Bacheca",
|
||||
"bottomMessage": "L'eliminazione di questa bacheca e delle sue immagini ripristinerà tutte le funzionalità che le stanno attualmente utilizzando.",
|
||||
"bottomMessage": "L'eliminazione delle immagini reimposterà tutte le funzionalità che le stanno utilizzando.",
|
||||
"changeBoard": "Cambia Bacheca",
|
||||
"loading": "Caricamento in corso ...",
|
||||
"clearSearch": "Cancella Ricerca",
|
||||
"topMessage": "Questa bacheca contiene immagini utilizzate nelle seguenti funzionalità:",
|
||||
"topMessage": "Questa selezione contiene immagini utilizzate nelle seguenti funzionalità:",
|
||||
"move": "Sposta",
|
||||
"myBoard": "Bacheca",
|
||||
"searchBoard": "Cerca bacheche ...",
|
||||
@@ -1071,7 +1101,7 @@
|
||||
"deleteBoardOnly": "solo la Bacheca",
|
||||
"deleteBoard": "Elimina Bacheca",
|
||||
"deleteBoardAndImages": "Bacheca e Immagini",
|
||||
"deletedBoardsCannotbeRestored": "Le bacheche eliminate non possono essere ripristinate. Selezionando \"Elimina solo bacheca\" le immagini verranno spostate nella bacheca \"Non categorizzato\".",
|
||||
"deletedBoardsCannotbeRestored": "Le bacheche e le immagini eliminate non possono essere ripristinate. Selezionando \"Elimina solo bacheca\" le immagini verranno spostate in uno stato non categorizzato.",
|
||||
"movingImagesToBoard_one": "Spostare {{count}} immagine nella bacheca:",
|
||||
"movingImagesToBoard_many": "Spostare {{count}} immagini nella bacheca:",
|
||||
"movingImagesToBoard_other": "Spostare {{count}} immagini nella bacheca:",
|
||||
@@ -1093,8 +1123,11 @@
|
||||
"noBoards": "Nessuna bacheca {{boardType}}",
|
||||
"hideBoards": "Nascondi bacheche",
|
||||
"viewBoards": "Visualizza bacheche",
|
||||
"deletedPrivateBoardsCannotbeRestored": "Le bacheche cancellate non possono essere ripristinate. Selezionando 'Cancella solo bacheca', le immagini verranno spostate nella bacheca \"Non categorizzato\" privata dell'autore dell'immagine.",
|
||||
"updateBoardError": "Errore durante l'aggiornamento della bacheca"
|
||||
"deletedPrivateBoardsCannotbeRestored": "Le bacheche e le immagini eliminate non possono essere ripristinate. Selezionando \"Elimina solo bacheca\", le immagini verranno spostate in uno stato privato e non categorizzato per l'autore dell'immagine.",
|
||||
"updateBoardError": "Errore durante l'aggiornamento della bacheca",
|
||||
"uncategorizedImages": "Immagini non categorizzate",
|
||||
"deleteAllUncategorizedImages": "Elimina tutte le immagini non categorizzate",
|
||||
"deletedImagesCannotBeRestored": "Le immagini eliminate non possono essere ripristinate."
|
||||
},
|
||||
"queue": {
|
||||
"queueFront": "Aggiungi all'inizio della coda",
|
||||
@@ -1178,7 +1211,8 @@
|
||||
"cancelAllExceptCurrentTooltip": "Annulla tutto tranne l'elemento corrente",
|
||||
"retrySucceeded": "Elemento rieseguito",
|
||||
"retryItem": "Riesegui elemento",
|
||||
"retryFailed": "Problema riesecuzione elemento"
|
||||
"retryFailed": "Problema riesecuzione elemento",
|
||||
"credits": "Crediti"
|
||||
},
|
||||
"models": {
|
||||
"noMatchingModels": "Nessun modello corrispondente",
|
||||
@@ -1821,7 +1855,10 @@
|
||||
"publishingValidationRunInProgress": "È in corso la convalida della pubblicazione.",
|
||||
"publishedWorkflowsLocked": "I flussi di lavoro pubblicati sono bloccati e non possono essere modificati o eseguiti. Annulla la pubblicazione del flusso di lavoro o salva una copia per modificare o eseguire questo flusso di lavoro.",
|
||||
"warningWorkflowHasNoPublishableInputFields": "Nessun campo di ingresso pubblicabile selezionato: il flusso di lavoro pubblicato verrà eseguito solo con i valori predefiniti",
|
||||
"publishInProgress": "Pubblicazione in corso"
|
||||
"publishInProgress": "Pubblicazione in corso",
|
||||
"selectingOutputNode": "Selezione del nodo di uscita",
|
||||
"selectingOutputNodeDesc": "Fare clic su un nodo per selezionarlo come nodo di uscita del flusso di lavoro.",
|
||||
"errorWorkflowHasUnpublishableNodes": "Il flusso di lavoro ha nodi di estrazione lotto, generatore o metadati"
|
||||
},
|
||||
"loadMore": "Carica altro",
|
||||
"searchPlaceholder": "Cerca per nome, descrizione o etichetta",
|
||||
@@ -1971,12 +2008,16 @@
|
||||
"stagingOnCanvas": "Genera immagini nella",
|
||||
"ipAdapterMethod": {
|
||||
"full": "Stile e Composizione",
|
||||
"style": "Solo Stile",
|
||||
"style": "Stile (semplice)",
|
||||
"composition": "Solo Composizione",
|
||||
"ipAdapterMethod": "Modalità",
|
||||
"fullDesc": "Applica lo stile visivo (colori, texture) e la composizione (disposizione, struttura).",
|
||||
"styleDesc": "Applica lo stile visivo (colori, texture) senza considerare la disposizione.",
|
||||
"compositionDesc": "Replica disposizione e struttura ignorando lo stile di riferimento."
|
||||
"styleDesc": "Applica lo stile visivo (colori, texture) senza considerare la disposizione. Precedentemente chiamato \"Solo stile\".",
|
||||
"compositionDesc": "Replica disposizione e struttura ignorando lo stile di riferimento.",
|
||||
"styleStrong": "Stile (forte)",
|
||||
"styleStrongDesc": "Applica uno stile visivo forte, con un'influenza sulla composizione leggermente ridotta.",
|
||||
"stylePrecise": "Stile (preciso)",
|
||||
"stylePreciseDesc": "Applica uno stile visivo preciso, eliminando l'influenza del soggetto."
|
||||
},
|
||||
"showingType": "Mostra {{type}}",
|
||||
"dynamicGrid": "Griglia dinamica",
|
||||
@@ -2258,7 +2299,7 @@
|
||||
"replaceCurrent": "Sostituisci corrente",
|
||||
"mergeDown": "Unire in basso",
|
||||
"mergingLayers": "Unione dei livelli",
|
||||
"controlLayerEmptyState": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton> su questo livello oppure disegna sulla tela per iniziare.",
|
||||
"controlLayerEmptyState": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton> su questo livello, <PullBboxButton>trascina il riquadro di delimitazione in questo livello</PullBboxButton> oppure disegna sulla tela per iniziare.",
|
||||
"useImage": "Usa immagine",
|
||||
"resetGenerationSettings": "Ripristina impostazioni di generazione",
|
||||
"referenceImageEmptyState": "Per iniziare, <UploadButton>carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton>, oppure <PullBboxButton>trascina il riquadro di delimitazione in questo livello</PullBboxButton> su questo livello.",
|
||||
@@ -2299,7 +2340,19 @@
|
||||
"errors": {
|
||||
"unableToFindImage": "Impossibile trovare l'immagine",
|
||||
"unableToLoadImage": "Impossibile caricare l'immagine"
|
||||
}
|
||||
},
|
||||
"fluxReduxImageInfluence": {
|
||||
"high": "Alta",
|
||||
"low": "Basso",
|
||||
"imageInfluence": "Influenza dell'immagine",
|
||||
"lowest": "Il più basso",
|
||||
"medium": "Medio",
|
||||
"highest": "La più alta"
|
||||
},
|
||||
"denoiseLimit": "Limite di riduzione del rumore",
|
||||
"addImageNoise": "Aggiungi $t(controlLayers.imageNoise)",
|
||||
"addDenoiseLimit": "Aggiungi $t(controlLayers.denoiseLimit)",
|
||||
"imageNoise": "Rumore dell'immagine"
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
@@ -2399,8 +2452,8 @@
|
||||
"watchRecentReleaseVideos": "Guarda i video su questa versione",
|
||||
"watchUiUpdatesOverview": "Guarda le novità dell'interfaccia",
|
||||
"items": [
|
||||
"Flussi di lavoro: supporto per menu a discesa di stringhe personalizzate nel Generatore di Flussi di lavoro.",
|
||||
"FLUX: supporto per FLUX Fill in Flussi di lavoro e Tela."
|
||||
"Inpainting: livelli di rumore per maschera e limiti di denoise.",
|
||||
"Canvas: proporzioni più intelligenti per SDXL e scorrimento e zoom migliorati."
|
||||
]
|
||||
},
|
||||
"system": {
|
||||
|
||||
@@ -118,7 +118,15 @@
|
||||
"value": "値",
|
||||
"label": "ラベル",
|
||||
"saveChanges": "変更を保存",
|
||||
"error_withCount_other": "{{count}} 個のエラー"
|
||||
"error_withCount_other": "{{count}} 個のエラー",
|
||||
"noMatches": "合致しません",
|
||||
"model_withCount_other": "{{count}}個のモデル",
|
||||
"noOptions": "オプションがありません",
|
||||
"search": "検索",
|
||||
"clear": "クリア",
|
||||
"compactView": "コンパクトビュー",
|
||||
"fullView": "フルビュー",
|
||||
"options_withCount_other": "{{count}}個のオプション"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "画像のサイズ",
|
||||
@@ -384,7 +392,7 @@
|
||||
"title": "全選択"
|
||||
},
|
||||
"addNode": {
|
||||
"desc": "ノード追加メニューを開く.",
|
||||
"desc": "ノード追加メニューを開く。",
|
||||
"title": "ノードを追加"
|
||||
},
|
||||
"pasteSelectionWithEdges": {
|
||||
@@ -583,7 +591,7 @@
|
||||
"deleteModelImage": "モデル画像を削除",
|
||||
"hfTokenInvalid": "ハギングフェイストークンが無効または見つかりません",
|
||||
"hfForbiddenErrorMessage": "リポジトリにアクセスすることを勧めます.所有者はダウンロードにあたり利用規約への同意を要求する場合があります.",
|
||||
"noModelsInstalled": "インストールされているモデルなし",
|
||||
"noModelsInstalled": "インストールされているモデルがありません",
|
||||
"pathToConfig": "設定へのパス",
|
||||
"noModelsInstalledDesc1": "モデルを一緒にインストール",
|
||||
"pruneTooltip": "完了したインポートをキューから削除",
|
||||
@@ -639,7 +647,14 @@
|
||||
"urlUnauthorizedErrorMessage": "このモデルにアクセスするためにAPIトークンを構成する必要があるかもしれません.",
|
||||
"urlUnauthorizedErrorMessage2": "ここでどうやるか学びます.",
|
||||
"inplaceInstall": "定位置にインストール",
|
||||
"fileSize": "ファイルサイズ"
|
||||
"fileSize": "ファイルサイズ",
|
||||
"modelPickerFallbackNoModelsInstalled2": "<LinkComponent>モデルマネージャー</LinkComponent> にアクセスしてモデルをインストールしてください.",
|
||||
"filterModels": "フィルターモデル",
|
||||
"modelPickerFallbackNoModelsInstalled": "モデルがインストールされていません.",
|
||||
"manageModels": "モデル管理",
|
||||
"hfTokenReset": "ハギングフェイストークンリセット",
|
||||
"relatedModels": "関連のあるモデル",
|
||||
"showOnlyRelatedModels": "関連している"
|
||||
},
|
||||
"parameters": {
|
||||
"images": "画像",
|
||||
@@ -684,7 +699,28 @@
|
||||
"collectionNumberGTMax": "{{value}} > {{maximum}} (最大増加)",
|
||||
"missingNodeTemplate": "ノードテンプレートの欠落",
|
||||
"batchNodeNotConnected": "バッチノードが: {{label}}につながっていない",
|
||||
"collectionNumberLTMin": "{{value}} < {{minimum}} (最小増加)"
|
||||
"collectionNumberLTMin": "{{value}} < {{minimum}} (最小増加)",
|
||||
"fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), スケーリングされたbboxの高さは{{height}}です",
|
||||
"fluxModelMultipleControlLoRAs": "コントロールLoRAは1度に1つしか使用できません",
|
||||
"noPrompts": "プロンプトが生成されません",
|
||||
"noNodesInGraph": "グラフにノードがありません",
|
||||
"noCLIPEmbedModelSelected": "FLUX生成にCLIPエンベッドモデルが選択されていません",
|
||||
"canvasIsFiltering": "キャンバスがビジー状態(フィルタリング)",
|
||||
"canvasIsCompositing": "キャンバスがビジー状態(合成)",
|
||||
"systemDisconnected": "システムが切断されました",
|
||||
"fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), 拡大縮小されたbboxの幅は{{width}}です",
|
||||
"canvasIsTransforming": "キャンバスがビジー状態(変換)",
|
||||
"canvasIsRasterizing": "キャンバスがビジー状態(ラスタライズ)",
|
||||
"modelIncompatibleBboxHeight": "Bboxの高さは{{height}}ですが,{{model}}は{{multiple}}の倍数が必要です",
|
||||
"modelIncompatibleScaledBboxHeight": "bboxの高さは{{height}}ですが,{{model}}は{{multiple}}の倍数を必要です",
|
||||
"modelIncompatibleBboxWidth": "Bboxの幅は{{width}}ですが, {{model}}は{{multiple}}の倍数が必要です",
|
||||
"modelIncompatibleScaledBboxWidth": "bboxの幅は{{width}}ですが,{{model}}は{{multiple}}の倍数が必要です",
|
||||
"canvasIsSelectingObject": "キャンバスがビジー状態(オブジェクトの選択)",
|
||||
"fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), bboxの幅は{{width}}です",
|
||||
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), bboxの高さは{{height}}です",
|
||||
"noFLUXVAEModelSelected": "FLUX生成にVAEモデルが選択されていません",
|
||||
"noT5EncoderModelSelected": "FLUX生成にT5エンコーダモデルが選択されていません",
|
||||
"modelDisabledForTrial": "{{modelName}} を使用した生成はトライアルアカウントではご利用いただけません.アカウント設定にアクセスしてアップグレードしてください。"
|
||||
},
|
||||
"aspect": "縦横比",
|
||||
"lockAspectRatio": "縦横比を固定",
|
||||
@@ -716,7 +752,24 @@
|
||||
"cfgRescaleMultiplier": "CFGリスケール倍率",
|
||||
"clipSkip": "クリップスキップ",
|
||||
"guidance": "ガイダンス",
|
||||
"infillMethod": "充填法"
|
||||
"infillMethod": "充填法",
|
||||
"patchmatchDownScaleSize": "ダウンスケール",
|
||||
"boxBlur": "ボックスぼかし",
|
||||
"remixImage": "リミックス画像",
|
||||
"processImage": "プロセス画像",
|
||||
"useCpuNoise": "CPUノイズの使用",
|
||||
"staged": "ステージ",
|
||||
"perlinNoise": "パーリン・ノイズ(グラデーションノイズ)",
|
||||
"imageActions": "画像処理",
|
||||
"gaussianBlur": "ガウスぼかし",
|
||||
"noiseThreshold": "ノイズの閾値",
|
||||
"maskBlur": "マスクぼかし",
|
||||
"seamlessYAxis": "シームレスなY軸",
|
||||
"optimizedImageToImage": "イメージ to イメージの最適化",
|
||||
"symmetry": "左右対称",
|
||||
"seamlessXAxis": "シームレスなX軸",
|
||||
"sendToCanvas": "キャンバスに送る",
|
||||
"modelDisabledForTrial": "{{modelName}} を使用した生成はトライアルアカウントではご利用いただけません.アップグレードするには,<LinkComponent>アカウント設定</LinkComponent> にアクセスしてください."
|
||||
},
|
||||
"settings": {
|
||||
"models": "モデル",
|
||||
@@ -728,16 +781,101 @@
|
||||
"resetComplete": "WebUIはリセットされました。",
|
||||
"ui": "ユーザーインターフェイス",
|
||||
"beta": "ベータ",
|
||||
"developer": "開発者"
|
||||
"developer": "開発者",
|
||||
"antialiasProgressImages": "アンチエイリアスの経過画像",
|
||||
"enableInformationalPopovers": "情報ポップオーバーを有効にする",
|
||||
"enableModelDescriptions": "ドロップダウンでモデルの説明を有効にする",
|
||||
"confirmOnNewSession": "新しいセッションで確認する",
|
||||
"informationalPopoversDisabled": "情報ポップオーバーが無効になっています",
|
||||
"informationalPopoversDisabledDesc": "情報ポップオーバーが無効になっています.設定で有効にしてください.",
|
||||
"enableNSFWChecker": "NSFWチェッカーを有効にする",
|
||||
"enableInvisibleWatermark": "目に見えない透かしを有効にする",
|
||||
"enableHighlightFocusedRegions": "重点領域を強調表示",
|
||||
"clearIntermediatesDesc1": "中間物をクリアすると、キャンバスとコントロールネットの状態がリセットされます.",
|
||||
"showProgressInViewer": "ビューアで進行状況画像を表示する",
|
||||
"modelDescriptionsDisabled": "ドロップダウンのモデル説明が無効になっています",
|
||||
"modelDescriptionsDisabledDesc": "ドロップダウンのモデル説明が無効になっています.設定で有効にしてください.",
|
||||
"clearIntermediatesDisabled": "中間物をクリアするにはキューが空でなければなりません",
|
||||
"clearIntermediatesDesc2": "中間画像は生成時に生成される副産物であり、ギャラリーに表示される結果画像とは異なります.中間画像を削除するとディスク容量が解放されます.",
|
||||
"intermediatesClearedFailed": "中間物をクリアする問題",
|
||||
"reloadingIn": "リロード中",
|
||||
"clearIntermediatesDesc3": "ギャラリー画像は削除されません.",
|
||||
"clearIntermediates": "中間物をクリア",
|
||||
"clearIntermediatesWithCount_other": "{{count}} 個の中間物をクリア",
|
||||
"intermediatesCleared_other": "{{count}}個の中間物がクリアされました",
|
||||
"general": "一般",
|
||||
"generation": "生成",
|
||||
"showDetailedInvocationProgress": "進捗状況の詳細を表示"
|
||||
},
|
||||
"toast": {
|
||||
"uploadFailed": "アップロード失敗",
|
||||
"imageCopied": "画像をコピー",
|
||||
"imageUploadFailed": "画像のアップロードに失敗しました",
|
||||
"uploadFailedInvalidUploadDesc": "画像はPNGかJPGである必要があります。",
|
||||
"uploadFailedInvalidUploadDesc": "画像はPNGかJPGかWEBPである必要があります .",
|
||||
"sentToUpscale": "アップスケーラーに転送しました",
|
||||
"imageUploaded": "画像をアップロードしました",
|
||||
"serverError": "サーバーエラー"
|
||||
"serverError": "サーバーエラー",
|
||||
"prunedQueue": "キューを破棄",
|
||||
"workflowDeleted": "ワークフローが削除されました",
|
||||
"unableToLoadStylePreset": "スタイルプリセットをロードできません",
|
||||
"loadedWithWarnings": "ワークフローが警告付きでロードされました",
|
||||
"parameters": "パラメーター",
|
||||
"parameterSet": "パラメーターが呼び出されました",
|
||||
"pasteSuccess": "{{destination}} に貼り付けました",
|
||||
"imagesWillBeAddedTo": "アップロードされた画像はボード {{boardName}} のアセットに追加されます.",
|
||||
"layerCopiedToClipboard": "レイヤーがクリップボードにコピーされました",
|
||||
"pasteFailed": "貼り付け失敗",
|
||||
"imageSavingFailed": "画像保存に失敗しました",
|
||||
"importSuccessful": "インポートが成功しました",
|
||||
"problemDownloadingImage": "画像をダウンロードできません",
|
||||
"modelAddedSimple": "モデルがキューに追加されました",
|
||||
"uploadFailedInvalidUploadDesc_withCount_other": "PNG、JPEG、または WEBP 画像は最大 1 つにする必要があります.",
|
||||
"outOfMemoryErrorDesc": "現在の生成設定はシステム容量を超えています.設定を調整してもう一度お試しください.",
|
||||
"parametersSet": "パラメーターが呼び出されました",
|
||||
"modelImportCanceled": "モデルのインポートがキャンセルされました",
|
||||
"problemRetrievingWorkflow": "ワークフローを取得した問題",
|
||||
"problemUnpublishingWorkflow": "取り消されたワークフローの問題",
|
||||
"parametersNotSet": "パラメーターが呼び出されていません",
|
||||
"problemCopyingImage": "画像をコピーできません",
|
||||
"baseModelChanged": "ベースモデルが変更されました",
|
||||
"baseModelChangedCleared_other": "{{count}} 個の互換性のないサブモデルをクリア,または無効にしました",
|
||||
"canceled": "処理がキャンセルされました",
|
||||
"connected": "サーバーに接続されました",
|
||||
"linkCopied": "リンクがコピーされました",
|
||||
"unableToLoadImage": "画像をロードできません",
|
||||
"unableToLoadImageMetadata": "画像のメタデータをロードできません",
|
||||
"imageSaved": "画像が保存されました",
|
||||
"importFailed": "インポートに失敗しました",
|
||||
"invalidUpload": "無効なアップロードです",
|
||||
"outOfMemoryError": "メモリ不足エラー",
|
||||
"parameterSetDesc": "{{parameter}}を呼び出し",
|
||||
"errorCopied": "エラーがコピーされました",
|
||||
"sentToCanvas": "キャンバスに送信",
|
||||
"setControlImage": "コントロール画像としてセット",
|
||||
"workflowLoaded": "ワークフローがロードされました",
|
||||
"unableToCopy": "コピーできません",
|
||||
"unableToCopyDesc": "あなたのブラウザはクリップボードアクセスをサポートしていません.Firefoxユーザーの場合は、以下の手順で修正できる可能性があります. ",
|
||||
"fluxFillIncompatibleWithT2IAndI2I": "FLUX Fillは、テキストから画像へ、または画像から画像へ変換機能と互換性がありません.これらのタスクには、他のFLUXモデルをご利用ください.",
|
||||
"problemUnpublishingWorkflowDescription": "取り下げられたワークフローの問題がありました.もう一度試してください.",
|
||||
"workflowUnpublished": "ワークフローが取り消されました",
|
||||
"sessionRef": "セッション: {{sessionId}}",
|
||||
"somethingWentWrong": "問題が発生しました",
|
||||
"unableToCopyDesc_theseSteps": "これらのステップ数",
|
||||
"stylePresetLoaded": "スタイルプリセットがロードされました",
|
||||
"parameterNotSetDescWithMessage": "{{parameter}}: {{message}}を呼び出せません",
|
||||
"problemCopyingLayer": "レイヤーをコピーできません",
|
||||
"problemSavingLayer": "レイヤー保存ができません",
|
||||
"setNodeField": "ノードフィールドとしてセット",
|
||||
"layerSavedToAssets": "レイヤーがアセットに保存されました",
|
||||
"outOfMemoryErrorDescLocal": "OOM を削減するには、<LinkComponent>低 VRAM ガイド</LinkComponent> に従ってください.",
|
||||
"parameterNotSet": "パラメーターが呼び出されていません",
|
||||
"addedToBoard": "{{name}} 個の資産をボードに追加しました",
|
||||
"addedToUncategorized": "$t(boards.uncategorized)個のアセットがボードに追加されました",
|
||||
"problemDeletingWorkflow": "ワークフローが削除された問題",
|
||||
"imageNotLoadedDesc": "画像を見つけられません",
|
||||
"parameterNotSetDesc": "{{parameter}}を呼び出せません",
|
||||
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4oは,テキストから画像への生成と画像から画像への生成のみをサポートしています.インペインティングおよび,アウトペインティングタスクには他のモデルを使用してください.",
|
||||
"imagenIncompatibleGenerationMode": "Google {{model}} はテキストから画像への変換のみをサポートしています. 画像から画像への変換, インペインティング,アウトペインティングのタスクには他のモデルを使用してください."
|
||||
},
|
||||
"accessibility": {
|
||||
"invokeProgressBar": "進捗バー",
|
||||
@@ -862,7 +1000,8 @@
|
||||
"batchSize": "バッチサイズ",
|
||||
"retryFailed": "項目のリトライに問題があります",
|
||||
"cancelAllExceptCurrentQueueItemAlertDialog": "現在の項目を除くすべてのキュー項目をキャンセルすると、保留中の項目は停止しますが、進行中の項目は完了します。",
|
||||
"retrySucceeded": "項目がリトライされました"
|
||||
"retrySucceeded": "項目がリトライされました",
|
||||
"credits": "クレジット"
|
||||
},
|
||||
"models": {
|
||||
"noMatchingModels": "一致するモデルがありません",
|
||||
@@ -1017,11 +1156,11 @@
|
||||
"unknownField": "不明なフィールド",
|
||||
"unexpectedField_withName": "予期しないフィールド\"{{name}}\"",
|
||||
"loadingTemplates": "読み込み中 {{name}}",
|
||||
"validateConnectionsHelp": "無効な接続が行われたり,無効なグラフが呼び出されたりしないようにします.",
|
||||
"validateConnectionsHelp": "無効な接続が行われたり,無効なグラフが呼び出されたりしないようにします",
|
||||
"validateConnections": "接続とグラフを確認する",
|
||||
"saveToGallery": "ギャラリーに保存",
|
||||
"newWorkflowDesc": "新しいワークフローを作りますか?",
|
||||
"unknownFieldType": "$t(nodes.unknownField)型:{type}}",
|
||||
"unknownFieldType": "$t(nodes.unknownField)型: {{type}}",
|
||||
"unsupportedArrayItemType": "サポートされていない配列項目型です \"{{type}}\"",
|
||||
"unableToLoadWorkflow": "ワークフローが読み込めません",
|
||||
"unableToValidateWorkflow": "ワークフローを確認できません",
|
||||
@@ -1064,13 +1203,13 @@
|
||||
"downloadBoard": "ボードをダウンロード",
|
||||
"changeBoard": "ボードを変更",
|
||||
"loading": "ロード中...",
|
||||
"topMessage": "このボードには、以下の機能で使用されている画像が含まれています:",
|
||||
"bottomMessage": "このボードおよび画像を削除すると、現在これらを利用している機能はリセットされます。",
|
||||
"topMessage": "この選択には、次の機能で使用される画像が含まれています:",
|
||||
"bottomMessage": "この画像を削除すると、現在利用している機能はリセットされます。",
|
||||
"clearSearch": "検索をクリア",
|
||||
"deleteBoard": "ボードの削除",
|
||||
"deleteBoardAndImages": "ボードと画像の削除",
|
||||
"deleteBoardOnly": "ボードのみ削除",
|
||||
"deletedBoardsCannotbeRestored": "削除されたボードは復元できません。\"ボードのみ削除\"を選択すると画像は未分類に移動されます。",
|
||||
"deletedBoardsCannotbeRestored": "削除したボードと画像は復元できません。「ボードのみ削除」を選択すると、画像は未分類の状態になります。",
|
||||
"movingImagesToBoard_other": "{{count}} の画像をボードに移動:",
|
||||
"hideBoards": "ボードを隠す",
|
||||
"assetsWithCount_other": "{{count}} のアセット",
|
||||
@@ -1085,9 +1224,12 @@
|
||||
"imagesWithCount_other": "{{count}} の画像",
|
||||
"updateBoardError": "ボード更新エラー",
|
||||
"selectedForAutoAdd": "自動追加に選択済み",
|
||||
"deletedPrivateBoardsCannotbeRestored": "削除されたボードは復元できません。\"ボードのみ削除\"を選択すると画像はその作成者のプライベートな未分類に移動されます。",
|
||||
"deletedPrivateBoardsCannotbeRestored": "削除されたボードと画像は復元できません。「ボードのみ削除」を選択すると、画像は作成者に対して非公開の未分類状態になります。",
|
||||
"noBoards": "{{boardType}} ボードがありません",
|
||||
"viewBoards": "ボードを表示"
|
||||
"viewBoards": "ボードを表示",
|
||||
"uncategorizedImages": "分類されていない画像",
|
||||
"deleteAllUncategorizedImages": "分類されていないすべての画像を削除",
|
||||
"deletedImagesCannotBeRestored": "削除した画像は復元できません."
|
||||
},
|
||||
"invocationCache": {
|
||||
"invocationCache": "呼び出しキャッシュ",
|
||||
@@ -1110,86 +1252,427 @@
|
||||
"paramRatio": {
|
||||
"heading": "縦横比",
|
||||
"paragraphs": [
|
||||
"生成された画像の縦横比。"
|
||||
"生成された画像の縦横比。",
|
||||
"SD1.5 モデルの場合は 512x512 に相当する画像サイズ (ピクセル数) が推奨され, SDXL モデルの場合は 1024x1024 に相当するサイズが推奨されます."
|
||||
]
|
||||
},
|
||||
"regionalGuidanceAndReferenceImage": {
|
||||
"heading": "領域ガイダンスと領域参照画像"
|
||||
"heading": "領域ガイダンスと領域参照画像",
|
||||
"paragraphs": [
|
||||
"領域ガイダンスの場合は,ブラシを使用して,グローバルプロンプトの要素が表示される場所をガイドします.",
|
||||
"領域参照画像の場合は,ブラシを使用して特定の領域に参照画像を適用します."
|
||||
]
|
||||
},
|
||||
"regionalReferenceImage": {
|
||||
"heading": "領域参照画像"
|
||||
"heading": "領域参照画像",
|
||||
"paragraphs": [
|
||||
"特定の領域に参照画像を適用するためのブラシ."
|
||||
]
|
||||
},
|
||||
"paramScheduler": {
|
||||
"heading": "スケジューラー"
|
||||
"heading": "スケジューラー",
|
||||
"paragraphs": [
|
||||
"スケジューラーは生成中のプロセスで使用されます.",
|
||||
"各スケジューラは、画像にノイズを反復的に追加する方法や、モデルの出力に基づいてサンプルを更新する方法を定義します."
|
||||
]
|
||||
},
|
||||
"regionalGuidance": {
|
||||
"heading": "領域ガイダンス"
|
||||
"heading": "領域ガイダンス",
|
||||
"paragraphs": [
|
||||
"グローバルプロンプトの要素が表示される場所をガイドするブラシ."
|
||||
]
|
||||
},
|
||||
"rasterLayer": {
|
||||
"heading": "ラスターレイヤー"
|
||||
"heading": "ラスターレイヤー",
|
||||
"paragraphs": [
|
||||
"画像生成中に使用される,キャンバスのピクセルベースのコンテンツ."
|
||||
]
|
||||
},
|
||||
"globalReferenceImage": {
|
||||
"heading": "全域参照画像"
|
||||
"heading": "全域参照画像",
|
||||
"paragraphs": [
|
||||
"参照画像を適用して,生成全体に影響を及ぼします."
|
||||
]
|
||||
},
|
||||
"paramUpscaleMethod": {
|
||||
"heading": "アップスケール手法"
|
||||
"heading": "アップスケール手法",
|
||||
"paragraphs": [
|
||||
"高解像度修正のために画像を拡大するために使用される方法。"
|
||||
]
|
||||
},
|
||||
"upscaleModel": {
|
||||
"heading": "アップスケールモデル"
|
||||
"heading": "アップスケールモデル",
|
||||
"paragraphs": [
|
||||
"アップスケールモデルは、ディテールを追加する前に画像を出力サイズに合わせて拡大縮小します。サポートされているアップスケールモデルであればどれでも使用できますが、写真や線画など、特定の種類の画像に特化したモデルもあります。"
|
||||
]
|
||||
},
|
||||
"paramAspect": {
|
||||
"heading": "縦横比"
|
||||
"heading": "縦横比",
|
||||
"paragraphs": [
|
||||
"生成される画像のアスペクト比。比率を変更すると、幅と高さもそれに応じて更新されます。",
|
||||
"「最適化」は、選択したモデルの幅と高さを最適な寸法に設定します。"
|
||||
]
|
||||
},
|
||||
"refinerSteps": {
|
||||
"heading": "ステップ"
|
||||
"heading": "ステップ",
|
||||
"paragraphs": [
|
||||
"生成プロセスのリファイナー部分で実行されるステップの数。",
|
||||
"生成ステップと似ています。"
|
||||
]
|
||||
},
|
||||
"paramVAE": {
|
||||
"heading": "VAE"
|
||||
"heading": "VAE",
|
||||
"paragraphs": [
|
||||
"AI 出力を最終画像に変換するために使用されるモデル。"
|
||||
]
|
||||
},
|
||||
"scale": {
|
||||
"heading": "スケール"
|
||||
"heading": "スケール",
|
||||
"paragraphs": [
|
||||
"スケールは出力画像のサイズを制御し、入力画像の解像度の倍数に基づいて決定されます。例えば、1024x1024の画像を2倍に拡大すると、2048x2048の出力が生成されます。"
|
||||
]
|
||||
},
|
||||
"refinerScheduler": {
|
||||
"heading": "スケジューラー"
|
||||
"heading": "スケジューラー",
|
||||
"paragraphs": [
|
||||
"生成プロセスのリファイナー部分で使用されるスケジューラ。",
|
||||
"生成スケジューラに似ています。"
|
||||
]
|
||||
},
|
||||
"compositingCoherenceMode": {
|
||||
"heading": "モード"
|
||||
"heading": "モード",
|
||||
"paragraphs": [
|
||||
"新しく生成されたマスク領域と,一貫性のある画像を作成するために使用される方法."
|
||||
]
|
||||
},
|
||||
"paramModel": {
|
||||
"heading": "モデル"
|
||||
"heading": "モデル",
|
||||
"paragraphs": [
|
||||
"生成に使用されるモデル。異なるモデルは、異なる美的結果とコンテンツを生成するように特化するようにトレーニングされています。"
|
||||
]
|
||||
},
|
||||
"paramHeight": {
|
||||
"heading": "高さ"
|
||||
"heading": "高さ",
|
||||
"paragraphs": [
|
||||
"生成される画像の高さ。8の倍数にする必要があります。"
|
||||
]
|
||||
},
|
||||
"paramSteps": {
|
||||
"heading": "ステップ"
|
||||
"heading": "ステップ",
|
||||
"paragraphs": [
|
||||
"各生成で実行されるステップの数.",
|
||||
"通常, ステップ数が多いほど, より高品質な画像が作成されますが生成時間も長くなります."
|
||||
]
|
||||
},
|
||||
"ipAdapterMethod": {
|
||||
"heading": "モード"
|
||||
"heading": "モード",
|
||||
"paragraphs": [
|
||||
"モードは参照画像が生成プロセスをどのようにガイドするかを定義します."
|
||||
]
|
||||
},
|
||||
"paramSeed": {
|
||||
"heading": "シード"
|
||||
"heading": "シード",
|
||||
"paragraphs": [
|
||||
"生成に使用する始動ノイズを制御します.",
|
||||
"同じ生成設定で同一の結果を生成するには, 「ランダム」オプションを無効にします."
|
||||
]
|
||||
},
|
||||
"paramIterations": {
|
||||
"heading": "生成回数"
|
||||
"heading": "生成回数",
|
||||
"paragraphs": [
|
||||
"生成する画像の数。",
|
||||
"動的プロンプトが有効になっている場合、各プロンプトはこの回数生成されます。"
|
||||
]
|
||||
},
|
||||
"controlNet": {
|
||||
"heading": "ControlNet"
|
||||
"heading": "ControlNet",
|
||||
"paragraphs": [
|
||||
"コントロールネットは生成プロセスへのガイダンスを提供し,選択したモデルに応じて制御された構成,構造,またはスタイルを持つ画像の作成に役立ちます."
|
||||
]
|
||||
},
|
||||
"paramWidth": {
|
||||
"heading": "幅"
|
||||
"heading": "幅",
|
||||
"paragraphs": [
|
||||
"生成される画像の幅。8の倍数にする必要があります。"
|
||||
]
|
||||
},
|
||||
"lora": {
|
||||
"heading": "LoRA"
|
||||
"heading": "LoRA",
|
||||
"paragraphs": [
|
||||
"ベースモデルと組み合わせて使用する軽量モデル."
|
||||
]
|
||||
},
|
||||
"loraWeight": {
|
||||
"heading": "重み"
|
||||
"heading": "重み",
|
||||
"paragraphs": [
|
||||
"LoRA の重み. 重みを大きくすると, 最終的な画像への影響が大きくなります."
|
||||
]
|
||||
},
|
||||
"patchmatchDownScaleSize": {
|
||||
"heading": "Downscale"
|
||||
"heading": "Downscale",
|
||||
"paragraphs": [
|
||||
"埋め込む前にどの程度のダウンスケーリングが行われるか。",
|
||||
"ダウンスケーリングを大きくするとパフォーマンスは向上しますが、品質は低下します。"
|
||||
]
|
||||
},
|
||||
"controlNetWeight": {
|
||||
"heading": "重み"
|
||||
"heading": "重み",
|
||||
"paragraphs": [
|
||||
"レイヤーが生成プロセスにどの程度影響を与えるかを調整します",
|
||||
"• 高いウエイト (.75-2): 最終結果にさらに大きな影響を及ぼします.",
|
||||
"• 低いウエイト (0-.75): 最終結果への影響が小さくなります."
|
||||
]
|
||||
},
|
||||
"paramNegativeConditioning": {
|
||||
"paragraphs": [
|
||||
"生成プロセスでは、ネガティブプロンプトに含まれる概念を回避します.これを使用して、出力から特定の性質やオブジェクトを除外します.",
|
||||
"強制された構文と埋め込みをサポート."
|
||||
],
|
||||
"heading": "ネガティブプロンプト"
|
||||
},
|
||||
"clipSkip": {
|
||||
"paragraphs": [
|
||||
"スキップする CLIP モデルのレイヤー数.",
|
||||
"特定のモデルは、CLIP Skip と併用するとより適しています."
|
||||
],
|
||||
"heading": "クリップスキップ"
|
||||
},
|
||||
"compositingMaskBlur": {
|
||||
"heading": "マスクぼかし",
|
||||
"paragraphs": [
|
||||
"マスクのぼかし半径."
|
||||
]
|
||||
},
|
||||
"paramPositiveConditioning": {
|
||||
"paragraphs": [
|
||||
"生成プロセスをガイドします.任意の単語やフレーズを使用できます.",
|
||||
"強制とダイナミックプロンプトの構文と埋め込み."
|
||||
],
|
||||
"heading": "ポジティブプロンプト"
|
||||
},
|
||||
"compositingMaskAdjustments": {
|
||||
"heading": "マスク調整",
|
||||
"paragraphs": [
|
||||
"マスクを調整する."
|
||||
]
|
||||
},
|
||||
"compositingCoherenceMinDenoise": {
|
||||
"paragraphs": [
|
||||
"コヒーレンスモードの最小ノイズ除去強度",
|
||||
"インペインティングまたはアウトペインティング時のコヒーレンス領域の最小ノイズ除去強度"
|
||||
],
|
||||
"heading": "最小ノイズ除去"
|
||||
},
|
||||
"compositingCoherencePass": {
|
||||
"paragraphs": [
|
||||
"2 回目のノイズ除去は,インペイント/アウトペイントされた画像の合成に役立ちます."
|
||||
],
|
||||
"heading": "コヒーレンスパス"
|
||||
},
|
||||
"controlNetBeginEnd": {
|
||||
"paragraphs": [
|
||||
"この設定は,ノイズ除去 (生成) プロセスのどの部分にこのレイヤーからのガイダンスが組み込まれるかを決定します.",
|
||||
"• 開始ステップ (%): 生成プロセス中にこのレイヤーからのガイダンスの適用を開始するタイミングを指定します.",
|
||||
"• 終了ステップ (%): このレイヤーのガイダンスの適用を停止し,モデルやその他の設定からの一般的なガイダンスを元に戻すタイミングを指定します."
|
||||
],
|
||||
"heading": "開始/終了ステップの割合"
|
||||
},
|
||||
"compositingCoherenceEdgeSize": {
|
||||
"heading": "エッジサイズ",
|
||||
"paragraphs": [
|
||||
"コヒーレンスパスのエッジサイズ."
|
||||
]
|
||||
},
|
||||
"compositingBlurMethod": {
|
||||
"paragraphs": [
|
||||
"マスクされた領域に適用されるぼかし方法."
|
||||
],
|
||||
"heading": "ぼかし方法"
|
||||
},
|
||||
"inpainting": {
|
||||
"heading": "インペインティング",
|
||||
"paragraphs": [
|
||||
"ノイズ除去の強度に応じて,変更する領域を制御します."
|
||||
]
|
||||
},
|
||||
"dynamicPrompts": {
|
||||
"heading": "ダイナミックプロンプト",
|
||||
"paragraphs": [
|
||||
"ダイナミック プロンプトは,単一のプロンプトを複数のプロンプトに解析します.",
|
||||
"基本的な構文は「{赤|緑|青}のボール」です.これにより,「赤いボール」「緑のボール」「青いボール」という3つのプロンプトが生成されます.",
|
||||
"1 つのプロンプト内で構文を何度でも使用できますが, 生成されるプロンプトの数を Max Prompts 設定で制限するようにしてください."
|
||||
]
|
||||
},
|
||||
"controlNetResizeMode": {
|
||||
"heading": "リサイズモード",
|
||||
"paragraphs": [
|
||||
"コントロールアダプタの入力画像サイズを出力生成サイズに適合させるメソッド."
|
||||
]
|
||||
},
|
||||
"controlNetProcessor": {
|
||||
"heading": "プロセッサー",
|
||||
"paragraphs": [
|
||||
"入力画像を処理する生成プロセスをガイドするメソッド.プロセッサによって,生成される画像に異なる効果やスタイルが与えられます。"
|
||||
]
|
||||
},
|
||||
"controlNetControlMode": {
|
||||
"heading": "コントロールモード",
|
||||
"paragraphs": [
|
||||
"プロンプトまたは コントロールネットのいずれかを重視します."
|
||||
]
|
||||
},
|
||||
"noiseUseCPU": {
|
||||
"paragraphs": [
|
||||
"CPU または GPU でノイズを生成するかどうかを制御します.",
|
||||
"CPU ノイズを有効にすると, 特定のシードによってどのマシンでも同じ画像が生成されます.",
|
||||
"CPU ノイズを有効にしてもパフォーマンスに影響はありません."
|
||||
],
|
||||
"heading": "CPUノイズを使用する"
|
||||
},
|
||||
"dynamicPromptsMaxPrompts": {
|
||||
"heading": "最大プロンプト",
|
||||
"paragraphs": [
|
||||
"ダイナミック プロンプトによって生成できるプロンプトの数を制限します."
|
||||
]
|
||||
},
|
||||
"dynamicPromptsSeedBehaviour": {
|
||||
"paragraphs": [
|
||||
"プロンプトを生成するときにシードがどのように使用されるかを制御します.",
|
||||
"反復ごとに固有のシードを使用します. 単一のシードでプロンプトのバリエーションを試す場合に使用します.",
|
||||
"たとえば, プロンプトが 5 つある場合, 各画像は同じシードを使用します.",
|
||||
"「画像ごと」では, 画像ごとに固有のシード値が使用されます. これにより、より多くのバリエーションが得られます."
|
||||
],
|
||||
"heading": "シード行動"
|
||||
},
|
||||
"imageFit": {
|
||||
"paragraphs": [
|
||||
"初期画像の幅と高さを出力画像に合わせてサイズ変更します. 有効にすることをお勧めします."
|
||||
],
|
||||
"heading": "初期画像を出力サイズに合わせる"
|
||||
},
|
||||
"infillMethod": {
|
||||
"heading": "充填方法",
|
||||
"paragraphs": [
|
||||
"アウトペインティングまたはインペインティングのプロセス中に埋め込む方法."
|
||||
]
|
||||
},
|
||||
"paramGuidance": {
|
||||
"paragraphs": [
|
||||
"プロンプトが生成プロセスにどの程度影響するかを制御します。",
|
||||
"ガイダンス値が高すぎると過飽和状態になる可能性があり、ガイダンス値が高すぎるか低すぎると生成結果に歪みが生じる可能性があります。ガイダンスはFLUX DEVモデルにのみ適用されます。"
|
||||
],
|
||||
"heading": "ガイダンス"
|
||||
},
|
||||
"paramDenoisingStrength": {
|
||||
"paragraphs": [
|
||||
"生成されたイメージがラスター レイヤーとどの程度異なるかを制御します。",
|
||||
"強度が低いほど、結合された表示ラスターレイヤーに近くなります。強度が高いほど、グローバルプロンプトに大きく依存します。",
|
||||
"表示されるコンテンツを持つラスター レイヤーがない場合、この設定は無視されます。"
|
||||
],
|
||||
"heading": "ディノイジングストレングス"
|
||||
},
|
||||
"refinerStart": {
|
||||
"heading": "リファイナースタート",
|
||||
"paragraphs": [
|
||||
"生成プロセスのどの時点でリファイナーが使用され始めるか。",
|
||||
"0 はリファイナーが生成プロセス全体で使用されることを意味し、0.8 は、リファイナーが生成プロセスの最後の 20% で使用されることを意味します。"
|
||||
]
|
||||
},
|
||||
"optimizedDenoising": {
|
||||
"heading": "イメージtoイメージの最適化",
|
||||
"paragraphs": [
|
||||
"「イメージtoイメージを最適化」を有効にすると、Fluxモデルを用いた画像間変換およびインペインティング変換において、より段階的なノイズ除去強度スケールが適用されます。この設定により、画像に適用される変化量を制御する能力が向上しますが、標準のノイズ除去強度スケールを使用したい場合はオフにすることができます。この設定は現在調整中で、ベータ版です。"
|
||||
]
|
||||
},
|
||||
"refinerPositiveAestheticScore": {
|
||||
"heading": "ポジティブ美的スコア",
|
||||
"paragraphs": [
|
||||
"トレーニング データに基づいて、美的スコアの高い画像に類似するように生成を重み付けします。"
|
||||
]
|
||||
},
|
||||
"paramCFGScale": {
|
||||
"paragraphs": [
|
||||
"プロンプトが生成プロセスにどの程度影響するかを制御します。",
|
||||
"CFG スケールの値が高すぎると、飽和しすぎて生成結果が歪む可能性があります。 "
|
||||
],
|
||||
"heading": "CFGスケール"
|
||||
},
|
||||
"paramVAEPrecision": {
|
||||
"paragraphs": [
|
||||
"VAE エンコードおよびデコード時に使用される精度。",
|
||||
"Fp16/Half 精度は、画像のわずかな変化を犠牲にして、より効率的です。"
|
||||
],
|
||||
"heading": "VAE精度"
|
||||
},
|
||||
"refinerModel": {
|
||||
"heading": "リファイナーモデル",
|
||||
"paragraphs": [
|
||||
"生成プロセスの精製部分で使用されるモデル。",
|
||||
"世代モデルに似ています。"
|
||||
]
|
||||
},
|
||||
"refinerCfgScale": {
|
||||
"heading": "CFGスケール",
|
||||
"paragraphs": [
|
||||
"プロンプトが生成プロセスに与える影響を制御する。",
|
||||
"生成CFG スケールに似ています。"
|
||||
]
|
||||
},
|
||||
"seamlessTilingYAxis": {
|
||||
"heading": "シームレスタイリングY軸",
|
||||
"paragraphs": [
|
||||
"画像を垂直軸に沿ってシームレスに並べます。"
|
||||
]
|
||||
},
|
||||
"scaleBeforeProcessing": {
|
||||
"heading": "プロセス前のスケール値",
|
||||
"paragraphs": [
|
||||
"「自動」は、画像生成プロセスの前に、選択した領域をモデルに最適なサイズに拡大縮小します。",
|
||||
"「手動」では、画像生成プロセスの前に、選択した領域を拡大縮小する幅と高さを選択できます。"
|
||||
]
|
||||
},
|
||||
"creativity": {
|
||||
"heading": "クリエイティビティ",
|
||||
"paragraphs": [
|
||||
"クリエイティビティは、ディテールを追加する際のモデルに与えられる自由度を制御します。クリエイティビティが低いと元のイメージに近いままになり、クリエイティビティが高いとより多くの変化を加えることができます。プロンプトを使用する場合、クリエイティビティが高いとプロンプトの影響が増します。"
|
||||
]
|
||||
},
|
||||
"paramHrf": {
|
||||
"heading": "高解像度修正を有効にする",
|
||||
"paragraphs": [
|
||||
"モデルに最適な解像度よりも高い解像度で、高品質な画像を生成します。通常、生成された画像内の重複を防ぐために使用されます。"
|
||||
]
|
||||
},
|
||||
"seamlessTilingXAxis": {
|
||||
"heading": "シームレスタイリングX軸",
|
||||
"paragraphs": [
|
||||
"画像を水平軸に沿ってシームレスに並べます。"
|
||||
]
|
||||
},
|
||||
"paramCFGRescaleMultiplier": {
|
||||
"paragraphs": [
|
||||
"ゼロ端末 SNR (ztsnr) を使用してトレーニングされたモデルに使用される、CFG ガイダンスのリスケールマルチプライヤー。",
|
||||
"これらのモデルの場合、推奨値は 0.7 です。"
|
||||
],
|
||||
"heading": "CFG リスケールマルチプライヤー"
|
||||
},
|
||||
"structure": {
|
||||
"heading": "ストラクチャ",
|
||||
"paragraphs": [
|
||||
"ストラクチャは、出力画像が元のレイアウトにどれだけ忠実に従うかを制御します。低いストラクチャでは大幅な変更が可能ですが、高いストラクチャでは元の構成とレイアウトが厳密に維持されます。"
|
||||
]
|
||||
},
|
||||
"refinerNegativeAestheticScore": {
|
||||
"paragraphs": [
|
||||
"トレーニング データに基づいて、美観スコアが低い画像に類似するように生成に重み付けします。"
|
||||
],
|
||||
"heading": "ネガティブ美的スコア"
|
||||
},
|
||||
"fluxDevLicense": {
|
||||
"heading": "非商用ライセンス",
|
||||
"paragraphs": [
|
||||
"FLUX.1 [dev]モデルは、FLUX [dev]非商用ライセンスに基づいてライセンスされています。Invokeでこのモデルタイプを商用目的で使用する場合は、当社のウェブサイトをご覧ください。"
|
||||
]
|
||||
}
|
||||
},
|
||||
"accordions": {
|
||||
@@ -1340,7 +1823,18 @@
|
||||
"scheduler": "スケジューラー",
|
||||
"loading": "ロード中...",
|
||||
"steps": "ステップ",
|
||||
"refiner": "Refiner"
|
||||
"refiner": "Refiner",
|
||||
"negStylePrompt": "ネガティブスタイルプロンプト",
|
||||
"noModelsAvailable": "利用できるモデルがありません",
|
||||
"posStylePrompt": "ポジティブスタイルプロンプト",
|
||||
"cfgScale": "CFGスケール",
|
||||
"concatPromptStyle": "リンキングプロンプトとスタイル",
|
||||
"freePromptStyle": "手動スタイルプロンプト",
|
||||
"posAestheticScore": "ポジティブ美的スコア",
|
||||
"refinerSteps": "リファイナーステップ",
|
||||
"refinerStart": "リファイナースタート",
|
||||
"refinermodel": "リファイナーモデル",
|
||||
"negAestheticScore": "ネガティブ美的スコア"
|
||||
},
|
||||
"modelCache": {
|
||||
"clear": "モデルキャッシュを消去",
|
||||
@@ -1351,7 +1845,106 @@
|
||||
"workflows": "ワークフロー",
|
||||
"ascending": "昇順",
|
||||
"name": "名前",
|
||||
"descending": "降順"
|
||||
"descending": "降順",
|
||||
"searchPlaceholder": "名前、説明、タグで検索",
|
||||
"projectWorkflows": "プロジェクトワークフロー",
|
||||
"searchWorkflows": "ワークフローを検索",
|
||||
"updated": "アップデート",
|
||||
"published": "公表",
|
||||
"builder": {
|
||||
"label": "ラベル",
|
||||
"containerPlaceholder": "空のコンテナ",
|
||||
"showDescription": "説明を表示",
|
||||
"emptyRootPlaceholderEditMode": "開始するには、フォーム要素またはノード フィールドをここにドラッグします。",
|
||||
"divider": "仕切り",
|
||||
"deleteAllElements": "すべてのフォーム要素を削除",
|
||||
"heading": "見出し",
|
||||
"nodeField": "ノードフィールド",
|
||||
"zoomToNode": "ノードにズーム",
|
||||
"dropdown": "ドロップダウン",
|
||||
"resetOptions": "オプションをリセット",
|
||||
"both": "両方",
|
||||
"builder": "フォームビルダー",
|
||||
"text": "テキスト",
|
||||
"row": "行",
|
||||
"multiLine": "マルチライン",
|
||||
"resetAllNodeFields": "すべてのノードフィールドをリセット",
|
||||
"slider": "スライダー",
|
||||
"layout": "レイアウト",
|
||||
"addToForm": "フォームに追加",
|
||||
"headingPlaceholder": "空の見出し",
|
||||
"nodeFieldTooltip": "ノード フィールドを追加するには、ワークフロー エディターのフィールドにある小さなプラス記号ボタンをクリックするか、フィールド名をフォームにドラッグします。",
|
||||
"workflowBuilderAlphaWarning": "ワークフロービルダーは現在アルファ版です。安定版リリースまでに互換性に影響する変更が発生する可能性があります。",
|
||||
"component": "コンポーネント",
|
||||
"textPlaceholder": "空のテキスト",
|
||||
"emptyRootPlaceholderViewMode": "このワークフローのフォームの作成を開始するには、[編集] をクリックします。",
|
||||
"addOption": "オプションを追加",
|
||||
"singleLine": "単線",
|
||||
"numberInput": "数値入力",
|
||||
"column": "列",
|
||||
"container": "コンテナ",
|
||||
"containerRowLayout": "コンテナ(行レイアウト)",
|
||||
"containerColumnLayout": "コンテナ(列レイアウト)",
|
||||
"maximum": "最大",
|
||||
"published": "公開済み",
|
||||
"publishedWorkflowOutputs": "アウトプット",
|
||||
"minimum": "最小",
|
||||
"publish": "公開",
|
||||
"unpublish": "非公開",
|
||||
"publishedWorkflowInputs": "インプット"
|
||||
},
|
||||
"chooseWorkflowFromLibrary": "ライブラリからワークフローを選択",
|
||||
"unnamedWorkflow": "名前のないワークフロー",
|
||||
"download": "ダウンロード",
|
||||
"savingWorkflow": "ワークフローを保存しています...",
|
||||
"problemSavingWorkflow": "ワークフローの保存に関する問題",
|
||||
"convertGraph": "グラフを変換",
|
||||
"downloadWorkflow": "ファイルに保存",
|
||||
"saveWorkflow": "ワークフローを保存",
|
||||
"userWorkflows": "ユーザーワークフロー",
|
||||
"yourWorkflows": "あなたのワークフロー",
|
||||
"edit": "編集",
|
||||
"workflowLibrary": "ワークフローライブラリ",
|
||||
"workflowSaved": "ワークフローが保存されました",
|
||||
"clearWorkflowSearchFilter": "ワークフロー検索フィルタをクリア",
|
||||
"workflowCleared": "ワークフローが作成されました",
|
||||
"autoLayout": "オートレイアウト",
|
||||
"view": "ビュー",
|
||||
"saveChanges": "変更を保存",
|
||||
"noDescription": "説明なし",
|
||||
"recommended": "あなたへのおすすめ",
|
||||
"noRecentWorkflows": "最近のワークフローがありません",
|
||||
"problemLoading": "ワークフローのローディングに関する問題",
|
||||
"newWorkflowCreated": "新しいワークフローが作成されました",
|
||||
"noWorkflows": "ワークフローがありません",
|
||||
"copyShareLink": "共有リンクをコピー",
|
||||
"copyShareLinkForWorkflow": "ワークフローの共有リンクをコピー",
|
||||
"workflowThumbnail": "ワークフローサムネイル",
|
||||
"loadWorkflow": "$t(common.load) ワークフロー",
|
||||
"shared": "共有",
|
||||
"openWorkflow": "ワークフローを開く",
|
||||
"emptyStringPlaceholder": "<空の文字列>",
|
||||
"browseWorkflows": "ワークフローを閲覧する",
|
||||
"saveWorkflowAs": "ワークフローとして保存",
|
||||
"private": "プライベート",
|
||||
"deselectAll": "すべて選択解除",
|
||||
"delete": "削除",
|
||||
"openLibrary": "ライブラリを開く",
|
||||
"loadMore": "もっと読み込む",
|
||||
"saveWorkflowToProject": "ワークフローをプロジェクトに保存",
|
||||
"created": "作成されました",
|
||||
"workflowEditorMenu": "ワークフローエディターメニュー",
|
||||
"defaultWorkflows": "デフォルトワークフロー",
|
||||
"allLoaded": "すべてのワークフローが読み込まれました",
|
||||
"filterByTags": "タグでフィルター",
|
||||
"recentlyOpened": "最近開いた",
|
||||
"opened": "オープン",
|
||||
"deleteWorkflow": "ワークフローを削除",
|
||||
"deleteWorkflow2": "このワークフローを削除してもよろしいですか? 元に戻すことはできません。",
|
||||
"loadFromGraph": "グラフからワークフローをロード",
|
||||
"workflowName": "ワークフロー名",
|
||||
"loading": "ワークフローをロードしています",
|
||||
"uploadWorkflow": "ファイルからロードする"
|
||||
},
|
||||
"system": {
|
||||
"logNamespaces": {
|
||||
@@ -1370,5 +1963,20 @@
|
||||
"fatal": "Fatal",
|
||||
"warn": "Warn"
|
||||
}
|
||||
},
|
||||
"dynamicPrompts": {
|
||||
"promptsPreview": "プロンプトプレビュー",
|
||||
"seedBehaviour": {
|
||||
"label": "シードの挙動",
|
||||
"perPromptLabel": "画像ごとのシード",
|
||||
"perIterationLabel": "いてレーションごとのシード",
|
||||
"perPromptDesc": "それぞれの画像に足して別のシードを使う",
|
||||
"perIterationDesc": "それぞれのいてレーションに別のシードを使う"
|
||||
},
|
||||
"showDynamicPrompts": "ダイナミックプロンプトを表示する",
|
||||
"promptsToGenerate": "生成するプロンプト",
|
||||
"dynamicPrompts": "ダイナミックプロンプト",
|
||||
"loading": "ダイナミックプロンプトを生成...",
|
||||
"maxPrompts": "最大プロンプト"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -30,7 +30,7 @@
|
||||
"boards": "Bảng",
|
||||
"selectedForAutoAdd": "Đã Chọn Để Tự động thêm",
|
||||
"myBoard": "Bảng Của Tôi",
|
||||
"deletedPrivateBoardsCannotbeRestored": "Bảng đã xoá sẽ không thể khôi phục lại. Chọn 'Chỉ Xoá Bảng' sẽ dời ảnh vào trạng thái chưa phân loại riêng cho chủ ảnh.",
|
||||
"deletedPrivateBoardsCannotbeRestored": "Bảng và ảnh đã xoá sẽ không thể khôi phục lại. Chọn 'Chỉ Xoá Bảng' sẽ dời ảnh vào trạng thái chưa phân loại riêng cho chủ ảnh.",
|
||||
"changeBoard": "Thay Đổi Bảng",
|
||||
"clearSearch": "Làm Sạch Thanh Tìm Kiếm",
|
||||
"updateBoardError": "Lỗi khi cập nhật Bảng",
|
||||
@@ -41,18 +41,21 @@
|
||||
"deleteBoard": "Xoá Bảng",
|
||||
"deleteBoardAndImages": "Xoá Bảng Lẫn Hình ảnh",
|
||||
"deleteBoardOnly": "Chỉ Xoá Bảng",
|
||||
"deletedBoardsCannotbeRestored": "Bảng đã xoá sẽ không thể khôi phục lại. Chọn 'Chỉ Xoá Bảng' sẽ dời ảnh vào trạng thái chưa phân loại.",
|
||||
"bottomMessage": "Xoá bảng này lẫn ảnh của nó sẽ khởi động lại mọi tính năng đang sử dụng chúng.",
|
||||
"deletedBoardsCannotbeRestored": "Bảng và ảnh đã xoá sẽ không thể khôi phục lại. Chọn 'Chỉ Xoá Bảng' sẽ dời ảnh vào trạng thái chưa phân loại.",
|
||||
"bottomMessage": "Việc xóa ảnh sẽ khởi động lại mọi tính năng đang sử dụng chúng.",
|
||||
"menuItemAutoAdd": "Tự động thêm cho Bảng này",
|
||||
"move": "Di Chuyển",
|
||||
"topMessage": "Bảng này chứa ảnh được dùng với những tính năng sau:",
|
||||
"topMessage": "Lựa chọn này chứa ảnh được dùng với những tính năng sau:",
|
||||
"uncategorized": "Chưa Sắp Xếp",
|
||||
"archived": "Được Lưu Trữ",
|
||||
"loading": "Đang Tải...",
|
||||
"selectBoard": "Chọn Bảng",
|
||||
"archiveBoard": "Lưu trữ Bảng",
|
||||
"unarchiveBoard": "Ngừng Lưu Trữ Bảng",
|
||||
"assetsWithCount_other": "{{count}} tài nguyên"
|
||||
"assetsWithCount_other": "{{count}} tài nguyên",
|
||||
"uncategorizedImages": "Ảnh Chưa Sắp Xếp",
|
||||
"deleteAllUncategorizedImages": "Xoá Tất Cả Ảnh Chưa Sắp Xếp",
|
||||
"deletedImagesCannotBeRestored": "Ảnh đã xoá không thể phục hồi lại."
|
||||
},
|
||||
"gallery": {
|
||||
"swapImages": "Đổi Hình Ảnh",
|
||||
@@ -240,7 +243,15 @@
|
||||
"error_withCount_other": "{{count}} lỗi",
|
||||
"value": "Giá Trị",
|
||||
"label": "Nhãn Tên",
|
||||
"systemInformation": "Thông Tin Hệ Thống"
|
||||
"systemInformation": "Thông Tin Hệ Thống",
|
||||
"model_withCount_other": "{{count}} model",
|
||||
"noOptions": "Không Có Lựa Chọn",
|
||||
"noMatches": "Không Có Mục Phù Hợp",
|
||||
"search": "Tìm Kiếm",
|
||||
"clear": "Dọn Dẹp",
|
||||
"compactView": "Chế Độ Xem Gọn",
|
||||
"fullView": "Chế Độ Xem Đầy Đủ",
|
||||
"options_withCount_other": "{{count}} thiết lập"
|
||||
},
|
||||
"prompt": {
|
||||
"addPromptTrigger": "Thêm Prompt Trigger",
|
||||
@@ -321,7 +332,8 @@
|
||||
"confirm": "Đồng Ý",
|
||||
"retrySucceeded": "Mục Đã Thử Lại",
|
||||
"retryFailed": "Có Vấn Đề Khi Thử Lại Mục",
|
||||
"retryItem": "Thử Lại Mục"
|
||||
"retryItem": "Thử Lại Mục",
|
||||
"credits": "Nguồn"
|
||||
},
|
||||
"hotkeys": {
|
||||
"canvas": {
|
||||
@@ -775,7 +787,14 @@
|
||||
"fluxRedux": "FLUX Redux",
|
||||
"sigLip": "SigLIP",
|
||||
"llavaOnevision": "LLaVA OneVision",
|
||||
"fileSize": "Kích Thước Tệp"
|
||||
"fileSize": "Kích Thước Tệp",
|
||||
"filterModels": "Lọc Model",
|
||||
"modelPickerFallbackNoModelsInstalled2": "Nhấp vào <LinkComponent>Trình Quản Lý Model</LinkComponent> để tải.",
|
||||
"modelPickerFallbackNoModelsInstalled": "Không Có Sẵn Model.",
|
||||
"manageModels": "Quản Lý Model",
|
||||
"hfTokenReset": "Làm Mới HF Token",
|
||||
"relatedModels": "Model Liên Quan",
|
||||
"showOnlyRelatedModels": "Liên Quan"
|
||||
},
|
||||
"metadata": {
|
||||
"guidance": "Hướng Dẫn",
|
||||
@@ -1518,7 +1537,8 @@
|
||||
"modelIncompatibleBboxWidth": "Chiều rộng hộp giới hạn là {{width}} nhưng {{model}} yêu cầu bội số của {{multiple}}",
|
||||
"modelIncompatibleBboxHeight": "Chiều dài hộp giới hạn là {{height}} nhưng {{model}} yêu cầu bội số của {{multiple}}",
|
||||
"modelIncompatibleScaledBboxHeight": "Chiều dài hộp giới hạn theo tỉ lệ là {{height}} nhưng {{model}} yêu cầu bội số của {{multiple}}",
|
||||
"modelIncompatibleScaledBboxWidth": "Chiều rộng hộp giới hạn theo tỉ lệ là {{width}} nhưng {{model}} yêu cầu bội số của {{multiple}}"
|
||||
"modelIncompatibleScaledBboxWidth": "Chiều rộng hộp giới hạn theo tỉ lệ là {{width}} nhưng {{model}} yêu cầu bội số của {{multiple}}",
|
||||
"modelDisabledForTrial": "Tạo sinh với {{modelName}} là không thể với tài khoản trial. Vào phần thiết lập tài khoản để nâng cấp."
|
||||
},
|
||||
"cfgScale": "Thang CFG",
|
||||
"useSeed": "Dùng Hạt Giống",
|
||||
@@ -1581,7 +1601,8 @@
|
||||
"usePrompt": "Dùng Lệnh",
|
||||
"upscaling": "Upscale",
|
||||
"tileSize": "Kích Thước Khối",
|
||||
"disabledNoRasterContent": "Đã Tắt (Không Có Nội Dung Dạng Raster)"
|
||||
"disabledNoRasterContent": "Đã Tắt (Không Có Nội Dung Dạng Raster)",
|
||||
"modelDisabledForTrial": "Tạo sinh với {{modelName}} là không thể với tài khoản trial. Vào phần <LinkComponent>thiết lập tài khoản</LinkComponent> để nâng cấp."
|
||||
},
|
||||
"dynamicPrompts": {
|
||||
"seedBehaviour": {
|
||||
@@ -1699,12 +1720,16 @@
|
||||
"fitBboxToLayers": "Xếp Vừa Hộp Giới Hạn Vào Layer",
|
||||
"ipAdapterMethod": {
|
||||
"full": "Phong Cách Và Thành Phần",
|
||||
"style": "Chỉ Lấy Phong Cách",
|
||||
"style": "Phong Cách (Đơn Giản)",
|
||||
"composition": "Chỉ Lấy Thành Phần",
|
||||
"ipAdapterMethod": "Cách Thức",
|
||||
"compositionDesc": "Áp dụng cách trình bày và bỏ qua phong cách mẫu.",
|
||||
"fullDesc": "Áp dụng phong cách trực quan (màu, cấu tạo) & thành phần (cách trình bày).",
|
||||
"styleDesc": "Áp dụng phong cách trực quan (màu, cấu tạo) và bỏ qua cách trình bày."
|
||||
"styleDesc": "Áp dụng phong cách trực quan (màu, cấu tạo) và bỏ qua cách trình bày. Tên trước đây là Chỉ Lấy Phong Cách.",
|
||||
"styleStrong": "Phong Cách (Mạnh Mẽ)",
|
||||
"styleStrongDesc": "Áp dụng cách trình bày mạnh mẽ, với một chút giảm nhẹ ảnh hưởng lên thành phần.",
|
||||
"stylePrecise": "Phong Cách (Chính Xác)",
|
||||
"stylePreciseDesc": "Áp dụng cách trình bày chính xác, loại bỏ các chủ thể ảnh hưởng."
|
||||
},
|
||||
"deletePrompt": "Xoá Lệnh",
|
||||
"rasterLayer": "Layer Dạng Raster",
|
||||
@@ -2037,7 +2062,7 @@
|
||||
"colorPicker": "Chọn Màu"
|
||||
},
|
||||
"mergingLayers": "Đang gộp layer",
|
||||
"controlLayerEmptyState": "<UploadButton>Tải lên ảnh</UploadButton>, kéo thả ảnh từ <GalleryButton>thư viện</GalleryButton> vào layer này, hoặc vẽ trên canvas để bắt đầu.",
|
||||
"controlLayerEmptyState": "<UploadButton>Tải lên ảnh</UploadButton>, kéo thả ảnh từ <GalleryButton>thư viện</GalleryButton> vào layer này, <PullBboxButton>kéo hộp giới hạn vào layer này</PullBboxButton>, hoặc vẽ trên canvas để bắt đầu.",
|
||||
"referenceImageEmptyState": "<UploadButton>Tải lên hình ảnh</UploadButton>, kéo ảnh từ <GalleryButton>thư viện ảnh</GalleryButton> vào layer này, hoặc <PullBboxButton>kéo hộp giới hạn vào layer này</PullBboxButton> để bắt đầu.",
|
||||
"useImage": "Dùng Hình Ảnh",
|
||||
"resetCanvasLayers": "Khởi Động Lại Layer Canvas",
|
||||
@@ -2086,7 +2111,11 @@
|
||||
"imageInfluence": "Ảnh Chi Phối",
|
||||
"medium": "Vừa",
|
||||
"highest": "Cao Nhất"
|
||||
}
|
||||
},
|
||||
"addDenoiseLimit": "Thêm $t(controlLayers.denoiseLimit)",
|
||||
"imageNoise": "Độ Nhiễu Hình Ảnh",
|
||||
"denoiseLimit": "Giới Hạn Khử Nhiễu",
|
||||
"addImageNoise": "Thêm $t(controlLayers.imageNoise)"
|
||||
},
|
||||
"stylePresets": {
|
||||
"negativePrompt": "Lệnh Tiêu Cực",
|
||||
@@ -2226,7 +2255,9 @@
|
||||
"fluxFillIncompatibleWithT2IAndI2I": "FLUX Fill không tương tích với Từ Ngữ Sang Hình Ảnh và Hình Ảnh Sang Hình Ảnh. Dùng model FLUX khác cho các tính năng này.",
|
||||
"problemUnpublishingWorkflowDescription": "Có vấn đề khi ngừng đăng tải workflow. Vui lòng thử lại sau.",
|
||||
"workflowUnpublished": "Workflow Đã Được Ngừng Đăng Tải",
|
||||
"problemUnpublishingWorkflow": "Có Vấn Đề Khi Ngừng Đăng Tải Workflow"
|
||||
"problemUnpublishingWorkflow": "Có Vấn Đề Khi Ngừng Đăng Tải Workflow",
|
||||
"chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh và Hình Ảnh Sang Hình Ảnh. Hãy dùng model khác cho các tác vụ Inpaint và Outpaint.",
|
||||
"imagenIncompatibleGenerationMode": "Google {{model}} chỉ hỗ trợ Từ Ngữ Sang Hình Ảnh. Dùng các model khác cho Hình Ảnh Sang Hình Ảnh, Inpaint và Outpaint."
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
@@ -2408,8 +2439,8 @@
|
||||
"watchRecentReleaseVideos": "Xem Video Phát Hành Mới Nhất",
|
||||
"watchUiUpdatesOverview": "Xem Tổng Quan Về Những Cập Nhật Cho Giao Diện Người Dùng",
|
||||
"items": [
|
||||
"Workflow: Hỗ trợ xâu ký tự thả xuống tùy chỉnh trong Trình Tạo Vùng Nhập.",
|
||||
"FLUX: Hỗ trợ FLUX Fill trong Workflow và Canvas."
|
||||
"Nvidia 50xx GPUs: Invoke sử dụng PyTorch 2.7.0, thứ tối quan trọng cho những GPU trên.",
|
||||
"Mối Quan Hệ Model: Kết nối LoRA với model chính, và LoRA đó sẽ được hiển thị đầu danh sách."
|
||||
]
|
||||
},
|
||||
"upsell": {
|
||||
|
||||
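Two i18next conventions in the locale keys above are worth calling out: the `_other` suffix on `options_withCount` selects a plural form based on a `count` option, and `$t(controlLayers.denoiseLimit)` nests one translation inside another, so `addDenoiseLimit` stays in sync with `denoiseLimit`. A minimal sketch of how such keys are consumed; the key prefix and component are illustrative, not taken from this diff:

```tsx
import { useTranslation } from 'react-i18next';

// Illustrative component; the real UI and the key's namespace/prefix may differ.
const OptionCount = ({ count }: { count: number }) => {
  const { t } = useTranslation();
  // i18next picks `options_withCount_other` (or another plural form) based on `count`.
  return <span>{t('accordions.options_withCount', { count })}</span>;
};
```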
@@ -5,6 +5,7 @@ import type { StudioInitAction } from 'app/hooks/useStudioInitAction';
import { $didStudioInit } from 'app/hooks/useStudioInitAction';
import type { LoggingOverrides } from 'app/logging/logger';
import { $loggingOverrides, configureLogging } from 'app/logging/logger';
import { $accountSettingsLink } from 'app/store/nanostores/accountSettingsLink';
import { $authToken } from 'app/store/nanostores/authToken';
import { $baseUrl } from 'app/store/nanostores/baseUrl';
import { $customNavComponent } from 'app/store/nanostores/customNavComponent';
@@ -12,10 +13,13 @@ import type { CustomStarUi } from 'app/store/nanostores/customStarUI';
import { $customStarUI } from 'app/store/nanostores/customStarUI';
import { $isDebugging } from 'app/store/nanostores/isDebugging';
import { $logo } from 'app/store/nanostores/logo';
import { $onClickGoToModelManager } from 'app/store/nanostores/onClickGoToModelManager';
import { $openAPISchemaUrl } from 'app/store/nanostores/openAPISchemaUrl';
import { $projectId, $projectName, $projectUrl } from 'app/store/nanostores/projectId';
import { $queueId, DEFAULT_QUEUE_ID } from 'app/store/nanostores/queueId';
import { $store } from 'app/store/nanostores/store';
import { $toastMap } from 'app/store/nanostores/toastMap';
import { $whatsNew } from 'app/store/nanostores/whatsNew';
import { createStore } from 'app/store/store';
import type { PartialAppConfig } from 'app/types/invokeai';
import Loading from 'common/components/Loading/Loading';
@@ -29,6 +33,7 @@ import {
DEFAULT_WORKFLOW_LIBRARY_TAG_CATEGORIES,
} from 'features/nodes/store/workflowLibrarySlice';
import type { WorkflowCategory } from 'features/nodes/types/workflow';
import type { ToastConfig } from 'features/toast/toast';
import type { PropsWithChildren, ReactNode } from 'react';
import React, { lazy, memo, useEffect, useLayoutEffect, useMemo } from 'react';
import { Provider } from 'react-redux';
@@ -45,6 +50,7 @@ interface Props extends PropsWithChildren {
token?: string;
config?: PartialAppConfig;
customNavComponent?: ReactNode;
accountSettingsLink?: string;
middleware?: Middleware[];
projectId?: string;
projectName?: string;
@@ -55,10 +61,16 @@ interface Props extends PropsWithChildren {
socketOptions?: Partial<ManagerOptions & SocketOptions>;
isDebugging?: boolean;
logo?: ReactNode;
toastMap?: Record<string, ToastConfig>;
whatsNew?: ReactNode[];
workflowCategories?: WorkflowCategory[];
workflowTagCategories?: WorkflowTagCategory[];
workflowSortOptions?: WorkflowSortOption[];
loggingOverrides?: LoggingOverrides;
/**
* If provided, overrides in-app navigation to the model manager
*/
onClickGoToModelManager?: () => void;
}

const InvokeAIUI = ({
@@ -67,6 +79,7 @@ const InvokeAIUI = ({
token,
config,
customNavComponent,
accountSettingsLink,
middleware,
projectId,
projectName,
@@ -77,10 +90,13 @@
socketOptions,
isDebugging = false,
logo,
toastMap,
workflowCategories,
workflowTagCategories,
workflowSortOptions,
loggingOverrides,
onClickGoToModelManager,
whatsNew,
}: Props) => {
useLayoutEffect(() => {
/*
@@ -169,6 +185,16 @@
};
}, [customNavComponent]);

useEffect(() => {
if (accountSettingsLink) {
$accountSettingsLink.set(accountSettingsLink);
}

return () => {
$accountSettingsLink.set(undefined);
};
}, [accountSettingsLink]);

useEffect(() => {
if (openAPISchemaUrl) {
$openAPISchemaUrl.set(openAPISchemaUrl);
@@ -205,6 +231,36 @@
};
}, [logo]);

useEffect(() => {
if (toastMap) {
$toastMap.set(toastMap);
}

return () => {
$toastMap.set(undefined);
};
}, [toastMap]);

useEffect(() => {
if (whatsNew) {
$whatsNew.set(whatsNew);
}

return () => {
$whatsNew.set(undefined);
};
}, [whatsNew]);

useEffect(() => {
if (onClickGoToModelManager) {
$onClickGoToModelManager.set(onClickGoToModelManager);
}

return () => {
$onClickGoToModelManager.set(undefined);
};
}, [onClickGoToModelManager]);

useEffect(() => {
if (workflowCategories) {
$workflowLibraryCategoriesOptions.set(workflowCategories);
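For orientation, this is roughly how an embedding application would wire the new props; the prop names come from the `Props` interface above, while the import path, default export, and concrete values are assumptions for illustration only:

```tsx
// Illustrative host-side usage; the import path and export name are assumed.
import InvokeAIUI from 'app/components/InvokeAIUI';

export const Host = () => (
  <InvokeAIUI
    projectName="My Project"
    accountSettingsLink="https://example.com/account"
    onClickGoToModelManager={() => {
      // Replaces in-app navigation to the model manager with host-side routing.
      window.location.assign('/models');
    }}
    whatsNew={[<p key="whats-new">Example release note</p>]}
  />
);
```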
@@ -1,3 +1,4 @@
import type { AlertStatus } from '@invoke-ai/ui-library';
import { createAction } from '@reduxjs/toolkit';
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
@@ -6,11 +7,15 @@ import { withResult, withResultAsync } from 'common/util/result';
import { parseify } from 'common/util/serialize';
import { $canvasManager } from 'features/controlLayers/store/ephemeral';
import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
import { buildChatGPT4oGraph } from 'features/nodes/util/graph/generation/buildChatGPT4oGraph';
import { buildCogView4Graph } from 'features/nodes/util/graph/generation/buildCogView4Graph';
import { buildFLUXGraph } from 'features/nodes/util/graph/generation/buildFLUXGraph';
import { buildImagen3Graph } from 'features/nodes/util/graph/generation/buildImagen3Graph';
import { buildImagen4Graph } from 'features/nodes/util/graph/generation/buildImagen4Graph';
import { buildSD1Graph } from 'features/nodes/util/graph/generation/buildSD1Graph';
import { buildSD3Graph } from 'features/nodes/util/graph/generation/buildSD3Graph';
import { buildSDXLGraph } from 'features/nodes/util/graph/generation/buildSDXLGraph';
import { UnsupportedGenerationModeError } from 'features/nodes/util/graph/types';
import { toast } from 'features/toast/toast';
import { serializeError } from 'serialize-error';
import { enqueueMutationFixedCacheKeyOptions, queueApi } from 'services/api/endpoints/queue';
@@ -48,32 +53,52 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)
return await buildFLUXGraph(state, manager);
case 'cogview4':
return await buildCogView4Graph(state, manager);
case 'imagen3':
return await buildImagen3Graph(state, manager);
case 'imagen4':
return await buildImagen4Graph(state, manager);
case 'chatgpt-4o':
return await buildChatGPT4oGraph(state, manager);
default:
assert(false, `No graph builders for base ${base}`);
}
});

if (buildGraphResult.isErr()) {
let title = 'Failed to build graph';
let status: AlertStatus = 'error';
let description: string | null = null;
if (buildGraphResult.error instanceof AssertionError) {
description = extractMessageFromAssertionError(buildGraphResult.error);
} else if (buildGraphResult.error instanceof UnsupportedGenerationModeError) {
title = 'Unsupported generation mode';
description = buildGraphResult.error.message;
status = 'warning';
}
const error = serializeError(buildGraphResult.error);
log.error({ error }, 'Failed to build graph');
toast({
status: 'error',
title: 'Failed to build graph',
status,
title,
description,
});
return;
}

const { g, noise, posCond } = buildGraphResult.value;
const { g, seedFieldIdentifier, positivePromptFieldIdentifier } = buildGraphResult.value;

const destination = state.canvasSettings.sendToCanvas ? 'canvas' : 'gallery';

const prepareBatchResult = withResult(() =>
prepareLinearUIBatch(state, g, prepend, noise, posCond, 'canvas', destination)
prepareLinearUIBatch({
state,
g,
prepend,
seedFieldIdentifier,
positivePromptFieldIdentifier,
origin: 'canvas',
destination,
})
);

if (prepareBatchResult.isErr()) {
@@ -89,7 +114,7 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)
await req.unwrap();
log.debug(parseify({ batchConfig: prepareBatchResult.value }), 'Enqueued batch');
} catch (error) {
log.error({ error: serializeError(error) }, 'Failed to enqueue batch');
log.error({ error: serializeError(error as Error) }, 'Failed to enqueue batch');
} finally {
req.reset();
}
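The refactor above replaces `prepareLinearUIBatch`'s positional arguments with a single options object. The function's definition is not part of this diff; inferred from the two call sites, its argument plausibly has a shape like the sketch below (all type names here are assumptions, stubbed so the sketch stands alone):

```ts
// Placeholder types so the sketch compiles on its own; the real types live in the app.
type RootState = unknown;
type Graph = unknown;
type FieldIdentifier = { nodeId: string; fieldName: string }; // assumed shape

// Assumed argument shape, reconstructed from the call sites only — not the actual definition.
type PrepareLinearUIBatchArg = {
  state: RootState; // full app state used to fill in batch fields
  g: Graph; // graph returned by the build*Graph helpers
  prepend: boolean; // whether to enqueue at the front of the queue
  seedFieldIdentifier?: FieldIdentifier; // replaces the old `noise` positional argument
  positivePromptFieldIdentifier?: FieldIdentifier; // replaces the old `posCond` positional argument
  origin: 'canvas' | 'upscaling';
  destination: 'canvas' | 'gallery';
};

declare function prepareLinearUIBatch(arg: PrepareLinearUIBatchArg): unknown; // return type not shown in the diff
```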
@@ -18,16 +18,24 @@ export const addEnqueueRequestedUpscale = (startAppListening: AppStartListening)
const state = getState();
const { prepend } = action.payload;

const { g, noise, posCond } = await buildMultidiffusionUpscaleGraph(state);
const { g, seedFieldIdentifier, positivePromptFieldIdentifier } = await buildMultidiffusionUpscaleGraph(state);

const batchConfig = prepareLinearUIBatch(state, g, prepend, noise, posCond, 'upscaling', 'gallery');
const batchConfig = prepareLinearUIBatch({
state,
g,
prepend,
seedFieldIdentifier,
positivePromptFieldIdentifier,
origin: 'upscaling',
destination: 'gallery',
});

const req = dispatch(queueApi.endpoints.enqueueBatch.initiate(batchConfig, enqueueMutationFixedCacheKeyOptions));
try {
await req.unwrap();
log.debug(parseify({ batchConfig }), 'Enqueued batch');
} catch (error) {
log.error({ error: serializeError(error) }, 'Failed to enqueue batch');
log.error({ error: serializeError(error as Error) }, 'Failed to enqueue batch');
} finally {
req.reset();
}
@@ -0,0 +1,3 @@
import { atom } from 'nanostores';

export const $accountSettingsLink = atom<string | undefined>(undefined);
@@ -0,0 +1,3 @@
import { atom } from 'nanostores';

export const $onClickGoToModelManager = atom<(() => void) | undefined>(undefined);
@@ -0,0 +1,4 @@
import type { ToastConfig } from 'features/toast/toast';
import { atom } from 'nanostores';

export const $toastMap = atom<Record<string, ToastConfig> | undefined>(undefined);
@@ -0,0 +1,4 @@
import { atom } from 'nanostores';
import type { ReactNode } from 'react';

export const $whatsNew = atom<ReactNode[] | undefined>(undefined);
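These module-level atoms are the bridge between the `InvokeAIUI` props above and the rest of the app: the component writes them in `useEffect`, and consumers read them with `useStore` from `@nanostores/react` (the same pattern used by `CanvasBusySpinner` further down). A minimal consumer sketch; the component and markup are illustrative:

```tsx
import { useStore } from '@nanostores/react';
import { $accountSettingsLink } from 'app/store/nanostores/accountSettingsLink';

export const AccountSettingsLink = () => {
  const href = useStore($accountSettingsLink);
  if (!href) {
    // The host app did not provide the prop, so render nothing.
    return null;
  }
  return <a href={href}>Account settings</a>;
};
```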
@@ -145,7 +145,10 @@ const unserialize: UnserializeFunction = (data, key) => {
);
return transformed;
} catch (err) {
log.warn({ error: serializeError(err) }, `Error rehydrating slice "${key}", falling back to default initial state`);
log.warn(
{ error: serializeError(err as Error) },
`Error rehydrating slice "${key}", falling back to default initial state`
);
return persistConfig.initialState;
}
};
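The `err as Error` casts here and in the enqueue listeners exist because a caught value is `unknown` under strict TypeScript, presumably while `serializeError`'s parameter is typed more narrowly in the version used. If the cast feels too blunt, a small narrowing helper is an alternative; this is purely illustrative and not part of the change:

```ts
// Illustrative helper, not present in the diff.
const asError = (value: unknown): Error =>
  value instanceof Error ? value : new Error(String(value));

// Usage: log.warn({ error: serializeError(asError(err)) }, '...');
```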
@@ -28,7 +28,8 @@ export type AppFeature =
| 'starterModels'
| 'hfToken'
| 'retryQueueItem'
| 'cancelAndClearAll';
| 'cancelAndClearAll'
| 'chatGPT4oHigh';
/**
* A disable-able Stable Diffusion feature
*/
@@ -83,6 +84,7 @@ export type AppConfig = {
metadataFetchDebounce?: number;
workflowFetchDebounce?: number;
isLocal?: boolean;
shouldShowCredits: boolean;
sd: {
defaultModel?: string;
disabledControlNetModels: string[];
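`'chatGPT4oHigh'` joins the `AppFeature` union and `shouldShowCredits` joins `AppConfig`. How such a feature flag is consumed is not shown in this diff; a hedged sketch, assuming the config exposes a list of disabled features:

```ts
// Sketch only: `disabledFeatures` is assumed here, it is not visible in this diff.
type SketchConfig = { disabledFeatures?: AppFeature[] };

const isFeatureEnabled = (config: SketchConfig, feature: AppFeature): boolean =>
  !(config.disabledFeatures ?? []).includes(feature);

// e.g. isFeatureEnabled(appConfig, 'chatGPT4oHigh') could gate a high-quality toggle in the UI.
```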
invokeai/frontend/web/src/common/components/Picker/Picker.tsx — new file, 1092 lines (diff suppressed because it is too large).
@@ -83,7 +83,7 @@ export const useImageUploadButton = ({ onUpload, isDisabled, allowMultiple }: Us
}
} else {
let imageDTOs: ImageDTO[] = [];
if (isClientSideUploadEnabled) {
if (isClientSideUploadEnabled && files.length > 1) {
imageDTOs = await Promise.all(files.map((file, i) => clientSideUpload(file, i)));
} else {
imageDTOs = await uploadImages(
@@ -38,7 +38,7 @@ export const useModelCombobox = <T extends AnyModelConfig>(arg: UseModelCombobox
}, [optionsFilter, getIsDisabled, modelConfigs, shouldShowModelDescriptions]);

const value = useMemo(
() => options.find((m) => (selectedModel ? m.value === selectedModel.key : false)),
() => options.find((m) => (selectedModel ? m.value === selectedModel.key : false)) ?? null,
[options, selectedModel]
);
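The `?? null` fallback matters because the combobox is a controlled input: when no option matches the selected model, `undefined` tends to leave the previous selection rendered by react-select-style components, whereas `null` explicitly clears it. Illustrative only:

```tsx
// Illustrative: `value={null}` clears a controlled select; `value={undefined}` may not.
<Combobox value={value ?? null} options={options} onChange={onChange} />;
```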
@@ -0,0 +1,108 @@
import { useAppStore } from 'app/store/nanostores/store';
import type { Dimensions } from 'features/controlLayers/store/types';
import { selectUiSlice, textAreaSizesStateChanged } from 'features/ui/store/uiSlice';
import { debounce } from 'lodash-es';
import { type RefObject, useCallback, useEffect, useMemo } from 'react';

type Options = {
trackWidth: boolean;
trackHeight: boolean;
initialWidth?: number;
initialHeight?: number;
};

/**
* Persists the width and/or height of a text area to redux.
* @param id The unique id of this textarea, used as key to storage
* @param ref A ref to the textarea element
* @param options.trackWidth Whether to track width
* @param options.trackHeight Whether to track height
* @param options.initialWidth An optional initial width in pixels
* @param options.initialHeight An optional initial height in pixels
*/
export const usePersistedTextAreaSize = (id: string, ref: RefObject<HTMLTextAreaElement>, options: Options) => {
const { dispatch, getState } = useAppStore();

const onResize = useCallback(
(size: Partial<Dimensions>) => {
dispatch(textAreaSizesStateChanged({ id, size }));
},
[dispatch, id]
);

const debouncedOnResize = useMemo(() => debounce(onResize, 300), [onResize]);

useEffect(() => {
const el = ref.current;
if (!el) {
return;
}

// Nothing to do here if we are not tracking anything.
if (!options.trackHeight && !options.trackWidth) {
return;
}

// Before registering the observer, grab the stored size from state - we may need to restore the size.
const storedSize = selectUiSlice(getState()).textAreaSizes[id];

// Prefer to restore the stored size, falling back to initial size if it exists
if (storedSize?.width !== undefined) {
el.style.width = `${storedSize.width}px`;
} else if (options.initialWidth !== undefined) {
el.style.width = `${options.initialWidth}px`;
}

if (storedSize?.height !== undefined) {
el.style.height = `${storedSize.height}px`;
} else if (options.initialHeight !== undefined) {
el.style.height = `${options.initialHeight}px`;
}

let currentHeight = el.offsetHeight;
let currentWidth = el.offsetWidth;

const resizeObserver = new ResizeObserver(() => {
// We only want to push the changes if a tracked dimension changes
let didChange = false;
const newSize: Partial<Dimensions> = {};

if (options.trackHeight) {
if (el.offsetHeight !== currentHeight) {
didChange = true;
currentHeight = el.offsetHeight;
}
newSize.height = currentHeight;
}

if (options.trackWidth) {
if (el.offsetWidth !== currentWidth) {
didChange = true;
currentWidth = el.offsetWidth;
}
newSize.width = currentWidth;
}

if (didChange) {
debouncedOnResize(newSize);
}
});

resizeObserver.observe(el);

return () => {
debouncedOnResize.cancel();
resizeObserver.disconnect();
};
}, [
debouncedOnResize,
dispatch,
getState,
id,
options.initialHeight,
options.initialWidth,
options.trackHeight,
options.trackWidth,
ref,
]);
};
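A usage sketch for the new hook; the id string and the component are illustrative, but the argument shape matches the `Options` type above:

```tsx
import { useRef } from 'react';

// Illustrative consumer of usePersistedTextAreaSize.
const PromptTextarea = () => {
  const ref = useRef<HTMLTextAreaElement>(null);
  // Persist only the height, restoring 120px on first mount if nothing is stored yet.
  usePersistedTextAreaSize('prompt-box', ref, { trackWidth: false, trackHeight: true, initialHeight: 120 });
  return <textarea ref={ref} style={{ resize: 'vertical' }} />;
};
```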
@@ -0,0 +1,92 @@
import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library';
import type { GroupBase } from 'chakra-react-select';
import type { ModelIdentifierField } from 'features/nodes/types/common';
import { useTranslation } from 'react-i18next';
import type { AnyModelConfig } from 'services/api/types';

import { useGroupedModelCombobox } from './useGroupedModelCombobox';
import { useRelatedModelKeys } from './useRelatedModelKeys';
import { useSelectedModelKeys } from './useSelectedModelKeys';

type UseRelatedGroupedModelComboboxArg<T extends AnyModelConfig> = {
modelConfigs: T[];
selectedModel?: ModelIdentifierField | null;
onChange: (value: T | null) => void;
getIsDisabled?: (model: T) => boolean;
isLoading?: boolean;
groupByType?: boolean;
};

// Custom hook to overlay the grouped model combobox with related models on top!
// Cleaner than hooking into useGroupedModelCombobox with a flag to enable/disable the related models
// Also allows for related models to be shown conditionally with some pretty simple logic if it ends up as a config flag.

type UseRelatedGroupedModelComboboxReturn = {
value: ComboboxOption | undefined | null;
options: GroupBase<ComboboxOption>[];
onChange: ComboboxOnChange;
placeholder: string;
noOptionsMessage: () => string;
};

export function useRelatedGroupedModelCombobox<T extends AnyModelConfig>({
modelConfigs,
selectedModel,
onChange,
isLoading = false,
getIsDisabled,
groupByType,
}: UseRelatedGroupedModelComboboxArg<T>): UseRelatedGroupedModelComboboxReturn {
const { t } = useTranslation();

const selectedKeys = useSelectedModelKeys();

const relatedKeys = useRelatedModelKeys(selectedKeys);

// Base grouped options
const base = useGroupedModelCombobox({
modelConfigs,
selectedModel,
onChange,
getIsDisabled,
isLoading,
groupByType,
});

// If no related models selected, just return base
if (relatedKeys.size === 0) {
return base;
}

const relatedOptions: ComboboxOption[] = [];
const updatedGroups: GroupBase<ComboboxOption>[] = [];

for (const group of base.options) {
const remainingOptions: ComboboxOption[] = [];

for (const option of group.options) {
if (relatedKeys.has(option.value)) {
relatedOptions.push({ ...option, label: `* ${option.label}` });
} else {
remainingOptions.push(option);
}
}

if (remainingOptions.length > 0) {
updatedGroups.push({
label: group.label,
options: remainingOptions,
});
}
}

const finalOptions: GroupBase<ComboboxOption>[] =
relatedOptions.length > 0
? [{ label: t('modelManager.relatedModels'), options: relatedOptions }, ...updatedGroups]
: updatedGroups;

return {
...base,
options: finalOptions,
};
}
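A sketch of how a model picker might consume the hook; the component, the selector, and the `Combobox` spread are illustrative, while the argument and return shapes are the ones defined above:

```tsx
// Illustrative consumer; dispatching the selection is left as a comment.
const MainModelPicker = ({ modelConfigs }: { modelConfigs: AnyModelConfig[] }) => {
  const selectedModel = useAppSelector((s) => s.params.model);
  const { value, options, onChange, placeholder, noOptionsMessage } = useRelatedGroupedModelCombobox({
    modelConfigs,
    selectedModel,
    onChange: (model) => {
      // dispatch(modelSelected(model)) or similar
    },
    groupByType: true,
  });
  return (
    <Combobox
      value={value}
      options={options}
      onChange={onChange}
      placeholder={placeholder}
      noOptionsMessage={noOptionsMessage}
    />
  );
};
```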
@@ -0,0 +1,14 @@
import { useMemo } from 'react';
import { useGetRelatedModelIdsBatchQuery } from 'services/api/endpoints/modelRelationships';

/**
* Fetches related model keys for a given set of selected model keys.
* Returns a Set<string> for fast lookup.
*/
export const useRelatedModelKeys = (selectedKeys: Set<string>) => {
const { data: related = [] } = useGetRelatedModelIdsBatchQuery([...selectedKeys], {
skip: selectedKeys.size === 0,
});

return useMemo(() => new Set(related), [related]);
};
@@ -0,0 +1,34 @@
import { useAppSelector } from 'app/store/storeHooks';

/**
* Gathers all currently selected model keys from parameters and loras.
* This includes the main model, VAE, refiner model, control LoRA, and LoRAs.
*/
export const useSelectedModelKeys = () => {
return useAppSelector((state) => {
const keys = new Set<string>();
const main = state.params.model;
const vae = state.params.vae;
const refiner = state.params.refinerModel;
const controlnet = state.params.controlLora;
const loras = state.loras.loras.map((l) => l.model);

if (main) {
keys.add(main.key);
}
if (vae) {
keys.add(vae.key);
}
if (refiner) {
keys.add(refiner.key);
}
if (controlnet) {
keys.add(controlnet.key);
}
for (const lora of loras) {
keys.add(lora.key);
}

return keys;
});
};
@@ -1,6 +1,10 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
import { memo } from 'react';

/**
* A typed version of React.memo, useful for components that take generics.
*/
export const typedMemo: <T>(c: T) => T = memo;
export const typedMemo: <T extends keyof JSX.IntrinsicElements | React.JSXElementConstructor<any>>(
component: T,
propsAreEqual?: (prevProps: React.ComponentProps<T>, nextProps: React.ComponentProps<T>) => boolean
) => T & { displayName?: string } = memo;
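The widened `typedMemo` signature addresses a long-standing annoyance: `React.memo` returns a non-generic component type, so wrapping a generic component loses inference at the call site. Constraining `T` to a JSX element constructor keeps the generic call signature and also types `displayName`. A small illustration (component and values are hypothetical):

```tsx
import type { ReactNode } from 'react';

// Generic list component; with plain React.memo, `T` would no longer be inferred at call sites.
const ItemList = typedMemo(<T,>(props: { items: T[]; renderItem: (item: T) => ReactNode }) => (
  <>{props.items.map(props.renderItem)}</>
));
ItemList.displayName = 'ItemList';

// Here `T` is inferred as number, so `n` is typed as number.
const example = <ItemList items={[1, 2, 3]} renderItem={(n) => <span key={n}>{n * 2}</span>} />;
```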
@@ -24,6 +24,7 @@ export const CanvasAddEntityButtons = memo(() => {
const isReferenceImageEnabled = useIsEntityTypeEnabled('reference_image');
const isRegionalGuidanceEnabled = useIsEntityTypeEnabled('regional_guidance');
const isControlLayerEnabled = useIsEntityTypeEnabled('control_layer');
const isInpaintLayerEnabled = useIsEntityTypeEnabled('inpaint_mask');

return (
<Flex w="full" h="full" justifyContent="center" gap={4}>
@@ -52,6 +53,7 @@ export const CanvasAddEntityButtons = memo(() => {
justifyContent="flex-start"
leftIcon={<PiPlusBold />}
onClick={addInpaintMask}
isDisabled={!isInpaintLayerEnabled}
>
{t('controlLayers.inpaintMask')}
</Button>
@@ -0,0 +1,28 @@
import { Spinner } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { useAllEntityAdapters } from 'features/controlLayers/contexts/EntityAdapterContext';
import { computed } from 'nanostores';
import { memo, useMemo } from 'react';

export const CanvasBusySpinner = memo(() => {
const canvasManager = useCanvasManager();
const allEntityAdapters = useAllEntityAdapters();
const $isPendingRectCalculation = useMemo(
() =>
computed(
allEntityAdapters.map(({ transformer }) => transformer.$isPendingRectCalculation),
(...values) => values.some((v) => v)
),
[allEntityAdapters]
);
const isPendingRectCalculation = useStore($isPendingRectCalculation);
const isRasterizing = useStore(canvasManager.stateApi.$isRasterizing);
const isCompositing = useStore(canvasManager.compositor.$isBusy);

if (isRasterizing || isCompositing || isPendingRectCalculation) {
return <Spinner opacity={0.3} />;
}
return null;
});
CanvasBusySpinner.displayName = 'CanvasBusySpinner';
@@ -25,6 +25,7 @@ export const EntityListGlobalActionBarAddLayerMenu = memo(() => {
const isReferenceImageEnabled = useIsEntityTypeEnabled('reference_image');
const isRegionalGuidanceEnabled = useIsEntityTypeEnabled('regional_guidance');
const isControlLayerEnabled = useIsEntityTypeEnabled('control_layer');
const isInpaintLayerEnabled = useIsEntityTypeEnabled('inpaint_mask');

return (
<Menu>
@@ -46,7 +47,7 @@ export const EntityListGlobalActionBarAddLayerMenu = memo(() => {
</MenuItem>
</MenuGroup>
<MenuGroup title={t('controlLayers.regional')}>
<MenuItem icon={<PiPlusBold />} onClick={addInpaintMask}>
<MenuItem icon={<PiPlusBold />} onClick={addInpaintMask} isDisabled={!isInpaintLayerEnabled}>
{t('controlLayers.inpaintMask')}
</MenuItem>
<MenuItem icon={<PiPlusBold />} onClick={addRegionalGuidance} isDisabled={!isRegionalGuidanceEnabled}>
Some files were not shown because too many files have changed in this diff.