mirror of
https://github.com/invoke-ai/InvokeAI.git
synced 2026-01-16 17:48:13 -05:00
Compare commits
282 Commits
v4.2.0
...
psyche/fea
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
dc78a0e699 | ||
|
|
08a42c3c03 | ||
|
|
0758e9cb9b | ||
|
|
fb93e686b2 | ||
|
|
350feeed56 | ||
|
|
169b75b2b7 | ||
|
|
c88de180e7 | ||
|
|
7d1844eaf2 | ||
|
|
a98ddedb95 | ||
|
|
6063487b20 | ||
|
|
9a4c167342 | ||
|
|
19227fe4e6 | ||
|
|
db0ef8d316 | ||
|
|
6a34176376 | ||
|
|
d6696a7b97 | ||
|
|
0e81e7b460 | ||
|
|
7652fbc2e9 | ||
|
|
a55b2f09e2 | ||
|
|
23b05344a3 | ||
|
|
80905ff3ea | ||
|
|
df5457231f | ||
|
|
d30c1ad6dc | ||
|
|
b1f819ae8d | ||
|
|
eff359625a | ||
|
|
cef1585dfb | ||
|
|
cb8e9e1c7b | ||
|
|
f7c356d142 | ||
|
|
efb069dd71 | ||
|
|
8edc25d35a | ||
|
|
82957bb826 | ||
|
|
e51a3025ea | ||
|
|
f1bb7e86c0 | ||
|
|
93e4c3dbc2 | ||
|
|
c3f28f7a35 | ||
|
|
c900a63842 | ||
|
|
4eb5f004e6 | ||
|
|
bcae735d7c | ||
|
|
861f06c459 | ||
|
|
c493628272 | ||
|
|
46a90ca402 | ||
|
|
d45c33b446 | ||
|
|
88025d32c2 | ||
|
|
af64764082 | ||
|
|
70487f0c2e | ||
|
|
55d7d9cc75 | ||
|
|
106674175c | ||
|
|
dd1d5bdb25 | ||
|
|
6259ac0bec | ||
|
|
ba31f8a9a9 | ||
|
|
0ba57d6dc5 | ||
|
|
abc133e936 | ||
|
|
57743239d7 | ||
|
|
4a394c60cf | ||
|
|
624d28a93d | ||
|
|
29e1ea59fc | ||
|
|
2e5d24f272 | ||
|
|
1afa340b1a | ||
|
|
3b381b5a8c | ||
|
|
f2b9684de8 | ||
|
|
a66b3497e0 | ||
|
|
683ec8e5f2 | ||
|
|
f31f0cf733 | ||
|
|
38265b3123 | ||
|
|
caca28286c | ||
|
|
38320a5100 | ||
|
|
7badaab17d | ||
|
|
aa0c59bb51 | ||
|
|
e4acaa5c8f | ||
|
|
9ba47cae20 | ||
|
|
bf4310ca71 | ||
|
|
e75f98317f | ||
|
|
1249d4a6e3 | ||
|
|
66c9f4708d | ||
|
|
32277193b6 | ||
|
|
620ee2875e | ||
|
|
5553588147 | ||
|
|
1c29b3bd85 | ||
|
|
e88b807a13 | ||
|
|
9e55ef3d4b | ||
|
|
8062a47d16 | ||
|
|
dba8c43ecb | ||
|
|
8ebf2ddf15 | ||
|
|
f4625c2671 | ||
|
|
c94742bde6 | ||
|
|
a34faf0bd8 | ||
|
|
ecfff6cb1e | ||
|
|
ba8bed6870 | ||
|
|
ca186bca61 | ||
|
|
e2f109807c | ||
|
|
281bd31db2 | ||
|
|
cea1874e00 | ||
|
|
89b0e9e4de | ||
|
|
26d0d55d97 | ||
|
|
059c5586a4 | ||
|
|
9ed5698aa8 | ||
|
|
0b5696c5d4 | ||
|
|
a51142674a | ||
|
|
b8b671c0db | ||
|
|
7cceafe0dd | ||
|
|
cbe32b647a | ||
|
|
9a8e0842bb | ||
|
|
1d7671298f | ||
|
|
e38d75c3dc | ||
|
|
21fab9785a | ||
|
|
b3429553bb | ||
|
|
e480844042 | ||
|
|
26029108f7 | ||
|
|
504ac82077 | ||
|
|
6b11740dda | ||
|
|
a80e3448f5 | ||
|
|
4bda174eb9 | ||
|
|
b1e28c2f2c | ||
|
|
83000a4190 | ||
|
|
c98205d0d7 | ||
|
|
ce2ad5903c | ||
|
|
fe3980a369 | ||
|
|
ea97ae5ae8 | ||
|
|
3605b6b1a3 | ||
|
|
fc31dddbf7 | ||
|
|
6ad01d824d | ||
|
|
78f9f3ee95 | ||
|
|
972398d203 | ||
|
|
857889d1fa | ||
|
|
8074a802d6 | ||
|
|
059d5a682c | ||
|
|
00c2d8f95d | ||
|
|
04a596179b | ||
|
|
3fcb2720d7 | ||
|
|
6f7160b9fd | ||
|
|
6b4e464d17 | ||
|
|
9f7841a04b | ||
|
|
468644ab18 | ||
|
|
9d127fee6b | ||
|
|
6658897210 | ||
|
|
af7b194bec | ||
|
|
de1ea50e6d | ||
|
|
2680ef52c2 | ||
|
|
a012bb6e07 | ||
|
|
6a2c53f6c5 | ||
|
|
2cbf7d9221 | ||
|
|
fe7ed72c9c | ||
|
|
85a5a7c47a | ||
|
|
af3fd26d4e | ||
|
|
5127fd6320 | ||
|
|
124d34a8cc | ||
|
|
e8387d7523 | ||
|
|
a5d08c981b | ||
|
|
811d0da0f0 | ||
|
|
17e1fc5254 | ||
|
|
84e031edc2 | ||
|
|
b6b7e737e0 | ||
|
|
5f3e7afd45 | ||
|
|
b0cfca9d24 | ||
|
|
985ef89825 | ||
|
|
5928ade5fd | ||
|
|
93ebc175c6 | ||
|
|
386d552493 | ||
|
|
799cf06d20 | ||
|
|
922716d2ab | ||
|
|
66fc110b64 | ||
|
|
822f1e1f06 | ||
|
|
5d60c3c8e1 | ||
|
|
4e21d01c7f | ||
|
|
6b7b0b3777 | ||
|
|
07feb5ba07 | ||
|
|
a18d7adad4 | ||
|
|
32dff2c4e3 | ||
|
|
575ecb4028 | ||
|
|
ad8778df6c | ||
|
|
d2f5103f9f | ||
|
|
dd42a56084 | ||
|
|
23ac340a3f | ||
|
|
6791b4eaa8 | ||
|
|
a8b042177d | ||
|
|
76825f4261 | ||
|
|
78cb4d75ad | ||
|
|
a18bbac262 | ||
|
|
9ff5596963 | ||
|
|
8ea596b1e9 | ||
|
|
e3a143eaed | ||
|
|
c359ab6d9b | ||
|
|
dbfaa07e03 | ||
|
|
7f78fe7a36 | ||
|
|
6cf5b402c6 | ||
|
|
b0c7c7cb47 | ||
|
|
4d68cd8dbb | ||
|
|
2c1fa30639 | ||
|
|
708c68413d | ||
|
|
1d884fb794 | ||
|
|
f6a44681a8 | ||
|
|
d4df312300 | ||
|
|
9c0d44b412 | ||
|
|
27826369f0 | ||
|
|
31d8b50276 | ||
|
|
40b4fa7238 | ||
|
|
3b1743b7c2 | ||
|
|
f489c818f1 | ||
|
|
af477fa295 | ||
|
|
0ff0290735 | ||
|
|
67dbe6d949 | ||
|
|
4c3c2297b9 | ||
|
|
cadea55521 | ||
|
|
c8f30b1392 | ||
|
|
3d14a98abf | ||
|
|
77024bfca7 | ||
|
|
4a1c3786a1 | ||
|
|
b239891986 | ||
|
|
9fb03d43ff | ||
|
|
bdc59786bd | ||
|
|
fb6e926500 | ||
|
|
48ccd63dba | ||
|
|
ee647a05dc | ||
|
|
154b52ca4d | ||
|
|
5dd460c3ce | ||
|
|
4897ce2a13 | ||
|
|
5425526d50 | ||
|
|
5a4b050e66 | ||
|
|
8d39520232 | ||
|
|
04d12a1e98 | ||
|
|
39aa70963b | ||
|
|
5743254a41 | ||
|
|
c538ffea26 | ||
|
|
e8d3a7c870 | ||
|
|
2be66b1546 | ||
|
|
76e181fd44 | ||
|
|
b5d42fbc66 | ||
|
|
b463cd763e | ||
|
|
eb320df41d | ||
|
|
de1869773f | ||
|
|
ef89c7e537 | ||
|
|
008645d386 | ||
|
|
f8042ffb41 | ||
|
|
dbe22be598 | ||
|
|
8f6078d007 | ||
|
|
4020bf47e2 | ||
|
|
9d685da759 | ||
|
|
e3289856c0 | ||
|
|
47b8153728 | ||
|
|
7901e4c082 | ||
|
|
18b0977a31 | ||
|
|
fc6b214470 | ||
|
|
e22211dac0 | ||
|
|
e222484663 | ||
|
|
2a9cea6689 | ||
|
|
93da75209c | ||
|
|
9c819f0fd8 | ||
|
|
eef6fcf286 | ||
|
|
e375d9f787 | ||
|
|
ab18174774 | ||
|
|
9265841384 | ||
|
|
c5fd08125d | ||
|
|
11d88dae7f | ||
|
|
3b495659b0 | ||
|
|
15c9a3a4b6 | ||
|
|
60e77e4ed6 | ||
|
|
fa832a8ac6 | ||
|
|
f7834d7d59 | ||
|
|
63d7461510 | ||
|
|
1de704160e | ||
|
|
b118a2565c | ||
|
|
eb166baafe | ||
|
|
818d37f304 | ||
|
|
9cdb801c1c | ||
|
|
5da8cde4fc | ||
|
|
6ec3dc0c0d | ||
|
|
6050dffb25 | ||
|
|
93efeafe30 | ||
|
|
f167e8a8d3 | ||
|
|
124d49f35e | ||
|
|
52d8efa892 | ||
|
|
4ea8416c68 | ||
|
|
8dd0bfb068 | ||
|
|
6ff1c7d541 | ||
|
|
19f5a9c3a9 | ||
|
|
d9ce9c62ac | ||
|
|
cdc468a38c | ||
|
|
2656f13a4a | ||
|
|
da61396b1c | ||
|
|
6c9fb617dc | ||
|
|
5dd73fe53e | ||
|
|
e6793be465 | ||
|
|
63e62c5720 |
@@ -117,13 +117,13 @@ Stateless fields do not store their value in the node, so their field instances
|
||||
|
||||
"Custom" fields will always be treated as stateless fields.
|
||||
|
||||
##### Collection and Scalar Fields
|
||||
##### Single and Collection Fields
|
||||
|
||||
Field types have a name and two flags which may identify it as a **collection** or **collection or scalar** field.
|
||||
Field types have a name and cardinality property which may identify it as a **SINGLE**, **COLLECTION** or **SINGLE_OR_COLLECTION** field.
|
||||
|
||||
If a field is annotated in python as a list, its field type is parsed and flagged as a **collection** type (e.g. `list[int]`).
|
||||
|
||||
If it is annotated as a union of a type and list, the type will be flagged as a **collection or scalar** type (e.g. `Union[int, list[int]]`). Fields may not be unions of different types (e.g. `Union[int, list[str]]` and `Union[int, str]` are not allowed).
|
||||
- If a field is annotated in python as a singular value or class, its field type is parsed as a **SINGLE** type (e.g. `int`, `ImageField`, `str`).
|
||||
- If a field is annotated in python as a list, its field type is parsed as a **COLLECTION** type (e.g. `list[int]`).
|
||||
- If it is annotated as a union of a type and list, the type will be parsed as a **SINGLE_OR_COLLECTION** type (e.g. `Union[int, list[int]]`). Fields may not be unions of different types (e.g. `Union[int, list[str]]` and `Union[int, str]` are not allowed).
|
||||
|
||||
## Implementation
|
||||
|
||||
@@ -173,8 +173,7 @@ Field types are represented as structured objects:
|
||||
```ts
|
||||
type FieldType = {
|
||||
name: string;
|
||||
isCollection: boolean;
|
||||
isCollectionOrScalar: boolean;
|
||||
cardinality: 'SINGLE' | 'COLLECTION' | 'SINGLE_OR_COLLECTION';
|
||||
};
|
||||
```
|
||||
|
||||
@@ -186,7 +185,7 @@ There are 4 general cases for field type parsing.
|
||||
|
||||
When a field is annotated as a primitive values (e.g. `int`, `str`, `float`), the field type parsing is fairly straightforward. The field is represented by a simple OpenAPI **schema object**, which has a `type` property.
|
||||
|
||||
We create a field type name from this `type` string (e.g. `string` -> `StringField`).
|
||||
We create a field type name from this `type` string (e.g. `string` -> `StringField`). The cardinality is `"SINGLE"`.
|
||||
|
||||
##### Complex Types
|
||||
|
||||
@@ -200,13 +199,13 @@ We need to **dereference** the schema to pull these out. Dereferencing may requi
|
||||
|
||||
When a field is annotated as a list of a single type, the schema object has an `items` property. They may be a schema object or reference object and must be parsed to determine the item type.
|
||||
|
||||
We use the item type for field type name, adding `isCollection: true` to the field type.
|
||||
We use the item type for field type name. The cardinality is `"COLLECTION"`.
|
||||
|
||||
##### Collection or Scalar Types
|
||||
##### Single or Collection Types
|
||||
|
||||
When a field is annotated as a union of a type and list of that type, the schema object has an `anyOf` property, which holds a list of valid types for the union.
|
||||
|
||||
After verifying that the union has two members (a type and list of the same type), we use the type for field type name, adding `isCollectionOrScalar: true` to the field type.
|
||||
After verifying that the union has two members (a type and list of the same type), we use the type for field type name, with cardinality `"SINGLE_OR_COLLECTION"`.
|
||||
|
||||
##### Optional Fields
|
||||
|
||||
|
||||
@@ -165,7 +165,7 @@ Additionally, each section can be expanded with the "Show Advanced" button in o
|
||||
There are several ways to install IP-Adapter models with an existing InvokeAI installation:
|
||||
|
||||
1. Through the command line interface launched from the invoke.sh / invoke.bat scripts, option [4] to download models.
|
||||
2. Through the Model Manager UI with models from the *Tools* section of [www.models.invoke.ai](https://www.models.invoke.ai). To do this, copy the repo ID from the desired model page, and paste it in the Add Model field of the model manager. **Note** Both the IP-Adapter and the Image Encoder must be installed for IP-Adapter to work. For example, the [SD 1.5 IP-Adapter](https://models.invoke.ai/InvokeAI/ip_adapter_plus_sd15) and [SD1.5 Image Encoder](https://models.invoke.ai/InvokeAI/ip_adapter_sd_image_encoder) must be installed to use IP-Adapter with SD1.5 based models.
|
||||
2. Through the Model Manager UI with models from the *Tools* section of [models.invoke.ai](https://models.invoke.ai). To do this, copy the repo ID from the desired model page, and paste it in the Add Model field of the model manager. **Note** Both the IP-Adapter and the Image Encoder must be installed for IP-Adapter to work. For example, the [SD 1.5 IP-Adapter](https://models.invoke.ai/InvokeAI/ip_adapter_plus_sd15) and [SD1.5 Image Encoder](https://models.invoke.ai/InvokeAI/ip_adapter_sd_image_encoder) must be installed to use IP-Adapter with SD1.5 based models.
|
||||
3. **Advanced -- Not recommended ** Manually downloading the IP-Adapter and Image Encoder files - Image Encoder folders shouid be placed in the `models\any\clip_vision` folders. IP Adapter Model folders should be placed in the relevant `ip-adapter` folder of relevant base model folder of Invoke root directory. For example, for the SDXL IP-Adapter, files should be added to the `model/sdxl/ip_adapter/` folder.
|
||||
|
||||
#### Using IP-Adapter
|
||||
|
||||
@@ -98,7 +98,7 @@ Updating is exactly the same as installing - download the latest installer, choo
|
||||
|
||||
If you have installation issues, please review the [FAQ]. You can also [create an issue] or ask for help on [discord].
|
||||
|
||||
[installation requirements]: INSTALLATION.md#installation-requirements
|
||||
[installation requirements]: INSTALL_REQUIREMENTS.md
|
||||
[FAQ]: ../help/FAQ.md
|
||||
[install some models]: 050_INSTALLING_MODELS.md
|
||||
[configuration docs]: ../features/CONFIGURATION.md
|
||||
|
||||
@@ -10,7 +10,7 @@ InvokeAI is distributed as a python package on PyPI, installable with `pip`. The
|
||||
|
||||
### Requirements
|
||||
|
||||
Before you start, go through the [installation requirements].
|
||||
Before you start, go through the [installation requirements](./INSTALL_REQUIREMENTS.md).
|
||||
|
||||
### Installation Walkthrough
|
||||
|
||||
@@ -79,7 +79,7 @@ Before you start, go through the [installation requirements].
|
||||
|
||||
1. Install the InvokeAI Package. The base command is `pip install InvokeAI --use-pep517`, but you may need to change this depending on your system and the desired features.
|
||||
|
||||
- You may need to provide an [extra index URL]. Select your platform configuration using [this tool on the PyTorch website]. Copy the `--extra-index-url` string from this and append it to your install command.
|
||||
- You may need to provide an [extra index URL](https://pip.pypa.io/en/stable/cli/pip_install/#cmdoption-extra-index-url). Select your platform configuration using [this tool on the PyTorch website](https://pytorch.org/get-started/locally/). Copy the `--extra-index-url` string from this and append it to your install command.
|
||||
|
||||
!!! example "Install with an extra index URL"
|
||||
|
||||
@@ -116,4 +116,4 @@ Before you start, go through the [installation requirements].
|
||||
|
||||
!!! warning
|
||||
|
||||
If the virtual environment is _not_ inside the root directory, then you _must_ specify the path to the root directory with `--root_dir \path\to\invokeai` or the `INVOKEAI_ROOT` environment variable.
|
||||
If the virtual environment is _not_ inside the root directory, then you _must_ specify the path to the root directory with `--root \path\to\invokeai` or the `INVOKEAI_ROOT` environment variable.
|
||||
|
||||
@@ -37,13 +37,13 @@ Invoke runs best with a dedicated GPU, but will fall back to running on CPU, alb
|
||||
=== "Nvidia"
|
||||
|
||||
```
|
||||
Any GPU with at least 8GB VRAM. Linux only.
|
||||
Any GPU with at least 8GB VRAM.
|
||||
```
|
||||
|
||||
=== "AMD"
|
||||
|
||||
```
|
||||
Any GPU with at least 16GB VRAM.
|
||||
Any GPU with at least 16GB VRAM. Linux only.
|
||||
```
|
||||
|
||||
=== "Mac"
|
||||
|
||||
@@ -10,8 +10,7 @@ set INVOKEAI_ROOT=.
|
||||
echo Desired action:
|
||||
echo 1. Generate images with the browser-based interface
|
||||
echo 2. Open the developer console
|
||||
echo 3. Run the InvokeAI image database maintenance script
|
||||
echo 4. Command-line help
|
||||
echo 3. Command-line help
|
||||
echo Q - Quit
|
||||
echo.
|
||||
echo To update, download and run the installer from https://github.com/invoke-ai/InvokeAI/releases/latest.
|
||||
@@ -34,9 +33,6 @@ IF /I "%choice%" == "1" (
|
||||
echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
|
||||
call cmd /k
|
||||
) ELSE IF /I "%choice%" == "3" (
|
||||
echo Running the db maintenance script...
|
||||
python .venv\Scripts\invokeai-db-maintenance.exe
|
||||
) ELSE IF /I "%choice%" == "4" (
|
||||
echo Displaying command line help...
|
||||
python .venv\Scripts\invokeai-web.exe --help %*
|
||||
pause
|
||||
|
||||
@@ -47,11 +47,6 @@ do_choice() {
|
||||
bash --init-file "$file_name"
|
||||
;;
|
||||
3)
|
||||
clear
|
||||
printf "Running the db maintenance script\n"
|
||||
invokeai-db-maintenance --root ${INVOKEAI_ROOT}
|
||||
;;
|
||||
4)
|
||||
clear
|
||||
printf "Command-line help\n"
|
||||
invokeai-web --help
|
||||
@@ -71,8 +66,7 @@ do_line_input() {
|
||||
printf "What would you like to do?\n"
|
||||
printf "1: Generate images using the browser-based interface\n"
|
||||
printf "2: Open the developer console\n"
|
||||
printf "3: Run the InvokeAI image database maintenance script\n"
|
||||
printf "4: Command-line help\n"
|
||||
printf "3: Command-line help\n"
|
||||
printf "Q: Quit\n\n"
|
||||
printf "To update, download and run the installer from https://github.com/invoke-ai/InvokeAI/releases/latest.\n\n"
|
||||
read -p "Please enter 1-4, Q: [1] " yn
|
||||
|
||||
@@ -29,7 +29,7 @@ from ..services.model_images.model_images_default import ModelImageFileStorageDi
|
||||
from ..services.model_manager.model_manager_default import ModelManagerService
|
||||
from ..services.model_records import ModelRecordServiceSQL
|
||||
from ..services.names.names_default import SimpleNameService
|
||||
from ..services.session_processor.session_processor_default import DefaultSessionProcessor
|
||||
from ..services.session_processor.session_processor_default import DefaultSessionProcessor, DefaultSessionRunner
|
||||
from ..services.session_queue.session_queue_sqlite import SqliteSessionQueue
|
||||
from ..services.urls.urls_default import LocalUrlService
|
||||
from ..services.workflow_records.workflow_records_sqlite import SqliteWorkflowRecordsStorage
|
||||
@@ -103,7 +103,8 @@ class ApiDependencies:
|
||||
)
|
||||
names = SimpleNameService()
|
||||
performance_statistics = InvocationStatsService()
|
||||
session_processor = DefaultSessionProcessor()
|
||||
|
||||
session_processor = DefaultSessionProcessor(session_runner=DefaultSessionRunner())
|
||||
session_queue = SqliteSessionQueue(db=db)
|
||||
urls = LocalUrlService()
|
||||
workflow_records = SqliteWorkflowRecordsStorage(db=db)
|
||||
|
||||
@@ -13,7 +13,6 @@ from pydantic import BaseModel, Field
|
||||
from invokeai.app.invocations.upscale import ESRGAN_MODELS
|
||||
from invokeai.app.services.invocation_cache.invocation_cache_common import InvocationCacheStatus
|
||||
from invokeai.backend.image_util.infill_methods.patchmatch import PatchMatch
|
||||
from invokeai.backend.image_util.safety_checker import SafetyChecker
|
||||
from invokeai.backend.util.logging import logging
|
||||
from invokeai.version import __version__
|
||||
|
||||
@@ -109,9 +108,7 @@ async def get_config() -> AppConfig:
|
||||
upscaling_models.append(str(Path(model).stem))
|
||||
upscaler = Upscaler(upscaling_method="esrgan", upscaling_models=upscaling_models)
|
||||
|
||||
nsfw_methods = []
|
||||
if SafetyChecker.safety_checker_available():
|
||||
nsfw_methods.append("nsfw_checker")
|
||||
nsfw_methods = ["nsfw_checker"]
|
||||
|
||||
watermarking_methods = ["invisible_watermark"]
|
||||
|
||||
|
||||
@@ -6,13 +6,12 @@ from fastapi import BackgroundTasks, Body, HTTPException, Path, Query, Request,
|
||||
from fastapi.responses import FileResponse
|
||||
from fastapi.routing import APIRouter
|
||||
from PIL import Image
|
||||
from pydantic import BaseModel, Field, ValidationError
|
||||
from pydantic import BaseModel, Field, JsonValue
|
||||
|
||||
from invokeai.app.invocations.fields import MetadataField, MetadataFieldValidator
|
||||
from invokeai.app.invocations.fields import MetadataField
|
||||
from invokeai.app.services.image_records.image_records_common import ImageCategory, ImageRecordChanges, ResourceOrigin
|
||||
from invokeai.app.services.images.images_common import ImageDTO, ImageUrlsDTO
|
||||
from invokeai.app.services.shared.pagination import OffsetPaginatedResults
|
||||
from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID, WorkflowWithoutIDValidator
|
||||
|
||||
from ..dependencies import ApiDependencies
|
||||
|
||||
@@ -42,13 +41,17 @@ async def upload_image(
|
||||
board_id: Optional[str] = Query(default=None, description="The board to add this image to, if any"),
|
||||
session_id: Optional[str] = Query(default=None, description="The session ID associated with this upload, if any"),
|
||||
crop_visible: Optional[bool] = Query(default=False, description="Whether to crop the image"),
|
||||
metadata: Optional[JsonValue] = Body(
|
||||
default=None, description="The metadata to associate with the image", embed=True
|
||||
),
|
||||
) -> ImageDTO:
|
||||
"""Uploads an image"""
|
||||
if not file.content_type or not file.content_type.startswith("image"):
|
||||
raise HTTPException(status_code=415, detail="Not an image")
|
||||
|
||||
metadata = None
|
||||
workflow = None
|
||||
_metadata = None
|
||||
_workflow = None
|
||||
_graph = None
|
||||
|
||||
contents = await file.read()
|
||||
try:
|
||||
@@ -62,22 +65,28 @@ async def upload_image(
|
||||
|
||||
# TODO: retain non-invokeai metadata on upload?
|
||||
# attempt to parse metadata from image
|
||||
metadata_raw = pil_image.info.get("invokeai_metadata", None)
|
||||
if metadata_raw:
|
||||
try:
|
||||
metadata = MetadataFieldValidator.validate_json(metadata_raw)
|
||||
except ValidationError:
|
||||
ApiDependencies.invoker.services.logger.warn("Failed to parse metadata for uploaded image")
|
||||
pass
|
||||
metadata_raw = metadata if isinstance(metadata, str) else pil_image.info.get("invokeai_metadata", None)
|
||||
if isinstance(metadata_raw, str):
|
||||
_metadata = metadata_raw
|
||||
else:
|
||||
ApiDependencies.invoker.services.logger.debug("Failed to parse metadata for uploaded image")
|
||||
pass
|
||||
|
||||
# attempt to parse workflow from image
|
||||
workflow_raw = pil_image.info.get("invokeai_workflow", None)
|
||||
if workflow_raw is not None:
|
||||
try:
|
||||
workflow = WorkflowWithoutIDValidator.validate_json(workflow_raw)
|
||||
except ValidationError:
|
||||
ApiDependencies.invoker.services.logger.warn("Failed to parse metadata for uploaded image")
|
||||
pass
|
||||
if isinstance(workflow_raw, str):
|
||||
_workflow = workflow_raw
|
||||
else:
|
||||
ApiDependencies.invoker.services.logger.debug("Failed to parse workflow for uploaded image")
|
||||
pass
|
||||
|
||||
# attempt to extract graph from image
|
||||
graph_raw = pil_image.info.get("invokeai_graph", None)
|
||||
if isinstance(graph_raw, str):
|
||||
_graph = graph_raw
|
||||
else:
|
||||
ApiDependencies.invoker.services.logger.debug("Failed to parse graph for uploaded image")
|
||||
pass
|
||||
|
||||
try:
|
||||
image_dto = ApiDependencies.invoker.services.images.create(
|
||||
@@ -86,8 +95,9 @@ async def upload_image(
|
||||
image_category=image_category,
|
||||
session_id=session_id,
|
||||
board_id=board_id,
|
||||
metadata=metadata,
|
||||
workflow=workflow,
|
||||
metadata=_metadata,
|
||||
workflow=_workflow,
|
||||
graph=_graph,
|
||||
is_intermediate=is_intermediate,
|
||||
)
|
||||
|
||||
@@ -185,14 +195,21 @@ async def get_image_metadata(
|
||||
raise HTTPException(status_code=404)
|
||||
|
||||
|
||||
class WorkflowAndGraphResponse(BaseModel):
|
||||
workflow: Optional[str] = Field(description="The workflow used to generate the image, as stringified JSON")
|
||||
graph: Optional[str] = Field(description="The graph used to generate the image, as stringified JSON")
|
||||
|
||||
|
||||
@images_router.get(
|
||||
"/i/{image_name}/workflow", operation_id="get_image_workflow", response_model=Optional[WorkflowWithoutID]
|
||||
"/i/{image_name}/workflow", operation_id="get_image_workflow", response_model=WorkflowAndGraphResponse
|
||||
)
|
||||
async def get_image_workflow(
|
||||
image_name: str = Path(description="The name of image whose workflow to get"),
|
||||
) -> Optional[WorkflowWithoutID]:
|
||||
) -> WorkflowAndGraphResponse:
|
||||
try:
|
||||
return ApiDependencies.invoker.services.images.get_workflow(image_name)
|
||||
workflow = ApiDependencies.invoker.services.images.get_workflow(image_name)
|
||||
graph = ApiDependencies.invoker.services.images.get_graph(image_name)
|
||||
return WorkflowAndGraphResponse(workflow=workflow, graph=graph)
|
||||
except Exception:
|
||||
raise HTTPException(status_code=404)
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@ import pathlib
|
||||
import shutil
|
||||
import traceback
|
||||
from copy import deepcopy
|
||||
from typing import Any, Dict, List, Optional
|
||||
from typing import Any, Dict, List, Optional, Type
|
||||
|
||||
from fastapi import Body, Path, Query, Response, UploadFile
|
||||
from fastapi.responses import FileResponse
|
||||
@@ -16,6 +16,7 @@ from pydantic import AnyHttpUrl, BaseModel, ConfigDict, Field
|
||||
from starlette.exceptions import HTTPException
|
||||
from typing_extensions import Annotated
|
||||
|
||||
from invokeai.app.services.model_images.model_images_common import ModelImageFileNotFoundException
|
||||
from invokeai.app.services.model_install import ModelInstallJob
|
||||
from invokeai.app.services.model_records import (
|
||||
DuplicateModelException,
|
||||
@@ -52,6 +53,13 @@ class ModelsList(BaseModel):
|
||||
model_config = ConfigDict(use_enum_values=True)
|
||||
|
||||
|
||||
def add_cover_image_to_model_config(config: AnyModelConfig, dependencies: Type[ApiDependencies]) -> AnyModelConfig:
|
||||
"""Add a cover image URL to a model configuration."""
|
||||
cover_image = dependencies.invoker.services.model_images.get_url(config.key)
|
||||
config.cover_image = cover_image
|
||||
return config
|
||||
|
||||
|
||||
##############################################################################
|
||||
# These are example inputs and outputs that are used in places where Swagger
|
||||
# is unable to generate a correct example.
|
||||
@@ -118,8 +126,7 @@ async def list_model_records(
|
||||
record_store.search_by_attr(model_type=model_type, model_name=model_name, model_format=model_format)
|
||||
)
|
||||
for model in found_models:
|
||||
cover_image = ApiDependencies.invoker.services.model_images.get_url(model.key)
|
||||
model.cover_image = cover_image
|
||||
model = add_cover_image_to_model_config(model, ApiDependencies)
|
||||
return ModelsList(models=found_models)
|
||||
|
||||
|
||||
@@ -160,12 +167,9 @@ async def get_model_record(
|
||||
key: str = Path(description="Key of the model record to fetch."),
|
||||
) -> AnyModelConfig:
|
||||
"""Get a model record"""
|
||||
record_store = ApiDependencies.invoker.services.model_manager.store
|
||||
try:
|
||||
config: AnyModelConfig = record_store.get_model(key)
|
||||
cover_image = ApiDependencies.invoker.services.model_images.get_url(key)
|
||||
config.cover_image = cover_image
|
||||
return config
|
||||
config = ApiDependencies.invoker.services.model_manager.store.get_model(key)
|
||||
return add_cover_image_to_model_config(config, ApiDependencies)
|
||||
except UnknownModelException as e:
|
||||
raise HTTPException(status_code=404, detail=str(e))
|
||||
|
||||
@@ -294,14 +298,15 @@ async def update_model_record(
|
||||
installer = ApiDependencies.invoker.services.model_manager.install
|
||||
try:
|
||||
record_store.update_model(key, changes=changes)
|
||||
model_response: AnyModelConfig = installer.sync_model_path(key)
|
||||
config = installer.sync_model_path(key)
|
||||
config = add_cover_image_to_model_config(config, ApiDependencies)
|
||||
logger.info(f"Updated model: {key}")
|
||||
except UnknownModelException as e:
|
||||
raise HTTPException(status_code=404, detail=str(e))
|
||||
except ValueError as e:
|
||||
logger.error(str(e))
|
||||
raise HTTPException(status_code=409, detail=str(e))
|
||||
return model_response
|
||||
return config
|
||||
|
||||
|
||||
@model_manager_router.get(
|
||||
@@ -648,6 +653,14 @@ async def convert_model(
|
||||
logger.error(str(e))
|
||||
raise HTTPException(status_code=409, detail=str(e))
|
||||
|
||||
# Update the model image if the model had one
|
||||
try:
|
||||
model_image = ApiDependencies.invoker.services.model_images.get(key)
|
||||
ApiDependencies.invoker.services.model_images.save(model_image, new_key)
|
||||
ApiDependencies.invoker.services.model_images.delete(key)
|
||||
except ModelImageFileNotFoundException:
|
||||
pass
|
||||
|
||||
# delete the original safetensors file
|
||||
installer.delete(key)
|
||||
|
||||
@@ -655,7 +668,8 @@ async def convert_model(
|
||||
shutil.rmtree(cache_path)
|
||||
|
||||
# return the config record for the new diffusers directory
|
||||
new_config: AnyModelConfig = store.get_model(new_key)
|
||||
new_config = store.get_model(new_key)
|
||||
new_config = add_cover_image_to_model_config(new_config, ApiDependencies)
|
||||
return new_config
|
||||
|
||||
|
||||
|
||||
@@ -203,6 +203,7 @@ async def get_batch_status(
|
||||
responses={
|
||||
200: {"model": SessionQueueItem},
|
||||
},
|
||||
response_model_exclude_none=True,
|
||||
)
|
||||
async def get_queue_item(
|
||||
queue_id: str = Path(description="The queue id to perform this operation on"),
|
||||
|
||||
@@ -164,6 +164,12 @@ def custom_openapi() -> dict[str, Any]:
|
||||
for schema_key, schema_json in additional_schemas[1]["$defs"].items():
|
||||
openapi_schema["components"]["schemas"][schema_key] = schema_json
|
||||
|
||||
openapi_schema["components"]["schemas"]["InvocationOutputMap"] = {
|
||||
"type": "object",
|
||||
"properties": {},
|
||||
"required": [],
|
||||
}
|
||||
|
||||
# Add a reference to the output type to additionalProperties of the invoker schema
|
||||
for invoker in all_invocations:
|
||||
invoker_name = invoker.__name__ # type: ignore [attr-defined] # this is a valid attribute
|
||||
@@ -172,6 +178,8 @@ def custom_openapi() -> dict[str, Any]:
|
||||
invoker_schema = openapi_schema["components"]["schemas"][f"{invoker_name}"]
|
||||
outputs_ref = {"$ref": f"#/components/schemas/{output_type_title}"}
|
||||
invoker_schema["output"] = outputs_ref
|
||||
openapi_schema["components"]["schemas"]["InvocationOutputMap"]["properties"][invoker.get_type()] = outputs_ref
|
||||
openapi_schema["components"]["schemas"]["InvocationOutputMap"]["required"].append(invoker.get_type())
|
||||
invoker_schema["class"] = "invocation"
|
||||
|
||||
# This code no longer seems to be necessary?
|
||||
|
||||
@@ -24,7 +24,6 @@ from pydantic import BaseModel, Field, field_validator, model_validator
|
||||
from invokeai.app.invocations.fields import (
|
||||
FieldDescriptions,
|
||||
ImageField,
|
||||
Input,
|
||||
InputField,
|
||||
OutputField,
|
||||
UIType,
|
||||
@@ -80,13 +79,13 @@ class ControlOutput(BaseInvocationOutput):
|
||||
control: ControlField = OutputField(description=FieldDescriptions.control)
|
||||
|
||||
|
||||
@invocation("controlnet", title="ControlNet", tags=["controlnet"], category="controlnet", version="1.1.1")
|
||||
@invocation("controlnet", title="ControlNet", tags=["controlnet"], category="controlnet", version="1.1.2")
|
||||
class ControlNetInvocation(BaseInvocation):
|
||||
"""Collects ControlNet info to pass to other nodes"""
|
||||
|
||||
image: ImageField = InputField(description="The control image")
|
||||
control_model: ModelIdentifierField = InputField(
|
||||
description=FieldDescriptions.controlnet_model, input=Input.Direct, ui_type=UIType.ControlNetModel
|
||||
description=FieldDescriptions.controlnet_model, ui_type=UIType.ControlNetModel
|
||||
)
|
||||
control_weight: Union[float, List[float]] = InputField(
|
||||
default=1.0, ge=-1, le=2, description="The weight given to the ControlNet"
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Literal, Optional
|
||||
|
||||
import cv2
|
||||
@@ -504,7 +503,7 @@ class ImageInverseLerpInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
title="Blur NSFW Image",
|
||||
tags=["image", "nsfw"],
|
||||
category="image",
|
||||
version="1.2.2",
|
||||
version="1.2.3",
|
||||
)
|
||||
class ImageNSFWBlurInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Add blur to NSFW-flagged images"""
|
||||
@@ -516,23 +515,12 @@ class ImageNSFWBlurInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
|
||||
logger = context.logger
|
||||
logger.debug("Running NSFW checker")
|
||||
if SafetyChecker.has_nsfw_concept(image):
|
||||
logger.info("A potentially NSFW image has been detected. Image will be blurred.")
|
||||
blurry_image = image.filter(filter=ImageFilter.GaussianBlur(radius=32))
|
||||
caution = self._get_caution_img()
|
||||
blurry_image.paste(caution, (0, 0), caution)
|
||||
image = blurry_image
|
||||
image = SafetyChecker.blur_if_nsfw(image)
|
||||
|
||||
image_dto = context.images.save(image=image)
|
||||
|
||||
return ImageOutput.build(image_dto)
|
||||
|
||||
def _get_caution_img(self) -> Image.Image:
|
||||
import invokeai.app.assets.images as image_assets
|
||||
|
||||
caution = Image.open(Path(image_assets.__path__[0]) / "caution.png")
|
||||
return caution.resize((caution.width // 2, caution.height // 2))
|
||||
|
||||
|
||||
@invocation(
|
||||
"img_watermark",
|
||||
|
||||
@@ -5,7 +5,7 @@ from pydantic import BaseModel, Field, field_validator, model_validator
|
||||
from typing_extensions import Self
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
|
||||
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, TensorField, UIType
|
||||
from invokeai.app.invocations.fields import FieldDescriptions, InputField, OutputField, TensorField, UIType
|
||||
from invokeai.app.invocations.model import ModelIdentifierField
|
||||
from invokeai.app.invocations.primitives import ImageField
|
||||
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
|
||||
@@ -58,7 +58,7 @@ class IPAdapterOutput(BaseInvocationOutput):
|
||||
CLIP_VISION_MODEL_MAP = {"ViT-H": "ip_adapter_sd_image_encoder", "ViT-G": "ip_adapter_sdxl_image_encoder"}
|
||||
|
||||
|
||||
@invocation("ip_adapter", title="IP-Adapter", tags=["ip_adapter", "control"], category="ip_adapter", version="1.4.0")
|
||||
@invocation("ip_adapter", title="IP-Adapter", tags=["ip_adapter", "control"], category="ip_adapter", version="1.4.1")
|
||||
class IPAdapterInvocation(BaseInvocation):
|
||||
"""Collects IP-Adapter info to pass to other nodes."""
|
||||
|
||||
@@ -67,7 +67,6 @@ class IPAdapterInvocation(BaseInvocation):
|
||||
ip_adapter_model: ModelIdentifierField = InputField(
|
||||
description="The IP-Adapter model.",
|
||||
title="IP-Adapter Model",
|
||||
input=Input.Direct,
|
||||
ui_order=-1,
|
||||
ui_type=UIType.IPAdapterModel,
|
||||
)
|
||||
|
||||
@@ -586,13 +586,6 @@ class DenoiseLatentsInvocation(BaseInvocation):
|
||||
unet: UNet2DConditionModel,
|
||||
scheduler: Scheduler,
|
||||
) -> StableDiffusionGeneratorPipeline:
|
||||
# TODO:
|
||||
# configure_model_padding(
|
||||
# unet,
|
||||
# self.seamless,
|
||||
# self.seamless_axes,
|
||||
# )
|
||||
|
||||
class FakeVae:
|
||||
class FakeVaeConfig:
|
||||
def __init__(self) -> None:
|
||||
|
||||
@@ -11,6 +11,7 @@ from invokeai.backend.model_manager.config import AnyModelConfig, BaseModelType,
|
||||
from .baseinvocation import (
|
||||
BaseInvocation,
|
||||
BaseInvocationOutput,
|
||||
Classification,
|
||||
invocation,
|
||||
invocation_output,
|
||||
)
|
||||
@@ -93,19 +94,46 @@ class ModelLoaderOutput(UNetOutput, CLIPOutput, VAEOutput):
|
||||
pass
|
||||
|
||||
|
||||
@invocation_output("model_identifier_output")
|
||||
class ModelIdentifierOutput(BaseInvocationOutput):
|
||||
"""Model identifier output"""
|
||||
|
||||
model: ModelIdentifierField = OutputField(description="Model identifier", title="Model")
|
||||
|
||||
|
||||
@invocation(
|
||||
"model_identifier",
|
||||
title="Model identifier",
|
||||
tags=["model"],
|
||||
category="model",
|
||||
version="1.0.0",
|
||||
classification=Classification.Prototype,
|
||||
)
|
||||
class ModelIdentifierInvocation(BaseInvocation):
|
||||
"""Selects any model, outputting it its identifier. Be careful with this one! The identifier will be accepted as
|
||||
input for any model, even if the model types don't match. If you connect this to a mismatched input, you'll get an
|
||||
error."""
|
||||
|
||||
model: ModelIdentifierField = InputField(description="The model to select", title="Model")
|
||||
|
||||
def invoke(self, context: InvocationContext) -> ModelIdentifierOutput:
|
||||
if not context.models.exists(self.model.key):
|
||||
raise Exception(f"Unknown model {self.model.key}")
|
||||
|
||||
return ModelIdentifierOutput(model=self.model)
|
||||
|
||||
|
||||
@invocation(
|
||||
"main_model_loader",
|
||||
title="Main Model",
|
||||
tags=["model"],
|
||||
category="model",
|
||||
version="1.0.2",
|
||||
version="1.0.3",
|
||||
)
|
||||
class MainModelLoaderInvocation(BaseInvocation):
|
||||
"""Loads a main model, outputting its submodels."""
|
||||
|
||||
model: ModelIdentifierField = InputField(
|
||||
description=FieldDescriptions.main_model, input=Input.Direct, ui_type=UIType.MainModel
|
||||
)
|
||||
model: ModelIdentifierField = InputField(description=FieldDescriptions.main_model, ui_type=UIType.MainModel)
|
||||
# TODO: precision?
|
||||
|
||||
def invoke(self, context: InvocationContext) -> ModelLoaderOutput:
|
||||
@@ -134,12 +162,12 @@ class LoRALoaderOutput(BaseInvocationOutput):
|
||||
clip: Optional[CLIPField] = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP")
|
||||
|
||||
|
||||
@invocation("lora_loader", title="LoRA", tags=["model"], category="model", version="1.0.2")
|
||||
@invocation("lora_loader", title="LoRA", tags=["model"], category="model", version="1.0.3")
|
||||
class LoRALoaderInvocation(BaseInvocation):
|
||||
"""Apply selected lora to unet and text_encoder."""
|
||||
|
||||
lora: ModelIdentifierField = InputField(
|
||||
description=FieldDescriptions.lora_model, input=Input.Direct, title="LoRA", ui_type=UIType.LoRAModel
|
||||
description=FieldDescriptions.lora_model, title="LoRA", ui_type=UIType.LoRAModel
|
||||
)
|
||||
weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight)
|
||||
unet: Optional[UNetField] = InputField(
|
||||
@@ -190,6 +218,75 @@ class LoRALoaderInvocation(BaseInvocation):
|
||||
return output
|
||||
|
||||
|
||||
@invocation_output("lora_selector_output")
|
||||
class LoRASelectorOutput(BaseInvocationOutput):
|
||||
"""Model loader output"""
|
||||
|
||||
lora: LoRAField = OutputField(description="LoRA model and weight", title="LoRA")
|
||||
|
||||
|
||||
@invocation("lora_selector", title="LoRA Selector", tags=["model"], category="model", version="1.0.1")
|
||||
class LoRASelectorInvocation(BaseInvocation):
|
||||
"""Selects a LoRA model and weight."""
|
||||
|
||||
lora: ModelIdentifierField = InputField(
|
||||
description=FieldDescriptions.lora_model, title="LoRA", ui_type=UIType.LoRAModel
|
||||
)
|
||||
weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight)
|
||||
|
||||
def invoke(self, context: InvocationContext) -> LoRASelectorOutput:
|
||||
return LoRASelectorOutput(lora=LoRAField(lora=self.lora, weight=self.weight))
|
||||
|
||||
|
||||
@invocation("lora_collection_loader", title="LoRA Collection Loader", tags=["model"], category="model", version="1.0.0")
|
||||
class LoRACollectionLoader(BaseInvocation):
|
||||
"""Applies a collection of LoRAs to the provided UNet and CLIP models."""
|
||||
|
||||
loras: LoRAField | list[LoRAField] = InputField(
|
||||
description="LoRA models and weights. May be a single LoRA or collection.", title="LoRAs"
|
||||
)
|
||||
unet: Optional[UNetField] = InputField(
|
||||
default=None,
|
||||
description=FieldDescriptions.unet,
|
||||
input=Input.Connection,
|
||||
title="UNet",
|
||||
)
|
||||
clip: Optional[CLIPField] = InputField(
|
||||
default=None,
|
||||
description=FieldDescriptions.clip,
|
||||
input=Input.Connection,
|
||||
title="CLIP",
|
||||
)
|
||||
|
||||
def invoke(self, context: InvocationContext) -> LoRALoaderOutput:
|
||||
output = LoRALoaderOutput()
|
||||
loras = self.loras if isinstance(self.loras, list) else [self.loras]
|
||||
added_loras: list[str] = []
|
||||
|
||||
for lora in loras:
|
||||
if lora.lora.key in added_loras:
|
||||
continue
|
||||
|
||||
if not context.models.exists(lora.lora.key):
|
||||
raise Exception(f"Unknown lora: {lora.lora.key}!")
|
||||
|
||||
assert lora.lora.base in (BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2)
|
||||
|
||||
added_loras.append(lora.lora.key)
|
||||
|
||||
if self.unet is not None:
|
||||
if output.unet is None:
|
||||
output.unet = self.unet.model_copy(deep=True)
|
||||
output.unet.loras.append(lora)
|
||||
|
||||
if self.clip is not None:
|
||||
if output.clip is None:
|
||||
output.clip = self.clip.model_copy(deep=True)
|
||||
output.clip.loras.append(lora)
|
||||
|
||||
return output
|
||||
|
||||
|
||||
@invocation_output("sdxl_lora_loader_output")
|
||||
class SDXLLoRALoaderOutput(BaseInvocationOutput):
|
||||
"""SDXL LoRA Loader Output"""
|
||||
@@ -204,13 +301,13 @@ class SDXLLoRALoaderOutput(BaseInvocationOutput):
|
||||
title="SDXL LoRA",
|
||||
tags=["lora", "model"],
|
||||
category="model",
|
||||
version="1.0.2",
|
||||
version="1.0.3",
|
||||
)
|
||||
class SDXLLoRALoaderInvocation(BaseInvocation):
|
||||
"""Apply selected lora to unet and text_encoder."""
|
||||
|
||||
lora: ModelIdentifierField = InputField(
|
||||
description=FieldDescriptions.lora_model, input=Input.Direct, title="LoRA", ui_type=UIType.LoRAModel
|
||||
description=FieldDescriptions.lora_model, title="LoRA", ui_type=UIType.LoRAModel
|
||||
)
|
||||
weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight)
|
||||
unet: Optional[UNetField] = InputField(
|
||||
@@ -279,12 +376,78 @@ class SDXLLoRALoaderInvocation(BaseInvocation):
|
||||
return output
|
||||
|
||||
|
||||
@invocation("vae_loader", title="VAE", tags=["vae", "model"], category="model", version="1.0.2")
|
||||
@invocation(
|
||||
"sdxl_lora_collection_loader",
|
||||
title="SDXL LoRA Collection Loader",
|
||||
tags=["model"],
|
||||
category="model",
|
||||
version="1.0.0",
|
||||
)
|
||||
class SDXLLoRACollectionLoader(BaseInvocation):
|
||||
"""Applies a collection of SDXL LoRAs to the provided UNet and CLIP models."""
|
||||
|
||||
loras: LoRAField | list[LoRAField] = InputField(
|
||||
description="LoRA models and weights. May be a single LoRA or collection.", title="LoRAs"
|
||||
)
|
||||
unet: Optional[UNetField] = InputField(
|
||||
default=None,
|
||||
description=FieldDescriptions.unet,
|
||||
input=Input.Connection,
|
||||
title="UNet",
|
||||
)
|
||||
clip: Optional[CLIPField] = InputField(
|
||||
default=None,
|
||||
description=FieldDescriptions.clip,
|
||||
input=Input.Connection,
|
||||
title="CLIP",
|
||||
)
|
||||
clip2: Optional[CLIPField] = InputField(
|
||||
default=None,
|
||||
description=FieldDescriptions.clip,
|
||||
input=Input.Connection,
|
||||
title="CLIP 2",
|
||||
)
|
||||
|
||||
def invoke(self, context: InvocationContext) -> SDXLLoRALoaderOutput:
|
||||
output = SDXLLoRALoaderOutput()
|
||||
loras = self.loras if isinstance(self.loras, list) else [self.loras]
|
||||
added_loras: list[str] = []
|
||||
|
||||
for lora in loras:
|
||||
if lora.lora.key in added_loras:
|
||||
continue
|
||||
|
||||
if not context.models.exists(lora.lora.key):
|
||||
raise Exception(f"Unknown lora: {lora.lora.key}!")
|
||||
|
||||
assert lora.lora.base is BaseModelType.StableDiffusionXL
|
||||
|
||||
added_loras.append(lora.lora.key)
|
||||
|
||||
if self.unet is not None:
|
||||
if output.unet is None:
|
||||
output.unet = self.unet.model_copy(deep=True)
|
||||
output.unet.loras.append(lora)
|
||||
|
||||
if self.clip is not None:
|
||||
if output.clip is None:
|
||||
output.clip = self.clip.model_copy(deep=True)
|
||||
output.clip.loras.append(lora)
|
||||
|
||||
if self.clip2 is not None:
|
||||
if output.clip2 is None:
|
||||
output.clip2 = self.clip2.model_copy(deep=True)
|
||||
output.clip2.loras.append(lora)
|
||||
|
||||
return output
|
||||
|
||||
|
||||
@invocation("vae_loader", title="VAE", tags=["vae", "model"], category="model", version="1.0.3")
|
||||
class VAELoaderInvocation(BaseInvocation):
|
||||
"""Loads a VAE model, outputting a VaeLoaderOutput"""
|
||||
|
||||
vae_model: ModelIdentifierField = InputField(
|
||||
description=FieldDescriptions.vae_model, input=Input.Direct, title="VAE", ui_type=UIType.VAEModel
|
||||
description=FieldDescriptions.vae_model, title="VAE", ui_type=UIType.VAEModel
|
||||
)
|
||||
|
||||
def invoke(self, context: InvocationContext) -> VAEOutput:
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
|
||||
from invokeai.app.invocations.fields import FieldDescriptions, InputField, OutputField, UIType
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.backend.model_manager import SubModelType
|
||||
|
||||
@@ -30,12 +30,12 @@ class SDXLRefinerModelLoaderOutput(BaseInvocationOutput):
|
||||
vae: VAEField = OutputField(description=FieldDescriptions.vae, title="VAE")
|
||||
|
||||
|
||||
@invocation("sdxl_model_loader", title="SDXL Main Model", tags=["model", "sdxl"], category="model", version="1.0.2")
|
||||
@invocation("sdxl_model_loader", title="SDXL Main Model", tags=["model", "sdxl"], category="model", version="1.0.3")
|
||||
class SDXLModelLoaderInvocation(BaseInvocation):
|
||||
"""Loads an sdxl base model, outputting its submodels."""
|
||||
|
||||
model: ModelIdentifierField = InputField(
|
||||
description=FieldDescriptions.sdxl_main_model, input=Input.Direct, ui_type=UIType.SDXLMainModel
|
||||
description=FieldDescriptions.sdxl_main_model, ui_type=UIType.SDXLMainModel
|
||||
)
|
||||
# TODO: precision?
|
||||
|
||||
@@ -67,13 +67,13 @@ class SDXLModelLoaderInvocation(BaseInvocation):
|
||||
title="SDXL Refiner Model",
|
||||
tags=["model", "sdxl", "refiner"],
|
||||
category="model",
|
||||
version="1.0.2",
|
||||
version="1.0.3",
|
||||
)
|
||||
class SDXLRefinerModelLoaderInvocation(BaseInvocation):
|
||||
"""Loads an sdxl refiner model, outputting its submodels."""
|
||||
|
||||
model: ModelIdentifierField = InputField(
|
||||
description=FieldDescriptions.sdxl_refiner_model, input=Input.Direct, ui_type=UIType.SDXLRefinerModel
|
||||
description=FieldDescriptions.sdxl_refiner_model, ui_type=UIType.SDXLRefinerModel
|
||||
)
|
||||
# TODO: precision?
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ from invokeai.app.invocations.baseinvocation import (
|
||||
invocation,
|
||||
invocation_output,
|
||||
)
|
||||
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, Input, InputField, OutputField, UIType
|
||||
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, OutputField, UIType
|
||||
from invokeai.app.invocations.model import ModelIdentifierField
|
||||
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
@@ -45,7 +45,7 @@ class T2IAdapterOutput(BaseInvocationOutput):
|
||||
|
||||
|
||||
@invocation(
|
||||
"t2i_adapter", title="T2I-Adapter", tags=["t2i_adapter", "control"], category="t2i_adapter", version="1.0.2"
|
||||
"t2i_adapter", title="T2I-Adapter", tags=["t2i_adapter", "control"], category="t2i_adapter", version="1.0.3"
|
||||
)
|
||||
class T2IAdapterInvocation(BaseInvocation):
|
||||
"""Collects T2I-Adapter info to pass to other nodes."""
|
||||
@@ -55,7 +55,6 @@ class T2IAdapterInvocation(BaseInvocation):
|
||||
t2i_adapter_model: ModelIdentifierField = InputField(
|
||||
description="The T2I-Adapter model.",
|
||||
title="T2I-Adapter Model",
|
||||
input=Input.Direct,
|
||||
ui_order=-1,
|
||||
ui_type=UIType.T2IAdapterModel,
|
||||
)
|
||||
|
||||
@@ -121,7 +121,10 @@ class EventServiceBase:
|
||||
node: dict,
|
||||
source_node_id: str,
|
||||
error_type: str,
|
||||
error: str,
|
||||
error_message: str,
|
||||
error_traceback: str,
|
||||
user_id: str | None,
|
||||
project_id: str | None,
|
||||
) -> None:
|
||||
"""Emitted when an invocation has completed"""
|
||||
self.__emit_queue_event(
|
||||
@@ -134,7 +137,10 @@ class EventServiceBase:
|
||||
"node": node,
|
||||
"source_node_id": source_node_id,
|
||||
"error_type": error_type,
|
||||
"error": error,
|
||||
"error_message": error_message,
|
||||
"error_traceback": error_traceback,
|
||||
"user_id": user_id,
|
||||
"project_id": project_id,
|
||||
},
|
||||
)
|
||||
|
||||
@@ -253,7 +259,9 @@ class EventServiceBase:
|
||||
"status": session_queue_item.status,
|
||||
"batch_id": session_queue_item.batch_id,
|
||||
"session_id": session_queue_item.session_id,
|
||||
"error": session_queue_item.error,
|
||||
"error_type": session_queue_item.error_type,
|
||||
"error_message": session_queue_item.error_message,
|
||||
"error_traceback": session_queue_item.error_traceback,
|
||||
"created_at": str(session_queue_item.created_at) if session_queue_item.created_at else None,
|
||||
"updated_at": str(session_queue_item.updated_at) if session_queue_item.updated_at else None,
|
||||
"started_at": str(session_queue_item.started_at) if session_queue_item.started_at else None,
|
||||
|
||||
@@ -4,9 +4,6 @@ from typing import Optional
|
||||
|
||||
from PIL.Image import Image as PILImageType
|
||||
|
||||
from invokeai.app.invocations.fields import MetadataField
|
||||
from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID
|
||||
|
||||
|
||||
class ImageFileStorageBase(ABC):
|
||||
"""Low-level service responsible for storing and retrieving image files."""
|
||||
@@ -33,8 +30,9 @@ class ImageFileStorageBase(ABC):
|
||||
self,
|
||||
image: PILImageType,
|
||||
image_name: str,
|
||||
metadata: Optional[MetadataField] = None,
|
||||
workflow: Optional[WorkflowWithoutID] = None,
|
||||
metadata: Optional[str] = None,
|
||||
workflow: Optional[str] = None,
|
||||
graph: Optional[str] = None,
|
||||
thumbnail_size: int = 256,
|
||||
) -> None:
|
||||
"""Saves an image and a 256x256 WEBP thumbnail. Returns a tuple of the image name, thumbnail name, and created timestamp."""
|
||||
@@ -46,6 +44,11 @@ class ImageFileStorageBase(ABC):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_workflow(self, image_name: str) -> Optional[WorkflowWithoutID]:
|
||||
def get_workflow(self, image_name: str) -> Optional[str]:
|
||||
"""Gets the workflow of an image."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_graph(self, image_name: str) -> Optional[str]:
|
||||
"""Gets the graph of an image."""
|
||||
pass
|
||||
|
||||
@@ -7,9 +7,7 @@ from PIL import Image, PngImagePlugin
|
||||
from PIL.Image import Image as PILImageType
|
||||
from send2trash import send2trash
|
||||
|
||||
from invokeai.app.invocations.fields import MetadataField
|
||||
from invokeai.app.services.invoker import Invoker
|
||||
from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID
|
||||
from invokeai.app.util.thumbnails import get_thumbnail_name, make_thumbnail
|
||||
|
||||
from .image_files_base import ImageFileStorageBase
|
||||
@@ -56,8 +54,9 @@ class DiskImageFileStorage(ImageFileStorageBase):
|
||||
self,
|
||||
image: PILImageType,
|
||||
image_name: str,
|
||||
metadata: Optional[MetadataField] = None,
|
||||
workflow: Optional[WorkflowWithoutID] = None,
|
||||
metadata: Optional[str] = None,
|
||||
workflow: Optional[str] = None,
|
||||
graph: Optional[str] = None,
|
||||
thumbnail_size: int = 256,
|
||||
) -> None:
|
||||
try:
|
||||
@@ -68,13 +67,14 @@ class DiskImageFileStorage(ImageFileStorageBase):
|
||||
info_dict = {}
|
||||
|
||||
if metadata is not None:
|
||||
metadata_json = metadata.model_dump_json()
|
||||
info_dict["invokeai_metadata"] = metadata_json
|
||||
pnginfo.add_text("invokeai_metadata", metadata_json)
|
||||
info_dict["invokeai_metadata"] = metadata
|
||||
pnginfo.add_text("invokeai_metadata", metadata)
|
||||
if workflow is not None:
|
||||
workflow_json = workflow.model_dump_json()
|
||||
info_dict["invokeai_workflow"] = workflow_json
|
||||
pnginfo.add_text("invokeai_workflow", workflow_json)
|
||||
info_dict["invokeai_workflow"] = workflow
|
||||
pnginfo.add_text("invokeai_workflow", workflow)
|
||||
if graph is not None:
|
||||
info_dict["invokeai_graph"] = graph
|
||||
pnginfo.add_text("invokeai_graph", graph)
|
||||
|
||||
# When saving the image, the image object's info field is not populated. We need to set it
|
||||
image.info = info_dict
|
||||
@@ -129,11 +129,18 @@ class DiskImageFileStorage(ImageFileStorageBase):
|
||||
path = path if isinstance(path, Path) else Path(path)
|
||||
return path.exists()
|
||||
|
||||
def get_workflow(self, image_name: str) -> WorkflowWithoutID | None:
|
||||
def get_workflow(self, image_name: str) -> str | None:
|
||||
image = self.get(image_name)
|
||||
workflow = image.info.get("invokeai_workflow", None)
|
||||
if workflow is not None:
|
||||
return WorkflowWithoutID.model_validate_json(workflow)
|
||||
if isinstance(workflow, str):
|
||||
return workflow
|
||||
return None
|
||||
|
||||
def get_graph(self, image_name: str) -> str | None:
|
||||
image = self.get(image_name)
|
||||
graph = image.info.get("invokeai_graph", None)
|
||||
if isinstance(graph, str):
|
||||
return graph
|
||||
return None
|
||||
|
||||
def __validate_storage_folders(self) -> None:
|
||||
|
||||
@@ -80,7 +80,7 @@ class ImageRecordStorageBase(ABC):
|
||||
starred: Optional[bool] = False,
|
||||
session_id: Optional[str] = None,
|
||||
node_id: Optional[str] = None,
|
||||
metadata: Optional[MetadataField] = None,
|
||||
metadata: Optional[str] = None,
|
||||
) -> datetime:
|
||||
"""Saves an image record."""
|
||||
pass
|
||||
|
||||
@@ -328,10 +328,9 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
|
||||
starred: Optional[bool] = False,
|
||||
session_id: Optional[str] = None,
|
||||
node_id: Optional[str] = None,
|
||||
metadata: Optional[MetadataField] = None,
|
||||
metadata: Optional[str] = None,
|
||||
) -> datetime:
|
||||
try:
|
||||
metadata_json = metadata.model_dump_json() if metadata is not None else None
|
||||
self._lock.acquire()
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
@@ -358,7 +357,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
|
||||
height,
|
||||
node_id,
|
||||
session_id,
|
||||
metadata_json,
|
||||
metadata,
|
||||
is_intermediate,
|
||||
starred,
|
||||
has_workflow,
|
||||
|
||||
@@ -12,7 +12,6 @@ from invokeai.app.services.image_records.image_records_common import (
|
||||
)
|
||||
from invokeai.app.services.images.images_common import ImageDTO
|
||||
from invokeai.app.services.shared.pagination import OffsetPaginatedResults
|
||||
from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID
|
||||
|
||||
|
||||
class ImageServiceABC(ABC):
|
||||
@@ -51,8 +50,9 @@ class ImageServiceABC(ABC):
|
||||
session_id: Optional[str] = None,
|
||||
board_id: Optional[str] = None,
|
||||
is_intermediate: Optional[bool] = False,
|
||||
metadata: Optional[MetadataField] = None,
|
||||
workflow: Optional[WorkflowWithoutID] = None,
|
||||
metadata: Optional[str] = None,
|
||||
workflow: Optional[str] = None,
|
||||
graph: Optional[str] = None,
|
||||
) -> ImageDTO:
|
||||
"""Creates an image, storing the file and its metadata."""
|
||||
pass
|
||||
@@ -87,7 +87,12 @@ class ImageServiceABC(ABC):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_workflow(self, image_name: str) -> Optional[WorkflowWithoutID]:
|
||||
def get_workflow(self, image_name: str) -> Optional[str]:
|
||||
"""Gets an image's workflow."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_graph(self, image_name: str) -> Optional[str]:
|
||||
"""Gets an image's workflow."""
|
||||
pass
|
||||
|
||||
|
||||
@@ -5,7 +5,6 @@ from PIL.Image import Image as PILImageType
|
||||
from invokeai.app.invocations.fields import MetadataField
|
||||
from invokeai.app.services.invoker import Invoker
|
||||
from invokeai.app.services.shared.pagination import OffsetPaginatedResults
|
||||
from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID
|
||||
|
||||
from ..image_files.image_files_common import (
|
||||
ImageFileDeleteException,
|
||||
@@ -42,8 +41,9 @@ class ImageService(ImageServiceABC):
|
||||
session_id: Optional[str] = None,
|
||||
board_id: Optional[str] = None,
|
||||
is_intermediate: Optional[bool] = False,
|
||||
metadata: Optional[MetadataField] = None,
|
||||
workflow: Optional[WorkflowWithoutID] = None,
|
||||
metadata: Optional[str] = None,
|
||||
workflow: Optional[str] = None,
|
||||
graph: Optional[str] = None,
|
||||
) -> ImageDTO:
|
||||
if image_origin not in ResourceOrigin:
|
||||
raise InvalidOriginException
|
||||
@@ -64,7 +64,7 @@ class ImageService(ImageServiceABC):
|
||||
image_category=image_category,
|
||||
width=width,
|
||||
height=height,
|
||||
has_workflow=workflow is not None,
|
||||
has_workflow=workflow is not None or graph is not None,
|
||||
# Meta fields
|
||||
is_intermediate=is_intermediate,
|
||||
# Nullable fields
|
||||
@@ -75,7 +75,7 @@ class ImageService(ImageServiceABC):
|
||||
if board_id is not None:
|
||||
self.__invoker.services.board_image_records.add_image_to_board(board_id=board_id, image_name=image_name)
|
||||
self.__invoker.services.image_files.save(
|
||||
image_name=image_name, image=image, metadata=metadata, workflow=workflow
|
||||
image_name=image_name, image=image, metadata=metadata, workflow=workflow, graph=graph
|
||||
)
|
||||
image_dto = self.get_dto(image_name)
|
||||
|
||||
@@ -157,7 +157,7 @@ class ImageService(ImageServiceABC):
|
||||
self.__invoker.services.logger.error("Problem getting image metadata")
|
||||
raise e
|
||||
|
||||
def get_workflow(self, image_name: str) -> Optional[WorkflowWithoutID]:
|
||||
def get_workflow(self, image_name: str) -> Optional[str]:
|
||||
try:
|
||||
return self.__invoker.services.image_files.get_workflow(image_name)
|
||||
except ImageFileNotFoundException:
|
||||
@@ -167,6 +167,16 @@ class ImageService(ImageServiceABC):
|
||||
self.__invoker.services.logger.error("Problem getting image workflow")
|
||||
raise
|
||||
|
||||
def get_graph(self, image_name: str) -> Optional[str]:
|
||||
try:
|
||||
return self.__invoker.services.image_files.get_graph(image_name)
|
||||
except ImageFileNotFoundException:
|
||||
self.__invoker.services.logger.error("Image file not found")
|
||||
raise
|
||||
except Exception:
|
||||
self.__invoker.services.logger.error("Problem getting image graph")
|
||||
raise
|
||||
|
||||
def get_path(self, image_name: str, thumbnail: bool = False) -> str:
|
||||
try:
|
||||
return str(self.__invoker.services.image_files.get_path(image_name, thumbnail))
|
||||
|
||||
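With `get_workflow` and `get_graph` now returning `Optional[str]`, callers receive the stored JSON text and decide how (and whether) to parse it. A rough usage sketch, assuming an `images` service shaped like `ImageServiceABC`; the helper name is illustrative:

import json
from typing import Optional

def load_workflow_and_graph(images, image_name: str) -> tuple[Optional[dict], Optional[dict]]:
    # Each call returns the raw JSON string stored with the image, or None.
    workflow_json = images.get_workflow(image_name)
    graph_json = images.get_graph(image_name)
    workflow = json.loads(workflow_json) if workflow_json else None
    graph = json.loads(graph_json) if graph_json else None
    return workflow, graph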
@@ -1,6 +1,49 @@
|
||||
from abc import ABC, abstractmethod
|
||||
from threading import Event
|
||||
from typing import Optional, Protocol
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput
|
||||
from invokeai.app.services.invocation_services import InvocationServices
|
||||
from invokeai.app.services.session_processor.session_processor_common import SessionProcessorStatus
|
||||
from invokeai.app.services.session_queue.session_queue_common import SessionQueueItem
|
||||
from invokeai.app.util.profiler import Profiler
|
||||
|
||||
|
||||
class SessionRunnerBase(ABC):
|
||||
"""
|
||||
Base class for session runner.
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def start(self, services: InvocationServices, cancel_event: Event, profiler: Optional[Profiler] = None) -> None:
|
||||
"""Starts the session runner.
|
||||
|
||||
Args:
|
||||
services: The invocation services.
|
||||
cancel_event: The cancel event.
|
||||
profiler: The profiler to use for session profiling via cProfile. Omit to disable profiling. Basic session
|
||||
stats will still be recorded and logged when profiling is disabled.
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def run(self, queue_item: SessionQueueItem) -> None:
|
||||
"""Runs a session.
|
||||
|
||||
Args:
|
||||
queue_item: The session to run.
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def run_node(self, invocation: BaseInvocation, queue_item: SessionQueueItem) -> None:
|
||||
"""Run a single node in the graph.
|
||||
|
||||
Args:
|
||||
invocation: The invocation to run.
|
||||
queue_item: The session queue item.
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
class SessionProcessorBase(ABC):
|
||||
@@ -26,3 +69,85 @@ class SessionProcessorBase(ABC):
|
||||
def get_status(self) -> SessionProcessorStatus:
|
||||
"""Gets the status of the session processor"""
|
||||
pass
|
||||
|
||||
|
||||
class OnBeforeRunNode(Protocol):
|
||||
def __call__(self, invocation: BaseInvocation, queue_item: SessionQueueItem) -> None:
|
||||
"""Callback to run before executing a node.
|
||||
|
||||
Args:
|
||||
invocation: The invocation that will be executed.
|
||||
queue_item: The session queue item.
|
||||
"""
|
||||
...
|
||||
|
||||
|
||||
class OnAfterRunNode(Protocol):
|
||||
def __call__(self, invocation: BaseInvocation, queue_item: SessionQueueItem, output: BaseInvocationOutput) -> None:
|
||||
"""Callback to run before executing a node.
|
||||
|
||||
Args:
|
||||
invocation: The invocation that was executed.
|
||||
queue_item: The session queue item.
|
||||
"""
|
||||
...
|
||||
|
||||
|
||||
class OnNodeError(Protocol):
|
||||
def __call__(
|
||||
self,
|
||||
invocation: BaseInvocation,
|
||||
queue_item: SessionQueueItem,
|
||||
error_type: str,
|
||||
error_message: str,
|
||||
error_traceback: str,
|
||||
) -> None:
|
||||
"""Callback to run when a node has an error.
|
||||
|
||||
Args:
|
||||
invocation: The invocation that errored.
|
||||
queue_item: The session queue item.
|
||||
error_type: The type of error, e.g. "ValueError".
|
||||
error_message: The error message, e.g. "Invalid value".
|
||||
error_traceback: The stringified error traceback.
|
||||
"""
|
||||
...
|
||||
|
||||
|
||||
class OnBeforeRunSession(Protocol):
|
||||
def __call__(self, queue_item: SessionQueueItem) -> None:
|
||||
"""Callback to run before executing a session.
|
||||
|
||||
Args:
|
||||
queue_item: The session queue item.
|
||||
"""
|
||||
...
|
||||
|
||||
|
||||
class OnAfterRunSession(Protocol):
|
||||
def __call__(self, queue_item: SessionQueueItem) -> None:
|
||||
"""Callback to run after executing a session.
|
||||
|
||||
Args:
|
||||
queue_item: The session queue item.
|
||||
"""
|
||||
...
|
||||
|
||||
|
||||
class OnNonFatalProcessorError(Protocol):
|
||||
def __call__(
|
||||
self,
|
||||
queue_item: Optional[SessionQueueItem],
|
||||
error_type: str,
|
||||
error_message: str,
|
||||
error_traceback: str,
|
||||
) -> None:
|
||||
"""Callback to run when a non-fatal error occurs in the processor.
|
||||
|
||||
Args:
|
||||
queue_item: The session queue item, if one was being executed when the error occurred.
|
||||
error_type: The type of error, e.g. "ValueError".
|
||||
error_message: The error message, e.g. "Invalid value".
|
||||
error_traceback: The stringified error traceback.
|
||||
"""
|
||||
...
|
||||
|
||||
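Because these hooks are `Protocol`s, any callable with a matching keyword signature satisfies them; no subclassing is required. Note that the runner invokes them with keyword arguments, so the parameter names must match. A sketch of a logging `OnNodeError` callback (the logger setup is illustrative):

import logging

logger = logging.getLogger(__name__)

def log_node_error(
    invocation: "BaseInvocation",
    queue_item: "SessionQueueItem",
    error_type: str,
    error_message: str,
    error_traceback: str,
) -> None:
    # Structurally satisfies OnNodeError - only the keyword signature matters.
    logger.error(
        "Node %s in queue item %s failed with %s: %s",
        invocation.id,
        queue_item.item_id,
        error_type,
        error_message,
    )
    logger.debug(error_traceback)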
@@ -7,21 +7,305 @@ from typing import Optional
|
||||
from fastapi_events.handlers.local import local_handler
|
||||
from fastapi_events.typing import Event as FastAPIEvent
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput
|
||||
from invokeai.app.services.events.events_base import EventServiceBase
|
||||
from invokeai.app.services.invocation_stats.invocation_stats_common import GESStatsNotFoundError
|
||||
from invokeai.app.services.session_processor.session_processor_base import (
|
||||
OnAfterRunNode,
|
||||
OnAfterRunSession,
|
||||
OnBeforeRunNode,
|
||||
OnBeforeRunSession,
|
||||
OnNodeError,
|
||||
OnNonFatalProcessorError,
|
||||
)
|
||||
from invokeai.app.services.session_processor.session_processor_common import CanceledException
|
||||
from invokeai.app.services.session_queue.session_queue_common import SessionQueueItem
|
||||
from invokeai.app.services.session_queue.session_queue_common import SessionQueueItem, SessionQueueItemNotFoundError
|
||||
from invokeai.app.services.shared.graph import NodeInputError
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContextData, build_invocation_context
|
||||
from invokeai.app.util.profiler import Profiler
|
||||
|
||||
from ..invoker import Invoker
|
||||
from .session_processor_base import SessionProcessorBase
|
||||
from .session_processor_base import InvocationServices, SessionProcessorBase, SessionRunnerBase
|
||||
from .session_processor_common import SessionProcessorStatus
|
||||
|
||||
|
||||
class DefaultSessionRunner(SessionRunnerBase):
|
||||
"""Processes a single session's invocations."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
on_before_run_session_callbacks: Optional[list[OnBeforeRunSession]] = None,
|
||||
on_before_run_node_callbacks: Optional[list[OnBeforeRunNode]] = None,
|
||||
on_after_run_node_callbacks: Optional[list[OnAfterRunNode]] = None,
|
||||
on_node_error_callbacks: Optional[list[OnNodeError]] = None,
|
||||
on_after_run_session_callbacks: Optional[list[OnAfterRunSession]] = None,
|
||||
):
|
||||
"""
|
||||
Args:
|
||||
on_before_run_session_callbacks: Callbacks to run before the session starts.
|
||||
on_before_run_node_callbacks: Callbacks to run before each node starts.
|
||||
on_after_run_node_callbacks: Callbacks to run after each node completes.
|
||||
on_node_error_callbacks: Callbacks to run when a node errors.
|
||||
on_after_run_session_callbacks: Callbacks to run after the session completes.
|
||||
"""
|
||||
|
||||
self._on_before_run_session_callbacks = on_before_run_session_callbacks or []
|
||||
self._on_before_run_node_callbacks = on_before_run_node_callbacks or []
|
||||
self._on_after_run_node_callbacks = on_after_run_node_callbacks or []
|
||||
self._on_node_error_callbacks = on_node_error_callbacks or []
|
||||
self._on_after_run_session_callbacks = on_after_run_session_callbacks or []
|
||||
|
||||
def start(self, services: InvocationServices, cancel_event: ThreadEvent, profiler: Optional[Profiler] = None):
|
||||
self._services = services
|
||||
self._cancel_event = cancel_event
|
||||
self._profiler = profiler
|
||||
|
||||
def run(self, queue_item: SessionQueueItem):
|
||||
# Exceptions raised outside `run_node` are handled by the processor. There is no need to catch them here.
|
||||
|
||||
self._on_before_run_session(queue_item=queue_item)
|
||||
|
||||
# Loop over invocations until the session is complete or canceled
|
||||
while True:
|
||||
try:
|
||||
invocation = queue_item.session.next()
|
||||
# Anything other than a `NodeInputError` is handled as a processor error
|
||||
except NodeInputError as e:
|
||||
error_type = e.__class__.__name__
|
||||
error_message = str(e)
|
||||
error_traceback = traceback.format_exc()
|
||||
self._on_node_error(
|
||||
invocation=e.node,
|
||||
queue_item=queue_item,
|
||||
error_type=error_type,
|
||||
error_message=error_message,
|
||||
error_traceback=error_traceback,
|
||||
)
|
||||
break
|
||||
|
||||
if invocation is None or self._cancel_event.is_set():
|
||||
break
|
||||
|
||||
self.run_node(invocation, queue_item)
|
||||
|
||||
# The session is complete if all invocations have been run or there is an error on the session.
|
||||
if queue_item.session.is_complete() or self._cancel_event.is_set():
|
||||
break
|
||||
|
||||
self._on_after_run_session(queue_item=queue_item)
|
||||
|
||||
def run_node(self, invocation: BaseInvocation, queue_item: SessionQueueItem):
|
||||
try:
|
||||
# Any unhandled exception in this scope is an invocation error & will fail the graph
|
||||
with self._services.performance_statistics.collect_stats(invocation, queue_item.session_id):
|
||||
self._on_before_run_node(invocation, queue_item)
|
||||
|
||||
data = InvocationContextData(
|
||||
invocation=invocation,
|
||||
source_invocation_id=queue_item.session.prepared_source_mapping[invocation.id],
|
||||
queue_item=queue_item,
|
||||
)
|
||||
context = build_invocation_context(
|
||||
data=data,
|
||||
services=self._services,
|
||||
cancel_event=self._cancel_event,
|
||||
)
|
||||
|
||||
# Invoke the node
|
||||
output = invocation.invoke_internal(context=context, services=self._services)
|
||||
# Save output and history
|
||||
queue_item.session.complete(invocation.id, output)
|
||||
|
||||
self._on_after_run_node(invocation, queue_item, output)
|
||||
|
||||
except KeyboardInterrupt:
|
||||
# TODO(psyche): This is expected to be caught in the main thread. Do we need to catch this here?
|
||||
pass
|
||||
except CanceledException:
|
||||
# When the user cancels the graph, we first set the cancel event. The event is checked
|
||||
# between invocations, in this loop. Some invocations are long-running, and we need to
|
||||
# be able to cancel them mid-execution.
|
||||
#
|
||||
# For example, denoising is a long-running invocation with many steps. A step callback
|
||||
# is executed after each step. This step callback checks if the canceled event is set,
|
||||
# then raises a CanceledException to stop execution immediately.
|
||||
#
|
||||
# When we get a CanceledException, we don't need to do anything - just pass and let the
|
||||
# loop go to its next iteration, and the cancel event will be handled correctly.
|
||||
pass
|
||||
except Exception as e:
|
||||
error_type = e.__class__.__name__
|
||||
error_message = str(e)
|
||||
error_traceback = traceback.format_exc()
|
||||
self._on_node_error(
|
||||
invocation=invocation,
|
||||
queue_item=queue_item,
|
||||
error_type=error_type,
|
||||
error_message=error_message,
|
||||
error_traceback=error_traceback,
|
||||
)
|
||||
|
||||
def _on_before_run_session(self, queue_item: SessionQueueItem) -> None:
|
||||
"""Run before a session is executed"""
|
||||
|
||||
self._services.logger.debug(
|
||||
f"On before run session: queue item {queue_item.item_id}, session {queue_item.session_id}"
|
||||
)
|
||||
|
||||
# If profiling is enabled, start the profiler
|
||||
if self._profiler is not None:
|
||||
self._profiler.start(profile_id=queue_item.session_id)
|
||||
|
||||
for callback in self._on_before_run_session_callbacks:
|
||||
callback(queue_item=queue_item)
|
||||
|
||||
def _on_after_run_session(self, queue_item: SessionQueueItem) -> None:
|
||||
"""Run after a session is executed"""
|
||||
|
||||
self._services.logger.debug(
|
||||
f"On after run session: queue item {queue_item.item_id}, session {queue_item.session_id}"
|
||||
)
|
||||
|
||||
# If we are profiling, stop the profiler and dump the profile & stats
|
||||
if self._profiler is not None:
|
||||
profile_path = self._profiler.stop()
|
||||
stats_path = profile_path.with_suffix(".json")
|
||||
self._services.performance_statistics.dump_stats(
|
||||
graph_execution_state_id=queue_item.session.id, output_path=stats_path
|
||||
)
|
||||
|
||||
try:
|
||||
# Update the queue item with the completed session. If the queue item has been removed from the queue,
|
||||
# we'll get a SessionQueueItemNotFoundError and we can ignore it. This can happen if the queue is cleared
|
||||
# while the session is running.
|
||||
queue_item = self._services.session_queue.set_queue_item_session(queue_item.item_id, queue_item.session)
|
||||
|
||||
# TODO(psyche): This feels jumbled - we should review separation of concerns here.
|
||||
# Send complete event. The events service will receive this and update the queue item's status.
|
||||
self._services.events.emit_graph_execution_complete(
|
||||
queue_batch_id=queue_item.batch_id,
|
||||
queue_item_id=queue_item.item_id,
|
||||
queue_id=queue_item.queue_id,
|
||||
graph_execution_state_id=queue_item.session.id,
|
||||
)
|
||||
|
||||
# We'll get a GESStatsNotFoundError if we try to log stats for an untracked graph, but in the processor
|
||||
# we don't care about that - suppress the error.
|
||||
with suppress(GESStatsNotFoundError):
|
||||
self._services.performance_statistics.log_stats(queue_item.session.id)
|
||||
self._services.performance_statistics.reset_stats()
|
||||
|
||||
for callback in self._on_after_run_session_callbacks:
|
||||
callback(queue_item=queue_item)
|
||||
except SessionQueueItemNotFoundError:
|
||||
pass
|
||||
|
||||
def _on_before_run_node(self, invocation: BaseInvocation, queue_item: SessionQueueItem):
|
||||
"""Run before a node is executed"""
|
||||
|
||||
self._services.logger.debug(
|
||||
f"On before run node: queue item {queue_item.item_id}, session {queue_item.session_id}, node {invocation.id} ({invocation.get_type()})"
|
||||
)
|
||||
|
||||
# Send starting event
|
||||
self._services.events.emit_invocation_started(
|
||||
queue_batch_id=queue_item.batch_id,
|
||||
queue_item_id=queue_item.item_id,
|
||||
queue_id=queue_item.queue_id,
|
||||
graph_execution_state_id=queue_item.session_id,
|
||||
node=invocation.model_dump(),
|
||||
source_node_id=queue_item.session.prepared_source_mapping[invocation.id],
|
||||
)
|
||||
|
||||
for callback in self._on_before_run_node_callbacks:
|
||||
callback(invocation=invocation, queue_item=queue_item)
|
||||
|
||||
def _on_after_run_node(
|
||||
self, invocation: BaseInvocation, queue_item: SessionQueueItem, output: BaseInvocationOutput
|
||||
):
|
||||
"""Run after a node is executed"""
|
||||
|
||||
self._services.logger.debug(
|
||||
f"On after run node: queue item {queue_item.item_id}, session {queue_item.session_id}, node {invocation.id} ({invocation.get_type()})"
|
||||
)
|
||||
|
||||
# Send complete event on successful runs
|
||||
self._services.events.emit_invocation_complete(
|
||||
queue_batch_id=queue_item.batch_id,
|
||||
queue_item_id=queue_item.item_id,
|
||||
queue_id=queue_item.queue_id,
|
||||
graph_execution_state_id=queue_item.session.id,
|
||||
node=invocation.model_dump(),
|
||||
source_node_id=queue_item.session.prepared_source_mapping[invocation.id],
|
||||
result=output.model_dump(),
|
||||
)
|
||||
|
||||
for callback in self._on_after_run_node_callbacks:
|
||||
callback(invocation=invocation, queue_item=queue_item, output=output)
|
||||
|
||||
def _on_node_error(
|
||||
self,
|
||||
invocation: BaseInvocation,
|
||||
queue_item: SessionQueueItem,
|
||||
error_type: str,
|
||||
error_message: str,
|
||||
error_traceback: str,
|
||||
):
|
||||
"""Run when a node errors"""
|
||||
|
||||
self._services.logger.debug(
|
||||
f"On node error: queue item {queue_item.item_id}, session {queue_item.session_id}, node {invocation.id} ({invocation.get_type()})"
|
||||
)
|
||||
|
||||
# Node errors do not get the full traceback. Only the queue item gets the full traceback.
|
||||
node_error = f"{error_type}: {error_message}"
|
||||
queue_item.session.set_node_error(invocation.id, node_error)
|
||||
self._services.logger.error(
|
||||
f"Error while invoking session {queue_item.session_id}, invocation {invocation.id} ({invocation.get_type()}): {error_message}"
|
||||
)
|
||||
self._services.logger.error(error_traceback)
|
||||
|
||||
# Send error event
|
||||
self._services.events.emit_invocation_error(
|
||||
queue_batch_id=queue_item.session_id,
|
||||
queue_item_id=queue_item.item_id,
|
||||
queue_id=queue_item.queue_id,
|
||||
graph_execution_state_id=queue_item.session.id,
|
||||
node=invocation.model_dump(),
|
||||
source_node_id=queue_item.session.prepared_source_mapping[invocation.id],
|
||||
error_type=error_type,
|
||||
error_message=error_message,
|
||||
error_traceback=error_traceback,
|
||||
user_id=getattr(queue_item, "user_id", None),
|
||||
project_id=getattr(queue_item, "project_id", None),
|
||||
)
|
||||
|
||||
for callback in self._on_node_error_callbacks:
|
||||
callback(
|
||||
invocation=invocation,
|
||||
queue_item=queue_item,
|
||||
error_type=error_type,
|
||||
error_message=error_message,
|
||||
error_traceback=error_traceback,
|
||||
)
|
||||
|
||||
|
||||
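Putting the pieces above together: callbacks are injected into the runner, and the runner is injected into the processor, both at construction time. A wiring sketch, assuming the `log_node_error` callback from the earlier example, a hypothetical `notify_ops` callable for non-fatal processor errors, and the module path implied by the imports above:

# Module path assumed from the package layout shown above.
from invokeai.app.services.session_processor.session_processor_default import (
    DefaultSessionProcessor,
    DefaultSessionRunner,
)

runner = DefaultSessionRunner(
    on_node_error_callbacks=[log_node_error],
    # on_before_run_session_callbacks, on_after_run_node_callbacks, etc. are supplied the same way.
)

processor = DefaultSessionProcessor(
    session_runner=runner,
    on_non_fatal_processor_error_callbacks=[notify_ops],  # hypothetical OnNonFatalProcessorError callable
    thread_limit=1,
    polling_interval=1,
)
# The application then calls processor.start(invoker) exactly as before.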
class DefaultSessionProcessor(SessionProcessorBase):
|
||||
def start(self, invoker: Invoker, thread_limit: int = 1, polling_interval: int = 1) -> None:
|
||||
def __init__(
|
||||
self,
|
||||
session_runner: Optional[SessionRunnerBase] = None,
|
||||
on_non_fatal_processor_error_callbacks: Optional[list[OnNonFatalProcessorError]] = None,
|
||||
thread_limit: int = 1,
|
||||
polling_interval: int = 1,
|
||||
) -> None:
|
||||
super().__init__()
|
||||
|
||||
self.session_runner = session_runner if session_runner else DefaultSessionRunner()
|
||||
self._on_non_fatal_processor_error_callbacks = on_non_fatal_processor_error_callbacks or []
|
||||
self._thread_limit = thread_limit
|
||||
self._polling_interval = polling_interval
|
||||
|
||||
def start(self, invoker: Invoker) -> None:
|
||||
self._invoker: Invoker = invoker
|
||||
self._queue_item: Optional[SessionQueueItem] = None
|
||||
self._invocation: Optional[BaseInvocation] = None
|
||||
@@ -33,9 +317,7 @@ class DefaultSessionProcessor(SessionProcessorBase):
|
||||
|
||||
local_handler.register(event_name=EventServiceBase.queue_event, _func=self._on_queue_event)
|
||||
|
||||
self._thread_limit = thread_limit
|
||||
self._thread_semaphore = BoundedSemaphore(thread_limit)
|
||||
self._polling_interval = polling_interval
|
||||
self._thread_semaphore = BoundedSemaphore(self._thread_limit)
|
||||
|
||||
# If profiling is enabled, create a profiler. The same profiler will be used for all sessions. Internally,
|
||||
# the profiler will create a new profile for each session.
|
||||
@@ -49,6 +331,7 @@ class DefaultSessionProcessor(SessionProcessorBase):
|
||||
else None
|
||||
)
|
||||
|
||||
self.session_runner.start(services=invoker.services, cancel_event=self._cancel_event, profiler=self._profiler)
|
||||
self._thread = Thread(
|
||||
name="session_processor",
|
||||
target=self._process,
|
||||
@@ -91,6 +374,7 @@ class DefaultSessionProcessor(SessionProcessorBase):
|
||||
"failed",
|
||||
"canceled",
|
||||
]:
|
||||
self._cancel_event.set()
|
||||
self._poll_now()
|
||||
|
||||
def resume(self) -> SessionProcessorStatus:
|
||||
@@ -116,8 +400,8 @@ class DefaultSessionProcessor(SessionProcessorBase):
|
||||
resume_event: ThreadEvent,
|
||||
cancel_event: ThreadEvent,
|
||||
):
|
||||
# Outermost processor try block; any unhandled exception is a fatal processor error
|
||||
try:
|
||||
# Any unhandled exception in this block is a fatal processor error and will stop the processor.
|
||||
self._thread_semaphore.acquire()
|
||||
stop_event.clear()
|
||||
resume_event.set()
|
||||
@@ -125,8 +409,8 @@ class DefaultSessionProcessor(SessionProcessorBase):
|
||||
|
||||
while not stop_event.is_set():
|
||||
poll_now_event.clear()
|
||||
# Middle processor try block; any unhandled exception is a non-fatal processor error
|
||||
try:
|
||||
# Any unhandled exception in this block is a nonfatal processor error and will be handled.
|
||||
# If we are paused, wait for resume event
|
||||
resume_event.wait()
|
||||
|
||||
@@ -142,157 +426,62 @@ class DefaultSessionProcessor(SessionProcessorBase):
|
||||
self._invoker.services.logger.debug(f"Executing queue item {self._queue_item.item_id}")
|
||||
cancel_event.clear()
|
||||
|
||||
# If profiling is enabled, start the profiler
|
||||
if self._profiler is not None:
|
||||
self._profiler.start(profile_id=self._queue_item.session_id)
|
||||
# Run the graph
|
||||
self.session_runner.run(queue_item=self._queue_item)
|
||||
|
||||
# Prepare invocations and take the first
|
||||
self._invocation = self._queue_item.session.next()
|
||||
|
||||
# Loop over invocations until the session is complete or canceled
|
||||
while self._invocation is not None and not cancel_event.is_set():
|
||||
# get the source node id to provide to clients (the prepared node id is not as useful)
|
||||
source_invocation_id = self._queue_item.session.prepared_source_mapping[self._invocation.id]
|
||||
|
||||
# Send starting event
|
||||
self._invoker.services.events.emit_invocation_started(
|
||||
queue_batch_id=self._queue_item.batch_id,
|
||||
queue_item_id=self._queue_item.item_id,
|
||||
queue_id=self._queue_item.queue_id,
|
||||
graph_execution_state_id=self._queue_item.session_id,
|
||||
node=self._invocation.model_dump(),
|
||||
source_node_id=source_invocation_id,
|
||||
)
|
||||
|
||||
# Innermost processor try block; any unhandled exception is an invocation error & will fail the graph
|
||||
try:
|
||||
with self._invoker.services.performance_statistics.collect_stats(
|
||||
self._invocation, self._queue_item.session.id
|
||||
):
|
||||
# Build invocation context (the node-facing API)
|
||||
data = InvocationContextData(
|
||||
invocation=self._invocation,
|
||||
source_invocation_id=source_invocation_id,
|
||||
queue_item=self._queue_item,
|
||||
)
|
||||
context = build_invocation_context(
|
||||
data=data,
|
||||
services=self._invoker.services,
|
||||
cancel_event=self._cancel_event,
|
||||
)
|
||||
|
||||
# Invoke the node
|
||||
outputs = self._invocation.invoke_internal(
|
||||
context=context, services=self._invoker.services
|
||||
)
|
||||
|
||||
# Save outputs and history
|
||||
self._queue_item.session.complete(self._invocation.id, outputs)
|
||||
|
||||
# Send complete event
|
||||
self._invoker.services.events.emit_invocation_complete(
|
||||
queue_batch_id=self._queue_item.batch_id,
|
||||
queue_item_id=self._queue_item.item_id,
|
||||
queue_id=self._queue_item.queue_id,
|
||||
graph_execution_state_id=self._queue_item.session.id,
|
||||
node=self._invocation.model_dump(),
|
||||
source_node_id=source_invocation_id,
|
||||
result=outputs.model_dump(),
|
||||
)
|
||||
|
||||
except KeyboardInterrupt:
|
||||
# TODO(MM2): Create an event for this
|
||||
pass
|
||||
|
||||
except CanceledException:
|
||||
# When the user cancels the graph, we first set the cancel event. The event is checked
|
||||
# between invocations, in this loop. Some invocations are long-running, and we need to
|
||||
# be able to cancel them mid-execution.
|
||||
#
|
||||
# For example, denoising is a long-running invocation with many steps. A step callback
|
||||
# is executed after each step. This step callback checks if the canceled event is set,
|
||||
# then raises a CanceledException to stop execution immediately.
|
||||
#
|
||||
# When we get a CanceledException, we don't need to do anything - just pass and let the
|
||||
# loop go to its next iteration, and the cancel event will be handled correctly.
|
||||
pass
|
||||
|
||||
except Exception as e:
|
||||
error = traceback.format_exc()
|
||||
|
||||
# Save error
|
||||
self._queue_item.session.set_node_error(self._invocation.id, error)
|
||||
self._invoker.services.logger.error(
|
||||
f"Error while invoking session {self._queue_item.session_id}, invocation {self._invocation.id} ({self._invocation.get_type()}):\n{e}"
|
||||
)
|
||||
self._invoker.services.logger.error(error)
|
||||
|
||||
# Send error event
|
||||
self._invoker.services.events.emit_invocation_error(
|
||||
queue_batch_id=self._queue_item.session_id,
|
||||
queue_item_id=self._queue_item.item_id,
|
||||
queue_id=self._queue_item.queue_id,
|
||||
graph_execution_state_id=self._queue_item.session.id,
|
||||
node=self._invocation.model_dump(),
|
||||
source_node_id=source_invocation_id,
|
||||
error_type=e.__class__.__name__,
|
||||
error=error,
|
||||
)
|
||||
pass
|
||||
|
||||
# The session is complete if the all invocations are complete or there was an error
|
||||
if self._queue_item.session.is_complete() or cancel_event.is_set():
|
||||
# Send complete event
|
||||
self._invoker.services.events.emit_graph_execution_complete(
|
||||
queue_batch_id=self._queue_item.batch_id,
|
||||
queue_item_id=self._queue_item.item_id,
|
||||
queue_id=self._queue_item.queue_id,
|
||||
graph_execution_state_id=self._queue_item.session.id,
|
||||
)
|
||||
# If we are profiling, stop the profiler and dump the profile & stats
|
||||
if self._profiler:
|
||||
profile_path = self._profiler.stop()
|
||||
stats_path = profile_path.with_suffix(".json")
|
||||
self._invoker.services.performance_statistics.dump_stats(
|
||||
graph_execution_state_id=self._queue_item.session.id, output_path=stats_path
|
||||
)
|
||||
# We'll get a GESStatsNotFoundError if we try to log stats for an untracked graph, but in the processor
|
||||
# we don't care about that - suppress the error.
|
||||
with suppress(GESStatsNotFoundError):
|
||||
self._invoker.services.performance_statistics.log_stats(self._queue_item.session.id)
|
||||
self._invoker.services.performance_statistics.reset_stats()
|
||||
|
||||
# Set the invocation to None to prepare for the next session
|
||||
self._invocation = None
|
||||
else:
|
||||
# Prepare the next invocation
|
||||
self._invocation = self._queue_item.session.next()
|
||||
else:
|
||||
# The queue was empty, wait for next polling interval or event to try again
|
||||
self._invoker.services.logger.debug("Waiting for next polling interval or event")
|
||||
poll_now_event.wait(self._polling_interval)
|
||||
continue
|
||||
except Exception:
|
||||
# Non-fatal error in processor
|
||||
self._invoker.services.logger.error(
|
||||
f"Non-fatal error in session processor:\n{traceback.format_exc()}"
|
||||
except Exception as e:
|
||||
error_type = e.__class__.__name__
|
||||
error_message = str(e)
|
||||
error_traceback = traceback.format_exc()
|
||||
self._on_non_fatal_processor_error(
|
||||
queue_item=self._queue_item,
|
||||
error_type=error_type,
|
||||
error_message=error_message,
|
||||
error_traceback=error_traceback,
|
||||
)
|
||||
# Cancel the queue item
|
||||
if self._queue_item is not None:
|
||||
self._invoker.services.session_queue.cancel_queue_item(
|
||||
self._queue_item.item_id, error=traceback.format_exc()
|
||||
)
|
||||
# Reset the invocation to None to prepare for the next session
|
||||
self._invocation = None
|
||||
# Immediately poll for next queue item
|
||||
# Wait for next polling interval or event to try again
|
||||
poll_now_event.wait(self._polling_interval)
|
||||
continue
|
||||
except Exception:
|
||||
except Exception as e:
|
||||
# Fatal error in processor, log and pass - we're done here
|
||||
self._invoker.services.logger.error(f"Fatal Error in session processor:\n{traceback.format_exc()}")
|
||||
error_type = e.__class__.__name__
|
||||
error_message = str(e)
|
||||
error_traceback = traceback.format_exc()
|
||||
self._invoker.services.logger.error(f"Fatal Error in session processor {error_type}: {error_message}")
|
||||
self._invoker.services.logger.error(error_traceback)
|
||||
pass
|
||||
finally:
|
||||
stop_event.clear()
|
||||
poll_now_event.clear()
|
||||
self._queue_item = None
|
||||
self._thread_semaphore.release()
|
||||
|
||||
def _on_non_fatal_processor_error(
|
||||
self,
|
||||
queue_item: Optional[SessionQueueItem],
|
||||
error_type: str,
|
||||
error_message: str,
|
||||
error_traceback: str,
|
||||
) -> None:
|
||||
# Non-fatal error in processor
|
||||
self._invoker.services.logger.error(f"Non-fatal error in session processor {error_type}: {error_message}")
|
||||
self._invoker.services.logger.error(error_traceback)
|
||||
|
||||
if queue_item is not None:
|
||||
# Update the queue item with the completed session
|
||||
self._invoker.services.session_queue.set_queue_item_session(queue_item.item_id, queue_item.session)
|
||||
# Fail the queue item
|
||||
self._invoker.services.session_queue.fail_queue_item(
|
||||
item_id=queue_item.item_id,
|
||||
error_type=error_type,
|
||||
error_message=error_message,
|
||||
error_traceback=error_traceback,
|
||||
)
|
||||
|
||||
for callback in self._on_non_fatal_processor_error_callbacks:
|
||||
callback(
|
||||
queue_item=queue_item,
|
||||
error_type=error_type,
|
||||
error_message=error_message,
|
||||
error_traceback=error_traceback,
|
||||
)
|
||||
|
||||
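The `(error_type, error_message, error_traceback)` triple is extracted from an exception in several places above (node errors, non-fatal and fatal processor errors). If one wanted to factor that pattern out, a small helper would do; this is only a sketch of the repeated idiom, not code from the branch:

import traceback

def exception_triple(e: Exception) -> tuple[str, str, str]:
    """Return (error_type, error_message, error_traceback) for the exception being handled."""
    # format_exc() relies on the exception currently being handled, so call this
    # from inside the `except` block (directly or via a helper like this one).
    return e.__class__.__name__, str(e), traceback.format_exc()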
@@ -16,6 +16,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
|
||||
SessionQueueItemDTO,
|
||||
SessionQueueStatus,
|
||||
)
|
||||
from invokeai.app.services.shared.graph import GraphExecutionState
|
||||
from invokeai.app.services.shared.pagination import CursorPaginatedResults
|
||||
|
||||
|
||||
@@ -73,10 +74,17 @@ class SessionQueueBase(ABC):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def cancel_queue_item(self, item_id: int, error: Optional[str] = None) -> SessionQueueItem:
|
||||
def cancel_queue_item(self, item_id: int) -> SessionQueueItem:
|
||||
"""Cancels a session queue item"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def fail_queue_item(
|
||||
self, item_id: int, error_type: str, error_message: str, error_traceback: str
|
||||
) -> SessionQueueItem:
|
||||
"""Fails a session queue item"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def cancel_by_batch_ids(self, queue_id: str, batch_ids: list[str]) -> CancelByBatchIDsResult:
|
||||
"""Cancels all queue items with matching batch IDs"""
|
||||
@@ -103,3 +111,8 @@ class SessionQueueBase(ABC):
|
||||
def get_queue_item(self, item_id: int) -> SessionQueueItem:
|
||||
"""Gets a session queue item by ID"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def set_queue_item_session(self, item_id: int, session: GraphExecutionState) -> SessionQueueItem:
|
||||
"""Sets the session for a session queue item. Use this to update the session state."""
|
||||
pass
|
||||
|
||||
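The split above separates intent: `cancel_queue_item` is now purely a cancellation, `fail_queue_item` carries the structured error triple, and `set_queue_item_session` persists whatever progress the session made. A rough sketch of how a caller might use the three together (the `finish_queue_item` helper is illustrative, not part of the branch; `queue` is assumed to implement `SessionQueueBase`):

import traceback
from typing import Optional

def finish_queue_item(queue, queue_item, error: Optional[Exception]) -> None:
    # Persist the partially (or fully) executed session before changing status.
    queue.set_queue_item_session(queue_item.item_id, queue_item.session)
    if error is None:
        queue.cancel_queue_item(queue_item.item_id)
    else:
        queue.fail_queue_item(
            item_id=queue_item.item_id,
            error_type=error.__class__.__name__,
            error_message=str(error),
            error_traceback="".join(traceback.format_exception(error)),  # Python 3.10+ signature
        )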
@@ -3,7 +3,16 @@ import json
|
||||
from itertools import chain, product
|
||||
from typing import Generator, Iterable, Literal, NamedTuple, Optional, TypeAlias, Union, cast
|
||||
|
||||
from pydantic import BaseModel, ConfigDict, Field, StrictStr, TypeAdapter, field_validator, model_validator
|
||||
from pydantic import (
|
||||
AliasChoices,
|
||||
BaseModel,
|
||||
ConfigDict,
|
||||
Field,
|
||||
StrictStr,
|
||||
TypeAdapter,
|
||||
field_validator,
|
||||
model_validator,
|
||||
)
|
||||
from pydantic_core import to_jsonable_python
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation
|
||||
@@ -189,7 +198,13 @@ class SessionQueueItemWithoutGraph(BaseModel):
|
||||
session_id: str = Field(
|
||||
description="The ID of the session associated with this queue item. The session doesn't exist in graph_executions until the queue item is executed."
|
||||
)
|
||||
error: Optional[str] = Field(default=None, description="The error message if this queue item errored")
|
||||
error_type: Optional[str] = Field(default=None, description="The error type if this queue item errored")
|
||||
error_message: Optional[str] = Field(default=None, description="The error message if this queue item errored")
|
||||
error_traceback: Optional[str] = Field(
|
||||
default=None,
|
||||
description="The error traceback if this queue item errored",
|
||||
validation_alias=AliasChoices("error_traceback", "error"),
|
||||
)
|
||||
created_at: Union[datetime.datetime, str] = Field(description="When this queue item was created")
|
||||
updated_at: Union[datetime.datetime, str] = Field(description="When this queue item was updated")
|
||||
started_at: Optional[Union[datetime.datetime, str]] = Field(description="When this queue item was started")
|
||||
|
||||
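The `AliasChoices("error_traceback", "error")` validation alias is what keeps previously stored queue items loadable: rows written before the rename kept the traceback under `error`, and they still validate into the new field. A self-contained pydantic sketch of the same mechanism:

from typing import Optional

from pydantic import AliasChoices, BaseModel, Field

class ErrorFields(BaseModel):
    error_traceback: Optional[str] = Field(
        default=None,
        validation_alias=AliasChoices("error_traceback", "error"),
    )

# Old-shape data (pre-rename) still populates the renamed field...
assert ErrorFields.model_validate({"error": "Traceback ..."}).error_traceback == "Traceback ..."
# ...and new-shape data works because the field's own name is also an accepted alias.
assert ErrorFields.model_validate({"error_traceback": "Traceback ..."}).error_traceback == "Traceback ..."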
@@ -27,6 +27,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
|
||||
calc_session_count,
|
||||
prepare_values_to_insert,
|
||||
)
|
||||
from invokeai.app.services.shared.graph import GraphExecutionState
|
||||
from invokeai.app.services.shared.pagination import CursorPaginatedResults
|
||||
from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
|
||||
|
||||
@@ -81,10 +82,18 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
async def _handle_error_event(self, event: FastAPIEvent) -> None:
|
||||
try:
|
||||
item_id = event[1]["data"]["queue_item_id"]
|
||||
error = event[1]["data"]["error"]
|
||||
error_type = event[1]["data"]["error_type"]
|
||||
error_message = event[1]["data"]["error_message"]
|
||||
error_traceback = event[1]["data"]["error_traceback"]
|
||||
queue_item = self.get_queue_item(item_id)
|
||||
# always set to failed if we have an error, even if the item was previously marked completed or canceled
|
||||
queue_item = self._set_queue_item_status(item_id=queue_item.item_id, status="failed", error=error)
|
||||
queue_item = self._set_queue_item_status(
|
||||
item_id=queue_item.item_id,
|
||||
status="failed",
|
||||
error_type=error_type,
|
||||
error_message=error_message,
|
||||
error_traceback=error_traceback,
|
||||
)
|
||||
except SessionQueueItemNotFoundError:
|
||||
return
|
||||
|
||||
@@ -271,17 +280,22 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
return SessionQueueItem.queue_item_from_dict(dict(result))
|
||||
|
||||
def _set_queue_item_status(
|
||||
self, item_id: int, status: QUEUE_ITEM_STATUS, error: Optional[str] = None
|
||||
self,
|
||||
item_id: int,
|
||||
status: QUEUE_ITEM_STATUS,
|
||||
error_type: Optional[str] = None,
|
||||
error_message: Optional[str] = None,
|
||||
error_traceback: Optional[str] = None,
|
||||
) -> SessionQueueItem:
|
||||
try:
|
||||
self.__lock.acquire()
|
||||
self.__cursor.execute(
|
||||
"""--sql
|
||||
UPDATE session_queue
|
||||
SET status = ?, error = ?
|
||||
SET status = ?, error_type = ?, error_message = ?, error_traceback = ?
|
||||
WHERE item_id = ?
|
||||
""",
|
||||
(status, error, item_id),
|
||||
(status, error_type, error_message, error_traceback, item_id),
|
||||
)
|
||||
self.__conn.commit()
|
||||
except Exception:
|
||||
@@ -338,26 +352,6 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
self.__lock.release()
|
||||
return IsFullResult(is_full=is_full)
|
||||
|
||||
def delete_queue_item(self, item_id: int) -> SessionQueueItem:
|
||||
queue_item = self.get_queue_item(item_id=item_id)
|
||||
try:
|
||||
self.__lock.acquire()
|
||||
self.__cursor.execute(
|
||||
"""--sql
|
||||
DELETE FROM session_queue
|
||||
WHERE
|
||||
item_id = ?
|
||||
""",
|
||||
(item_id,),
|
||||
)
|
||||
self.__conn.commit()
|
||||
except Exception:
|
||||
self.__conn.rollback()
|
||||
raise
|
||||
finally:
|
||||
self.__lock.release()
|
||||
return queue_item
|
||||
|
||||
def clear(self, queue_id: str) -> ClearResult:
|
||||
try:
|
||||
self.__lock.acquire()
|
||||
@@ -424,11 +418,34 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
self.__lock.release()
|
||||
return PruneResult(deleted=count)
|
||||
|
||||
def cancel_queue_item(self, item_id: int, error: Optional[str] = None) -> SessionQueueItem:
|
||||
def cancel_queue_item(self, item_id: int) -> SessionQueueItem:
|
||||
queue_item = self.get_queue_item(item_id)
|
||||
if queue_item.status not in ["canceled", "failed", "completed"]:
|
||||
status = "failed" if error is not None else "canceled"
|
||||
queue_item = self._set_queue_item_status(item_id=item_id, status=status, error=error) # type: ignore [arg-type] # mypy seems to not narrow the Literals here
|
||||
queue_item = self._set_queue_item_status(item_id=item_id, status="canceled")
|
||||
self.__invoker.services.events.emit_session_canceled(
|
||||
queue_item_id=queue_item.item_id,
|
||||
queue_id=queue_item.queue_id,
|
||||
queue_batch_id=queue_item.batch_id,
|
||||
graph_execution_state_id=queue_item.session_id,
|
||||
)
|
||||
return queue_item
|
||||
|
||||
def fail_queue_item(
|
||||
self,
|
||||
item_id: int,
|
||||
error_type: str,
|
||||
error_message: str,
|
||||
error_traceback: str,
|
||||
) -> SessionQueueItem:
|
||||
queue_item = self.get_queue_item(item_id)
|
||||
if queue_item.status not in ["canceled", "failed", "completed"]:
|
||||
queue_item = self._set_queue_item_status(
|
||||
item_id=item_id,
|
||||
status="failed",
|
||||
error_type=error_type,
|
||||
error_message=error_message,
|
||||
error_traceback=error_traceback,
|
||||
)
|
||||
self.__invoker.services.events.emit_session_canceled(
|
||||
queue_item_id=queue_item.item_id,
|
||||
queue_id=queue_item.queue_id,
|
||||
@@ -562,6 +579,29 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
raise SessionQueueItemNotFoundError(f"No queue item with id {item_id}")
|
||||
return SessionQueueItem.queue_item_from_dict(dict(result))
|
||||
|
||||
def set_queue_item_session(self, item_id: int, session: GraphExecutionState) -> SessionQueueItem:
|
||||
try:
|
||||
# Use exclude_none so we don't end up with a bunch of nulls in the graph - this can cause validation errors
|
||||
# when the graph is loaded. Graph execution occurs purely in memory - the session saved here is not referenced
|
||||
# during execution.
|
||||
session_json = session.model_dump_json(warnings=False, exclude_none=True)
|
||||
self.__lock.acquire()
|
||||
self.__cursor.execute(
|
||||
"""--sql
|
||||
UPDATE session_queue
|
||||
SET session = ?
|
||||
WHERE item_id = ?
|
||||
""",
|
||||
(session_json, item_id),
|
||||
)
|
||||
self.__conn.commit()
|
||||
except Exception:
|
||||
self.__conn.rollback()
|
||||
raise
|
||||
finally:
|
||||
self.__lock.release()
|
||||
return self.get_queue_item(item_id)
|
||||
|
||||
def list_queue_items(
|
||||
self,
|
||||
queue_id: str,
|
||||
@@ -578,7 +618,9 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
status,
|
||||
priority,
|
||||
field_values,
|
||||
error,
|
||||
error_type,
|
||||
error_message,
|
||||
error_traceback,
|
||||
created_at,
|
||||
updated_at,
|
||||
completed_at,
|
||||
|
||||
@@ -8,6 +8,7 @@ import networkx as nx
|
||||
from pydantic import (
|
||||
BaseModel,
|
||||
GetJsonSchemaHandler,
|
||||
ValidationError,
|
||||
field_validator,
|
||||
)
|
||||
from pydantic.fields import Field
|
||||
@@ -190,6 +191,39 @@ class UnknownGraphValidationError(ValueError):
|
||||
pass
|
||||
|
||||
|
||||
class NodeInputError(ValueError):
|
||||
"""Raised when a node fails preparation. This occurs when a node's inputs are being set from its incomers, but an
|
||||
input fails validation.
|
||||
|
||||
Attributes:
|
||||
node: The node that failed preparation. Note: only successfully set fields will be accurate. Review the error to
|
||||
determine which field caused the failure.
|
||||
"""
|
||||
|
||||
def __init__(self, node: BaseInvocation, e: ValidationError):
|
||||
self.original_error = e
|
||||
self.node = node
|
||||
# When preparing a node, we set each input one-at-a-time. We may thus safely assume that the first error
|
||||
# represents the first input that failed.
|
||||
self.failed_input = loc_to_dot_sep(e.errors()[0]["loc"])
|
||||
super().__init__(f"Node {node.id} has invalid incoming input for {self.failed_input}")
|
||||
|
||||
|
||||
def loc_to_dot_sep(loc: tuple[Union[str, int], ...]) -> str:
|
||||
"""Helper to pretty-print pydantic error locations as dot-separated strings.
|
||||
Taken from https://docs.pydantic.dev/latest/errors/errors/#customize-error-messages
|
||||
"""
|
||||
path = ""
|
||||
for i, x in enumerate(loc):
|
||||
if isinstance(x, str):
|
||||
if i > 0:
|
||||
path += "."
|
||||
path += x
|
||||
else:
|
||||
path += f"[{x}]"
|
||||
return path
|
||||
|
||||
|
||||
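As a concrete illustration of `loc_to_dot_sep` (and therefore of the `failed_input` attribute on `NodeInputError`): string parts are joined with dots and integer parts become indices. Assuming the helper above is in scope:

assert loc_to_dot_sep(("image", "width")) == "image.width"
assert loc_to_dot_sep(("collection", 2, "value")) == "collection[2].value"
assert loc_to_dot_sep((0, "image")) == "[0].image"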
@invocation_output("iterate_output")
|
||||
class IterateInvocationOutput(BaseInvocationOutput):
|
||||
"""Used to connect iteration outputs. Will be expanded to a specific output."""
|
||||
@@ -821,7 +855,10 @@ class GraphExecutionState(BaseModel):
|
||||
|
||||
# Get values from edges
|
||||
if next_node is not None:
|
||||
self._prepare_inputs(next_node)
|
||||
try:
|
||||
self._prepare_inputs(next_node)
|
||||
except ValidationError as e:
|
||||
raise NodeInputError(next_node, e)
|
||||
|
||||
# If next is still none, there's no next node, return None
|
||||
return next_node
|
||||
|
||||
@@ -180,9 +180,9 @@ class ImagesInterface(InvocationContextInterface):
|
||||
# If `metadata` is provided directly, use that. Else, use the metadata provided by `WithMetadata`, falling back to None.
|
||||
metadata_ = None
|
||||
if metadata:
|
||||
metadata_ = metadata
|
||||
elif isinstance(self._data.invocation, WithMetadata):
|
||||
metadata_ = self._data.invocation.metadata
|
||||
metadata_ = metadata.model_dump_json()
|
||||
elif isinstance(self._data.invocation, WithMetadata) and self._data.invocation.metadata:
|
||||
metadata_ = self._data.invocation.metadata.model_dump_json()
|
||||
|
||||
# If `board_id` is provided directly, use that. Else, use the board provided by `WithBoard`, falling back to None.
|
||||
board_id_ = None
|
||||
@@ -191,6 +191,14 @@ class ImagesInterface(InvocationContextInterface):
|
||||
elif isinstance(self._data.invocation, WithBoard) and self._data.invocation.board:
|
||||
board_id_ = self._data.invocation.board.board_id
|
||||
|
||||
workflow_ = None
|
||||
if self._data.queue_item.workflow:
|
||||
workflow_ = self._data.queue_item.workflow.model_dump_json()
|
||||
|
||||
graph_ = None
|
||||
if self._data.queue_item.session.graph:
|
||||
graph_ = self._data.queue_item.session.graph.model_dump_json()
|
||||
|
||||
return self._services.images.create(
|
||||
image=image,
|
||||
is_intermediate=self._data.invocation.is_intermediate,
|
||||
@@ -198,7 +206,8 @@ class ImagesInterface(InvocationContextInterface):
|
||||
board_id=board_id_,
|
||||
metadata=metadata_,
|
||||
image_origin=ResourceOrigin.INTERNAL,
|
||||
workflow=self._data.queue_item.workflow,
|
||||
workflow=workflow_,
|
||||
graph=graph_,
|
||||
session_id=self._data.queue_item.session_id,
|
||||
node_id=self._data.invocation.id,
|
||||
)
|
||||
|
||||
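Metadata, workflow, and graph all follow the same serialize-if-present pattern before being handed to `images.create`. A generic helper expressing that pattern (a sketch only; the hunk above deliberately keeps the three cases explicit):

from typing import Optional

from pydantic import BaseModel

def dump_if_present(model: Optional[BaseModel]) -> Optional[str]:
    # Pydantic models become JSON strings; None passes through unchanged.
    return model.model_dump_json() if model is not None else None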
@@ -12,6 +12,7 @@ from invokeai.app.services.shared.sqlite_migrator.migrations.migration_6 import
|
||||
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_7 import build_migration_7
|
||||
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_8 import build_migration_8
|
||||
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_9 import build_migration_9
|
||||
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_10 import build_migration_10
|
||||
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_impl import SqliteMigrator
|
||||
|
||||
|
||||
@@ -41,6 +42,7 @@ def init_db(config: InvokeAIAppConfig, logger: Logger, image_files: ImageFileSto
|
||||
migrator.register_migration(build_migration_7())
|
||||
migrator.register_migration(build_migration_8(app_config=config))
|
||||
migrator.register_migration(build_migration_9())
|
||||
migrator.register_migration(build_migration_10())
|
||||
migrator.run_migrations()
|
||||
|
||||
return db
|
||||
|
||||
@@ -0,0 +1,35 @@
|
||||
import sqlite3
|
||||
|
||||
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration
|
||||
|
||||
|
||||
class Migration10Callback:
|
||||
def __call__(self, cursor: sqlite3.Cursor) -> None:
|
||||
self._update_error_cols(cursor)
|
||||
|
||||
def _update_error_cols(self, cursor: sqlite3.Cursor) -> None:
|
||||
"""
|
||||
- Adds `error_type` and `error_message` columns to the session queue table.
|
||||
- Renames the `error` column to `error_traceback`.
|
||||
"""
|
||||
|
||||
cursor.execute("ALTER TABLE session_queue ADD COLUMN error_type TEXT;")
|
||||
cursor.execute("ALTER TABLE session_queue ADD COLUMN error_message TEXT;")
|
||||
cursor.execute("ALTER TABLE session_queue RENAME COLUMN error TO error_traceback;")
|
||||
|
||||
|
||||
def build_migration_10() -> Migration:
|
||||
"""
|
||||
Build the migration from database version 9 to 10.
|
||||
|
||||
This migration does the following:
|
||||
- Adds `error_type` and `error_message` columns to the session queue table.
|
||||
- Renames the `error` column to `error_traceback`.
|
||||
"""
|
||||
migration_10 = Migration(
|
||||
from_version=9,
|
||||
to_version=10,
|
||||
callback=Migration10Callback(),
|
||||
)
|
||||
|
||||
return migration_10
|
||||
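The three ALTER statements are easy to sanity-check against a scratch database. This standalone sketch mirrors what `Migration10Callback` does, using an in-memory table with only the relevant columns (SQLite 3.25+ is assumed for RENAME COLUMN):

import sqlite3

conn = sqlite3.connect(":memory:")
cursor = conn.cursor()
cursor.execute("CREATE TABLE session_queue (item_id INTEGER PRIMARY KEY, error TEXT);")

# Same statements as Migration10Callback._update_error_cols
cursor.execute("ALTER TABLE session_queue ADD COLUMN error_type TEXT;")
cursor.execute("ALTER TABLE session_queue ADD COLUMN error_message TEXT;")
cursor.execute("ALTER TABLE session_queue RENAME COLUMN error TO error_traceback;")

columns = {row[1] for row in cursor.execute("PRAGMA table_info(session_queue);")}
assert columns == {"item_id", "error_type", "error_message", "error_traceback"}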
@@ -4,5 +4,4 @@ Initialization file for invokeai.backend.image_util methods.
|
||||
|
||||
from .infill_methods.patchmatch import PatchMatch # noqa: F401
|
||||
from .pngwriter import PngWriter, PromptFormatter, retrieve_metadata, write_metadata # noqa: F401
|
||||
from .seamless import configure_model_padding # noqa: F401
|
||||
from .util import InitImageResizer, make_grid # noqa: F401
|
||||
|
||||
@@ -8,7 +8,7 @@ from pathlib import Path
|
||||
|
||||
import numpy as np
|
||||
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
|
||||
from PIL import Image
|
||||
from PIL import Image, ImageFilter
|
||||
from transformers import AutoFeatureExtractor
|
||||
|
||||
import invokeai.backend.util.logging as logger
|
||||
@@ -16,6 +16,7 @@ from invokeai.app.services.config.config_default import get_config
|
||||
from invokeai.backend.util.devices import TorchDevice
|
||||
from invokeai.backend.util.silence_warnings import SilenceWarnings
|
||||
|
||||
repo_id = "CompVis/stable-diffusion-safety-checker"
|
||||
CHECKER_PATH = "core/convert/stable-diffusion-safety-checker"
|
||||
|
||||
|
||||
@@ -24,30 +25,30 @@ class SafetyChecker:
|
||||
Wrapper around SafetyChecker model.
|
||||
"""
|
||||
|
||||
safety_checker = None
|
||||
feature_extractor = None
|
||||
tried_load: bool = False
|
||||
safety_checker = None
|
||||
|
||||
@classmethod
|
||||
def _load_safety_checker(cls):
|
||||
if cls.tried_load:
|
||||
if cls.safety_checker is not None and cls.feature_extractor is not None:
|
||||
return
|
||||
|
||||
try:
|
||||
cls.safety_checker = StableDiffusionSafetyChecker.from_pretrained(get_config().models_path / CHECKER_PATH)
|
||||
cls.feature_extractor = AutoFeatureExtractor.from_pretrained(get_config().models_path / CHECKER_PATH)
|
||||
model_path = get_config().models_path / CHECKER_PATH
|
||||
if model_path.exists():
|
||||
cls.feature_extractor = AutoFeatureExtractor.from_pretrained(model_path)
|
||||
cls.safety_checker = StableDiffusionSafetyChecker.from_pretrained(model_path)
|
||||
else:
|
||||
model_path.mkdir(parents=True, exist_ok=True)
|
||||
cls.feature_extractor = AutoFeatureExtractor.from_pretrained(repo_id)
|
||||
cls.feature_extractor.save_pretrained(model_path, safe_serialization=True)
|
||||
cls.safety_checker = StableDiffusionSafetyChecker.from_pretrained(repo_id)
|
||||
cls.safety_checker.save_pretrained(model_path, safe_serialization=True)
|
||||
except Exception as e:
|
||||
logger.warning(f"Could not load NSFW checker: {str(e)}")
|
||||
cls.tried_load = True
|
||||
|
||||
@classmethod
|
||||
def safety_checker_available(cls) -> bool:
|
||||
return Path(get_config().models_path, CHECKER_PATH).exists()
|
||||
|
||||
@classmethod
|
||||
def has_nsfw_concept(cls, image: Image.Image) -> bool:
|
||||
if not cls.safety_checker_available() and cls.tried_load:
|
||||
return False
|
||||
cls._load_safety_checker()
|
||||
if cls.safety_checker is None or cls.feature_extractor is None:
|
||||
return False
|
||||
@@ -60,3 +61,24 @@ class SafetyChecker:
|
||||
with SilenceWarnings():
|
||||
checked_image, has_nsfw_concept = cls.safety_checker(images=x_image, clip_input=features.pixel_values)
|
||||
return has_nsfw_concept[0]
|
||||
|
||||
@classmethod
|
||||
def blur_if_nsfw(cls, image: Image.Image) -> Image.Image:
|
||||
if cls.has_nsfw_concept(image):
|
||||
logger.warning("A potentially NSFW image has been detected. Image will be blurred.")
|
||||
blurry_image = image.filter(filter=ImageFilter.GaussianBlur(radius=32))
|
||||
caution = cls._get_caution_img()
|
||||
# Center the caution image on the blurred image
|
||||
x = (blurry_image.width - caution.width) // 2
|
||||
y = (blurry_image.height - caution.height) // 2
|
||||
blurry_image.paste(caution, (x, y), caution)
|
||||
image = blurry_image
|
||||
|
||||
return image
|
||||
|
||||
@classmethod
|
||||
def _get_caution_img(cls) -> Image.Image:
|
||||
import invokeai.app.assets.images as image_assets
|
||||
|
||||
caution = Image.open(Path(image_assets.__path__[0]) / "caution.png")
|
||||
return caution.resize((caution.width // 2, caution.height // 2))
|
||||
|
||||
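Typical use of the new `blur_if_nsfw` helper is a single call on a decoded PIL image: if the checker flags it, the result is the Gaussian-blurred original with the half-size caution icon pasted in the center; otherwise the image comes back unchanged. A usage sketch (file path and import path are illustrative):

from PIL import Image

# Import path assumed from the backend layout.
from invokeai.backend.image_util.safety_checker import SafetyChecker

image = Image.open("output.png")  # illustrative path
safe_image = SafetyChecker.blur_if_nsfw(image)
safe_image.save("output.png")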
@@ -1,52 +0,0 @@
|
||||
import torch.nn as nn
|
||||
|
||||
|
||||
def _conv_forward_asymmetric(self, input, weight, bias):
|
||||
"""
|
||||
Patch for Conv2d._conv_forward that supports asymmetric padding
|
||||
"""
|
||||
working = nn.functional.pad(input, self.asymmetric_padding["x"], mode=self.asymmetric_padding_mode["x"])
|
||||
working = nn.functional.pad(working, self.asymmetric_padding["y"], mode=self.asymmetric_padding_mode["y"])
|
||||
return nn.functional.conv2d(
|
||||
working,
|
||||
weight,
|
||||
bias,
|
||||
self.stride,
|
||||
nn.modules.utils._pair(0),
|
||||
self.dilation,
|
||||
self.groups,
|
||||
)
|
||||
|
||||
|
||||
def configure_model_padding(model, seamless, seamless_axes):
|
||||
"""
|
||||
Modifies the 2D convolution layers to use a circular padding mode based on
|
||||
the `seamless` and `seamless_axes` options.
|
||||
"""
|
||||
# TODO: get an explicit interface for this in diffusers: https://github.com/huggingface/diffusers/issues/556
|
||||
for m in model.modules():
|
||||
if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
|
||||
if seamless:
|
||||
m.asymmetric_padding_mode = {}
|
||||
m.asymmetric_padding = {}
|
||||
m.asymmetric_padding_mode["x"] = "circular" if ("x" in seamless_axes) else "constant"
|
||||
m.asymmetric_padding["x"] = (
|
||||
m._reversed_padding_repeated_twice[0],
|
||||
m._reversed_padding_repeated_twice[1],
|
||||
0,
|
||||
0,
|
||||
)
|
||||
m.asymmetric_padding_mode["y"] = "circular" if ("y" in seamless_axes) else "constant"
|
||||
m.asymmetric_padding["y"] = (
|
||||
0,
|
||||
0,
|
||||
m._reversed_padding_repeated_twice[2],
|
||||
m._reversed_padding_repeated_twice[3],
|
||||
)
|
||||
m._conv_forward = _conv_forward_asymmetric.__get__(m, nn.Conv2d)
|
||||
else:
|
||||
m._conv_forward = nn.Conv2d._conv_forward.__get__(m, nn.Conv2d)
|
||||
if hasattr(m, "asymmetric_padding_mode"):
|
||||
del m.asymmetric_padding_mode
|
||||
if hasattr(m, "asymmetric_padding"):
|
||||
del m.asymmetric_padding
|
||||
@@ -1,89 +1,51 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from contextlib import contextmanager
|
||||
from typing import Callable, List, Union
|
||||
from typing import Callable, List, Optional, Tuple, Union
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL
|
||||
from diffusers.models.autoencoders.autoencoder_tiny import AutoencoderTiny
|
||||
from diffusers.models.lora import LoRACompatibleConv
|
||||
from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
|
||||
|
||||
|
||||
def _conv_forward_asymmetric(self, input, weight, bias):
|
||||
"""
|
||||
Patch for Conv2d._conv_forward that supports asymmetric padding
|
||||
"""
|
||||
working = nn.functional.pad(input, self.asymmetric_padding["x"], mode=self.asymmetric_padding_mode["x"])
|
||||
working = nn.functional.pad(working, self.asymmetric_padding["y"], mode=self.asymmetric_padding_mode["y"])
|
||||
return nn.functional.conv2d(
|
||||
working,
|
||||
weight,
|
||||
bias,
|
||||
self.stride,
|
||||
nn.modules.utils._pair(0),
|
||||
self.dilation,
|
||||
self.groups,
|
||||
)
|
||||
|
||||
|
||||
@contextmanager
|
||||
def set_seamless(model: Union[UNet2DConditionModel, AutoencoderKL, AutoencoderTiny], seamless_axes: List[str]):
    if not seamless_axes:
        yield
        return

    # Callable: (input: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor
    to_restore: list[tuple[nn.Conv2d | nn.ConvTranspose2d, Callable]] = []
    # override conv_forward
    # https://github.com/huggingface/diffusers/issues/556#issuecomment-1993287019
    def _conv_forward_asymmetric(self, input: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor] = None):
        self.paddingX = (self._reversed_padding_repeated_twice[0], self._reversed_padding_repeated_twice[1], 0, 0)
        self.paddingY = (0, 0, self._reversed_padding_repeated_twice[2], self._reversed_padding_repeated_twice[3])
        working = torch.nn.functional.pad(input, self.paddingX, mode=x_mode)
        working = torch.nn.functional.pad(working, self.paddingY, mode=y_mode)
        return torch.nn.functional.conv2d(
            working, weight, bias, self.stride, torch.nn.modules.utils._pair(0), self.dilation, self.groups
        )

    original_layers: List[Tuple[nn.Conv2d, Callable]] = []

    try:
        # Hard coded to skip down block layers, allowing for seamless tiling at the expense of prompt adherence
        skipped_layers = 1
        for m_name, m in model.named_modules():
            if not isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                continue
        x_mode = "circular" if "x" in seamless_axes else "constant"
        y_mode = "circular" if "y" in seamless_axes else "constant"

            if isinstance(model, UNet2DConditionModel) and m_name.startswith("down_blocks.") and ".resnets." in m_name:
                # down_blocks.1.resnets.1.conv1
                _, block_num, _, resnet_num, submodule_name = m_name.split(".")
                block_num = int(block_num)
                resnet_num = int(resnet_num)
        conv_layers: List[torch.nn.Conv2d] = []

                if block_num >= len(model.down_blocks) - skipped_layers:
                    continue
        for module in model.modules():
            if isinstance(module, torch.nn.Conv2d):
                conv_layers.append(module)

                # Skip the second resnet (could be configurable)
                if resnet_num > 0:
                    continue

                # Skip Conv2d layers (could be configurable)
                if submodule_name == "conv2":
                    continue

            m.asymmetric_padding_mode = {}
            m.asymmetric_padding = {}
            m.asymmetric_padding_mode["x"] = "circular" if ("x" in seamless_axes) else "constant"
            m.asymmetric_padding["x"] = (
                m._reversed_padding_repeated_twice[0],
                m._reversed_padding_repeated_twice[1],
                0,
                0,
            )
            m.asymmetric_padding_mode["y"] = "circular" if ("y" in seamless_axes) else "constant"
            m.asymmetric_padding["y"] = (
                0,
                0,
                m._reversed_padding_repeated_twice[2],
                m._reversed_padding_repeated_twice[3],
            )

            to_restore.append((m, m._conv_forward))
            m._conv_forward = _conv_forward_asymmetric.__get__(m, nn.Conv2d)
        for layer in conv_layers:
            if isinstance(layer, LoRACompatibleConv) and layer.lora_layer is None:
                layer.lora_layer = lambda *x: 0
            original_layers.append((layer, layer._conv_forward))
            layer._conv_forward = _conv_forward_asymmetric.__get__(layer, torch.nn.Conv2d)

        yield

    finally:
        for module, orig_conv_forward in to_restore:
            module._conv_forward = orig_conv_forward
            if hasattr(module, "asymmetric_padding_mode"):
                del module.asymmetric_padding_mode
            if hasattr(module, "asymmetric_padding"):
                del module.asymmetric_padding
        for layer, orig_conv_forward in original_layers:
            layer._conv_forward = orig_conv_forward
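The hunk above swaps the attribute-based padding override for a direct monkey-patch of each Conv2d's _conv_forward: the x and/or y axis is padded in "circular" mode before every convolution, so the generated image wraps at the corresponding edges. The sketch below is a minimal, self-contained illustration of that trick using plain torch; the seamless_x helper name, the toy Conv2d, and the choice of axes are assumptions for demonstration, not InvokeAI's API.

# Minimal sketch of the seamless-tiling trick from the hunk above: pad the x axis
# with "circular" mode and the y axis with zeros before every convolution, so the
# left/right edges of the output wrap around. Helper name and toy module are assumptions.
from contextlib import contextmanager
from typing import Callable, List, Tuple

import torch
from torch import nn


@contextmanager
def seamless_x(model: nn.Module):
    x_mode, y_mode = "circular", "constant"

    def _conv_forward_asymmetric(self, input, weight, bias=None):
        pad_x = (self._reversed_padding_repeated_twice[0], self._reversed_padding_repeated_twice[1], 0, 0)
        pad_y = (0, 0, self._reversed_padding_repeated_twice[2], self._reversed_padding_repeated_twice[3])
        working = torch.nn.functional.pad(input, pad_x, mode=x_mode)
        working = torch.nn.functional.pad(working, pad_y, mode=y_mode)
        return torch.nn.functional.conv2d(
            working, weight, bias, self.stride, torch.nn.modules.utils._pair(0), self.dilation, self.groups
        )

    originals: List[Tuple[nn.Conv2d, Callable]] = []
    try:
        # Patch every Conv2d in the module tree, remembering the original forward.
        for layer in model.modules():
            if isinstance(layer, nn.Conv2d):
                originals.append((layer, layer._conv_forward))
                layer._conv_forward = _conv_forward_asymmetric.__get__(layer, nn.Conv2d)
        yield
    finally:
        # Restore the original conv forwards on exit.
        for layer, orig in originals:
            layer._conv_forward = orig


toy = nn.Conv2d(3, 3, kernel_size=3, padding=1)
image = torch.randn(1, 3, 8, 8)
with seamless_x(toy):
    out = toy(image)  # left/right edges now "see" each other via circular padding
print(out.shape)  # torch.Size([1, 3, 8, 8])

In the hunk itself, the same override is applied to every Conv2d found via model.modules(), and LoRACompatibleConv layers are given a no-op lora_layer, presumably so the patched forward stays compatible with diffusers' LoRA wrapper.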
@@ -10,6 +10,8 @@ module.exports = {
    'path/no-relative-imports': ['error', { maxDepth: 0 }],
    // https://github.com/edvardchen/eslint-plugin-i18next/blob/HEAD/docs/rules/no-literal-string.md
    'i18next/no-literal-string': 'error',
    // https://eslint.org/docs/latest/rules/no-console
    'no-console': 'error',
  },
  overrides: [
    /**
3  invokeai/frontend/web/.gitignore  vendored
@@ -43,4 +43,5 @@ stats.html
yalc.lock

# vitest
tsconfig.vitest-temp.json
tsconfig.vitest-temp.json
coverage/
@@ -35,6 +35,7 @@
    "storybook": "storybook dev -p 6006",
    "build-storybook": "storybook build",
    "test": "vitest",
    "test:ui": "vitest --coverage --ui",
    "test:no-watch": "vitest --no-watch"
  },
  "madge": {
@@ -132,6 +133,8 @@
    "@types/react-dom": "^18.3.0",
    "@types/uuid": "^9.0.8",
    "@vitejs/plugin-react-swc": "^3.6.0",
    "@vitest/coverage-v8": "^1.5.0",
    "@vitest/ui": "^1.5.0",
    "concurrently": "^8.2.2",
    "dpdm": "^3.14.0",
    "eslint": "^8.57.0",
148  invokeai/frontend/web/pnpm-lock.yaml  generated
@@ -229,6 +229,12 @@ devDependencies:
|
||||
'@vitejs/plugin-react-swc':
|
||||
specifier: ^3.6.0
|
||||
version: 3.6.0(vite@5.2.11)
|
||||
'@vitest/coverage-v8':
|
||||
specifier: ^1.5.0
|
||||
version: 1.6.0(vitest@1.6.0)
|
||||
'@vitest/ui':
|
||||
specifier: ^1.5.0
|
||||
version: 1.6.0(vitest@1.6.0)
|
||||
concurrently:
|
||||
specifier: ^8.2.2
|
||||
version: 8.2.2
|
||||
@@ -288,7 +294,7 @@ devDependencies:
|
||||
version: 4.3.2(typescript@5.4.5)(vite@5.2.11)
|
||||
vitest:
|
||||
specifier: ^1.6.0
|
||||
version: 1.6.0(@types/node@20.12.10)
|
||||
version: 1.6.0(@types/node@20.12.10)(@vitest/ui@1.6.0)
|
||||
|
||||
packages:
|
||||
|
||||
@@ -1679,6 +1685,10 @@ packages:
|
||||
resolution: {integrity: sha512-4iri8i1AqYHJE2DstZYkyEprg6Pq6sKx3xn5FpySk9sNhH7qN2LLlHJCfDTZRILNwQNPD7mATWM0TBui7uC1pA==}
|
||||
dev: true
|
||||
|
||||
/@bcoe/v8-coverage@0.2.3:
|
||||
resolution: {integrity: sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==}
|
||||
dev: true
|
||||
|
||||
/@chakra-ui/accordion@2.3.1(@chakra-ui/system@2.6.2)(framer-motion@10.18.0)(react@18.3.1):
|
||||
resolution: {integrity: sha512-FSXRm8iClFyU+gVaXisOSEw0/4Q+qZbFRiuhIAkVU6Boj0FxAMrlo9a8AV5TuF77rgaHytCdHk0Ng+cyUijrag==}
|
||||
peerDependencies:
|
||||
@@ -3635,6 +3645,11 @@ packages:
|
||||
wrap-ansi-cjs: /wrap-ansi@7.0.0
|
||||
dev: true
|
||||
|
||||
/@istanbuljs/schema@0.1.3:
|
||||
resolution: {integrity: sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==}
|
||||
engines: {node: '>=8'}
|
||||
dev: true
|
||||
|
||||
/@jest/schemas@29.6.3:
|
||||
resolution: {integrity: sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==}
|
||||
engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0}
|
||||
@@ -3822,6 +3837,10 @@ packages:
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@polka/url@1.0.0-next.25:
|
||||
resolution: {integrity: sha512-j7P6Rgr3mmtdkeDGTe0E/aYyWEWVtc5yFXtHCRHs28/jptDEWfaVOc5T7cblqy1XKPPfCxJc/8DwQ5YgLOZOVQ==}
|
||||
dev: true
|
||||
|
||||
/@popperjs/core@2.11.8:
|
||||
resolution: {integrity: sha512-P1st0aksCrn9sGZhp8GMYwBnQsbvAWsZAX44oXNNvLHGqAOcoVxmjZiohstwQ7SqKnbR47akdNi+uleWD8+g6A==}
|
||||
dev: false
|
||||
@@ -5146,7 +5165,7 @@ packages:
|
||||
dom-accessibility-api: 0.6.3
|
||||
lodash: 4.17.21
|
||||
redent: 3.0.0
|
||||
vitest: 1.6.0(@types/node@20.12.10)
|
||||
vitest: 1.6.0(@types/node@20.12.10)(@vitest/ui@1.6.0)
|
||||
dev: true
|
||||
|
||||
/@testing-library/user-event@14.5.2(@testing-library/dom@9.3.4):
|
||||
@@ -5825,6 +5844,29 @@ packages:
|
||||
- '@swc/helpers'
|
||||
dev: true
|
||||
|
||||
/@vitest/coverage-v8@1.6.0(vitest@1.6.0):
|
||||
resolution: {integrity: sha512-KvapcbMY/8GYIG0rlwwOKCVNRc0OL20rrhFkg/CHNzncV03TE2XWvO5w9uZYoxNiMEBacAJt3unSOiZ7svePew==}
|
||||
peerDependencies:
|
||||
vitest: 1.6.0
|
||||
dependencies:
|
||||
'@ampproject/remapping': 2.3.0
|
||||
'@bcoe/v8-coverage': 0.2.3
|
||||
debug: 4.3.4
|
||||
istanbul-lib-coverage: 3.2.2
|
||||
istanbul-lib-report: 3.0.1
|
||||
istanbul-lib-source-maps: 5.0.4
|
||||
istanbul-reports: 3.1.7
|
||||
magic-string: 0.30.10
|
||||
magicast: 0.3.4
|
||||
picocolors: 1.0.0
|
||||
std-env: 3.7.0
|
||||
strip-literal: 2.1.0
|
||||
test-exclude: 6.0.0
|
||||
vitest: 1.6.0(@types/node@20.12.10)(@vitest/ui@1.6.0)
|
||||
transitivePeerDependencies:
|
||||
- supports-color
|
||||
dev: true
|
||||
|
||||
/@vitest/expect@1.3.1:
|
||||
resolution: {integrity: sha512-xofQFwIzfdmLLlHa6ag0dPV8YsnKOCP1KdAeVVh34vSjN2dcUiXYCD9htu/9eM7t8Xln4v03U9HLxLpPlsXdZw==}
|
||||
dependencies:
|
||||
@@ -5869,6 +5911,21 @@ packages:
|
||||
tinyspy: 2.2.1
|
||||
dev: true
|
||||
|
||||
/@vitest/ui@1.6.0(vitest@1.6.0):
|
||||
resolution: {integrity: sha512-k3Lyo+ONLOgylctiGovRKy7V4+dIN2yxstX3eY5cWFXH6WP+ooVX79YSyi0GagdTQzLmT43BF27T0s6dOIPBXA==}
|
||||
peerDependencies:
|
||||
vitest: 1.6.0
|
||||
dependencies:
|
||||
'@vitest/utils': 1.6.0
|
||||
fast-glob: 3.3.2
|
||||
fflate: 0.8.2
|
||||
flatted: 3.3.1
|
||||
pathe: 1.1.2
|
||||
picocolors: 1.0.0
|
||||
sirv: 2.0.4
|
||||
vitest: 1.6.0(@types/node@20.12.10)(@vitest/ui@1.6.0)
|
||||
dev: true
|
||||
|
||||
/@vitest/utils@1.3.1:
|
||||
resolution: {integrity: sha512-d3Waie/299qqRyHTm2DjADeTaNdNSVsnwHPWrs20JMpjh6eiVq7ggggweO8rc4arhf6rRkWuHKwvxGvejUXZZQ==}
|
||||
dependencies:
|
||||
@@ -8521,6 +8578,10 @@ packages:
|
||||
resolution: {integrity: sha512-3yurQZ2hD9VISAhJJP9bpYFNQrHHBXE2JxxjY5aLEcDi46RmAzJE2OC9FAde0yis5ElW0jTTzs0zfg/Cca4XqQ==}
|
||||
dev: true
|
||||
|
||||
/fflate@0.8.2:
|
||||
resolution: {integrity: sha512-cPJU47OaAoCbg0pBvzsgpTPhmhqI5eJjh/JIu8tPj5q+T7iLvW/JAYUqmE7KOB4R1ZyEhzBaIQpQpardBF5z8A==}
|
||||
dev: true
|
||||
|
||||
/file-entry-cache@6.0.1:
|
||||
resolution: {integrity: sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==}
|
||||
engines: {node: ^10.12.0 || >=12.0.0}
|
||||
@@ -9084,6 +9145,10 @@ packages:
|
||||
resolution: {integrity: sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==}
|
||||
dev: true
|
||||
|
||||
/html-escaper@2.0.2:
|
||||
resolution: {integrity: sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==}
|
||||
dev: true
|
||||
|
||||
/html-parse-stringify@3.0.1:
|
||||
resolution: {integrity: sha512-KknJ50kTInJ7qIScF3jeaFRpMpE8/lfiTdzf/twXyPBLAGrLRTmkz3AdTnKeh40X8k9L2fdYwEp/42WGXIRGcg==}
|
||||
dependencies:
|
||||
@@ -9513,6 +9578,39 @@ packages:
|
||||
engines: {node: '>=0.10.0'}
|
||||
dev: true
|
||||
|
||||
/istanbul-lib-coverage@3.2.2:
|
||||
resolution: {integrity: sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==}
|
||||
engines: {node: '>=8'}
|
||||
dev: true
|
||||
|
||||
/istanbul-lib-report@3.0.1:
|
||||
resolution: {integrity: sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==}
|
||||
engines: {node: '>=10'}
|
||||
dependencies:
|
||||
istanbul-lib-coverage: 3.2.2
|
||||
make-dir: 4.0.0
|
||||
supports-color: 7.2.0
|
||||
dev: true
|
||||
|
||||
/istanbul-lib-source-maps@5.0.4:
|
||||
resolution: {integrity: sha512-wHOoEsNJTVltaJp8eVkm8w+GVkVNHT2YDYo53YdzQEL2gWm1hBX5cGFR9hQJtuGLebidVX7et3+dmDZrmclduw==}
|
||||
engines: {node: '>=10'}
|
||||
dependencies:
|
||||
'@jridgewell/trace-mapping': 0.3.25
|
||||
debug: 4.3.4
|
||||
istanbul-lib-coverage: 3.2.2
|
||||
transitivePeerDependencies:
|
||||
- supports-color
|
||||
dev: true
|
||||
|
||||
/istanbul-reports@3.1.7:
|
||||
resolution: {integrity: sha512-BewmUXImeuRk2YY0PVbxgKAysvhRPUQE0h5QRM++nVWyubKGV0l8qQ5op8+B2DOmwSe63Jivj0BjkPQVf8fP5g==}
|
||||
engines: {node: '>=8'}
|
||||
dependencies:
|
||||
html-escaper: 2.0.2
|
||||
istanbul-lib-report: 3.0.1
|
||||
dev: true
|
||||
|
||||
/iterable-lookahead@1.0.0:
|
||||
resolution: {integrity: sha512-hJnEP2Xk4+44DDwJqUQGdXal5VbyeWLaPyDl2AQc242Zr7iqz4DgpQOrEzglWVMGHMDCkguLHEKxd1+rOsmgSQ==}
|
||||
engines: {node: '>=4'}
|
||||
@@ -9912,6 +10010,14 @@ packages:
|
||||
'@jridgewell/sourcemap-codec': 1.4.15
|
||||
dev: true
|
||||
|
||||
/magicast@0.3.4:
|
||||
resolution: {integrity: sha512-TyDF/Pn36bBji9rWKHlZe+PZb6Mx5V8IHCSxk7X4aljM4e/vyDvZZYwHewdVaqiA0nb3ghfHU/6AUpDxWoER2Q==}
|
||||
dependencies:
|
||||
'@babel/parser': 7.24.5
|
||||
'@babel/types': 7.24.5
|
||||
source-map-js: 1.2.0
|
||||
dev: true
|
||||
|
||||
/make-dir@2.1.0:
|
||||
resolution: {integrity: sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==}
|
||||
engines: {node: '>=6'}
|
||||
@@ -9927,6 +10033,13 @@ packages:
|
||||
semver: 6.3.1
|
||||
dev: true
|
||||
|
||||
/make-dir@4.0.0:
|
||||
resolution: {integrity: sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==}
|
||||
engines: {node: '>=10'}
|
||||
dependencies:
|
||||
semver: 7.6.0
|
||||
dev: true
|
||||
|
||||
/map-obj@2.0.0:
|
||||
resolution: {integrity: sha512-TzQSV2DiMYgoF5RycneKVUzIa9bQsj/B3tTgsE3dOGqlzHnGIDaC7XBE7grnA+8kZPnfqSGFe95VHc2oc0VFUQ==}
|
||||
engines: {node: '>=4'}
|
||||
@@ -10101,6 +10214,11 @@ packages:
|
||||
resolution: {integrity: sha512-iSAJLHYKnX41mKcJKjqvnAN9sf0LMDTXDEvFv+ffuRR9a1MIuXLjMNL6EsnDHSkKLTWNqQQ5uo61P4EbU4NU+Q==}
|
||||
dev: false
|
||||
|
||||
/mrmime@2.0.0:
|
||||
resolution: {integrity: sha512-eu38+hdgojoyq63s+yTpN4XMBdt5l8HhMhc4VKLO9KM5caLIBvUm4thi7fFaxyTmCKeNnXZ5pAlBwCUnhA09uw==}
|
||||
engines: {node: '>=10'}
|
||||
dev: true
|
||||
|
||||
/ms@2.0.0:
|
||||
resolution: {integrity: sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==}
|
||||
dev: true
|
||||
@@ -11766,6 +11884,15 @@ packages:
|
||||
engines: {node: '>=14'}
|
||||
dev: true
|
||||
|
||||
/sirv@2.0.4:
|
||||
resolution: {integrity: sha512-94Bdh3cC2PKrbgSOUqTiGPWVZeSiXfKOVZNJniWoqrWrRkB1CJzBU3NEbiTsPcYy1lDsANA/THzS+9WBiy5nfQ==}
|
||||
engines: {node: '>= 10'}
|
||||
dependencies:
|
||||
'@polka/url': 1.0.0-next.25
|
||||
mrmime: 2.0.0
|
||||
totalist: 3.0.1
|
||||
dev: true
|
||||
|
||||
/sisteransi@1.0.5:
|
||||
resolution: {integrity: sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==}
|
||||
dev: true
|
||||
@@ -12191,6 +12318,15 @@ packages:
|
||||
unique-string: 2.0.0
|
||||
dev: true
|
||||
|
||||
/test-exclude@6.0.0:
|
||||
resolution: {integrity: sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==}
|
||||
engines: {node: '>=8'}
|
||||
dependencies:
|
||||
'@istanbuljs/schema': 0.1.3
|
||||
glob: 7.2.3
|
||||
minimatch: 3.1.2
|
||||
dev: true
|
||||
|
||||
/text-table@0.2.0:
|
||||
resolution: {integrity: sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==}
|
||||
dev: true
|
||||
@@ -12264,6 +12400,11 @@ packages:
|
||||
engines: {node: '>=0.6'}
|
||||
dev: true
|
||||
|
||||
/totalist@3.0.1:
|
||||
resolution: {integrity: sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ==}
|
||||
engines: {node: '>=6'}
|
||||
dev: true
|
||||
|
||||
/tr46@0.0.3:
|
||||
resolution: {integrity: sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==}
|
||||
|
||||
@@ -12837,7 +12978,7 @@ packages:
|
||||
fsevents: 2.3.3
|
||||
dev: true
|
||||
|
||||
/vitest@1.6.0(@types/node@20.12.10):
|
||||
/vitest@1.6.0(@types/node@20.12.10)(@vitest/ui@1.6.0):
|
||||
resolution: {integrity: sha512-H5r/dN06swuFnzNFhq/dnz37bPXnq8xB2xB5JOVk8K09rUtoeNN+LHWkoQ0A/i3hvbUKKcCei9KpbxqHMLhLLA==}
|
||||
engines: {node: ^18.0.0 || >=20.0.0}
|
||||
hasBin: true
|
||||
@@ -12867,6 +13008,7 @@ packages:
|
||||
'@vitest/runner': 1.6.0
|
||||
'@vitest/snapshot': 1.6.0
|
||||
'@vitest/spy': 1.6.0
|
||||
'@vitest/ui': 1.6.0(vitest@1.6.0)
|
||||
'@vitest/utils': 1.6.0
|
||||
acorn-walk: 8.3.2
|
||||
chai: 4.4.1
|
||||
|
||||
@@ -76,7 +76,9 @@
|
||||
"aboutHeading": "Nutzen Sie Ihre kreative Energie",
|
||||
"toResolve": "Lösen",
|
||||
"add": "Hinzufügen",
|
||||
"loglevel": "Protokoll Stufe"
|
||||
"loglevel": "Protokoll Stufe",
|
||||
"selected": "Ausgewählt",
|
||||
"beta": "Beta"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Bildgröße",
|
||||
@@ -86,7 +88,7 @@
|
||||
"noImagesInGallery": "Keine Bilder in der Galerie",
|
||||
"loading": "Lade",
|
||||
"deleteImage_one": "Lösche Bild",
|
||||
"deleteImage_other": "",
|
||||
"deleteImage_other": "Lösche {{count}} Bilder",
|
||||
"copy": "Kopieren",
|
||||
"download": "Runterladen",
|
||||
"setCurrentImage": "Setze aktuelle Bild",
|
||||
@@ -397,7 +399,14 @@
|
||||
"cancel": "Stornieren",
|
||||
"defaultSettingsSaved": "Standardeinstellungen gespeichert",
|
||||
"addModels": "Model hinzufügen",
|
||||
"deleteModelImage": "Lösche Model Bild"
|
||||
"deleteModelImage": "Lösche Model Bild",
|
||||
"hfTokenInvalidErrorMessage": "Falscher oder fehlender HuggingFace Schlüssel.",
|
||||
"huggingFaceRepoID": "HuggingFace Repo ID",
|
||||
"hfToken": "HuggingFace Schlüssel",
|
||||
"hfTokenInvalid": "Falscher oder fehlender HF Schlüssel",
|
||||
"huggingFacePlaceholder": "besitzer/model-name",
|
||||
"hfTokenSaved": "HF Schlüssel gespeichert",
|
||||
"hfTokenUnableToVerify": "Konnte den HF Schlüssel nicht validieren"
|
||||
},
|
||||
"parameters": {
|
||||
"images": "Bilder",
|
||||
@@ -686,7 +695,11 @@
|
||||
"hands": "Hände",
|
||||
"dwOpenpose": "DW Openpose",
|
||||
"dwOpenposeDescription": "Posenschätzung mit DW Openpose",
|
||||
"selectCLIPVisionModel": "Wähle ein CLIP Vision Model aus"
|
||||
"selectCLIPVisionModel": "Wähle ein CLIP Vision Model aus",
|
||||
"ipAdapterMethod": "Methode",
|
||||
"composition": "Nur Komposition",
|
||||
"full": "Voll",
|
||||
"style": "Nur Style"
|
||||
},
|
||||
"queue": {
|
||||
"status": "Status",
|
||||
@@ -717,7 +730,6 @@
|
||||
"resume": "Wieder aufnehmen",
|
||||
"item": "Auftrag",
|
||||
"notReady": "Warteschlange noch nicht bereit",
|
||||
"queueCountPrediction": "{{promptsCount}} Prompts × {{iterations}} Iterationen -> {{count}} Generationen",
|
||||
"clearQueueAlertDialog": "\"Die Warteschlange leeren\" stoppt den aktuellen Prozess und leert die Warteschlange komplett.",
|
||||
"completedIn": "Fertig in",
|
||||
"cancelBatchSucceeded": "Stapel abgebrochen",
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
"accessibility": {
|
||||
"about": "About",
|
||||
"createIssue": "Create Issue",
|
||||
"submitSupportTicket": "Submit Support Ticket",
|
||||
"invokeProgressBar": "Invoke progress bar",
|
||||
"menu": "Menu",
|
||||
"mode": "Mode",
|
||||
@@ -146,7 +147,9 @@
|
||||
"viewing": "Viewing",
|
||||
"viewingDesc": "Review images in a large gallery view",
|
||||
"editing": "Editing",
|
||||
"editingDesc": "Edit on the Control Layers canvas"
|
||||
"editingDesc": "Edit on the Control Layers canvas",
|
||||
"enabled": "Enabled",
|
||||
"disabled": "Disabled"
|
||||
},
|
||||
"controlnet": {
|
||||
"controlAdapter_one": "Control Adapter",
|
||||
@@ -261,7 +264,6 @@
|
||||
"queue": "Queue",
|
||||
"queueFront": "Add to Front of Queue",
|
||||
"queueBack": "Add to Queue",
|
||||
"queueCountPrediction": "{{promptsCount}} prompts \u00d7 {{iterations}} iterations -> {{count}} generations",
|
||||
"queueEmpty": "Queue Empty",
|
||||
"enqueueing": "Queueing Batch",
|
||||
"resume": "Resume",
|
||||
@@ -314,7 +316,13 @@
|
||||
"batchFailedToQueue": "Failed to Queue Batch",
|
||||
"graphQueued": "Graph queued",
|
||||
"graphFailedToQueue": "Failed to queue graph",
|
||||
"openQueue": "Open Queue"
|
||||
"openQueue": "Open Queue",
|
||||
"prompts_one": "Prompt",
|
||||
"prompts_other": "Prompts",
|
||||
"iterations_one": "Iteration",
|
||||
"iterations_other": "Iterations",
|
||||
"generations_one": "Generation",
|
||||
"generations_other": "Generations"
|
||||
},
|
||||
"invocationCache": {
|
||||
"invocationCache": "Invocation Cache",
|
||||
@@ -769,10 +777,15 @@
|
||||
"cannotConnectOutputToOutput": "Cannot connect output to output",
|
||||
"cannotConnectToSelf": "Cannot connect to self",
|
||||
"cannotDuplicateConnection": "Cannot create duplicate connections",
|
||||
"cannotMixAndMatchCollectionItemTypes": "Cannot mix and match collection item types",
|
||||
"missingNode": "Missing invocation node",
|
||||
"missingInvocationTemplate": "Missing invocation template",
|
||||
"missingFieldTemplate": "Missing field template",
|
||||
"nodePack": "Node pack",
|
||||
"collection": "Collection",
|
||||
"collectionFieldType": "{{name}} Collection",
|
||||
"collectionOrScalarFieldType": "{{name}} Collection|Scalar",
|
||||
"singleFieldType": "{{name}} (Single)",
|
||||
"collectionFieldType": "{{name}} (Collection)",
|
||||
"collectionOrScalarFieldType": "{{name}} (Single or Collection)",
|
||||
"colorCodeEdges": "Color-Code Edges",
|
||||
"colorCodeEdgesHelp": "Color-code edges according to their connected fields",
|
||||
"connectionWouldCreateCycle": "Connection would create a cycle",
|
||||
@@ -874,6 +887,7 @@
|
||||
"versionUnknown": " Version Unknown",
|
||||
"workflow": "Workflow",
|
||||
"graph": "Graph",
|
||||
"noGraph": "No Graph",
|
||||
"workflowAuthor": "Author",
|
||||
"workflowContact": "Contact",
|
||||
"workflowDescription": "Short Description",
|
||||
@@ -886,7 +900,10 @@
|
||||
"zoomInNodes": "Zoom In",
|
||||
"zoomOutNodes": "Zoom Out",
|
||||
"betaDesc": "This invocation is in beta. Until it is stable, it may have breaking changes during app updates. We plan to support this invocation long-term.",
|
||||
"prototypeDesc": "This invocation is a prototype. It may have breaking changes during app updates and may be removed at any time."
|
||||
"prototypeDesc": "This invocation is a prototype. It may have breaking changes during app updates and may be removed at any time.",
|
||||
"imageAccessError": "Unable to find image {{image_name}}, resetting to default",
|
||||
"boardAccessError": "Unable to find board {{board_id}}, resetting to default",
|
||||
"modelAccessError": "Unable to find model {{key}}, resetting to default"
|
||||
},
|
||||
"parameters": {
|
||||
"aspect": "Aspect",
|
||||
@@ -934,7 +951,20 @@
|
||||
"noModelSelected": "No model selected",
|
||||
"noPrompts": "No prompts generated",
|
||||
"noNodesInGraph": "No nodes in graph",
|
||||
"systemDisconnected": "System disconnected"
|
||||
"systemDisconnected": "System disconnected",
|
||||
"layer": {
|
||||
"initialImageNoImageSelected": "no initial image selected",
|
||||
"controlAdapterNoModelSelected": "no Control Adapter model selected",
|
||||
"controlAdapterIncompatibleBaseModel": "incompatible Control Adapter base model",
|
||||
"controlAdapterNoImageSelected": "no Control Adapter image selected",
|
||||
"controlAdapterImageNotProcessed": "Control Adapter image not processed",
|
||||
"t2iAdapterIncompatibleDimensions": "T2I Adapter requires image dimension to be multiples of {{multiple}}",
|
||||
"ipAdapterNoModelSelected": "no IP adapter selected",
|
||||
"ipAdapterIncompatibleBaseModel": "incompatible IP Adapter base model",
|
||||
"ipAdapterNoImageSelected": "no IP Adapter image selected",
|
||||
"rgNoPromptsOrIPAdapters": "no text prompts or IP Adapters",
|
||||
"rgNoRegion": "no region selected"
|
||||
}
|
||||
},
|
||||
"maskBlur": "Mask Blur",
|
||||
"negativePromptPlaceholder": "Negative Prompt",
|
||||
@@ -945,8 +975,6 @@
|
||||
"positivePromptPlaceholder": "Positive Prompt",
|
||||
"globalPositivePromptPlaceholder": "Global Positive Prompt",
|
||||
"iterations": "Iterations",
|
||||
"iterationsWithCount_one": "{{count}} Iteration",
|
||||
"iterationsWithCount_other": "{{count}} Iterations",
|
||||
"scale": "Scale",
|
||||
"scaleBeforeProcessing": "Scale Before Processing",
|
||||
"scaledHeight": "Scaled H",
|
||||
@@ -1048,8 +1076,9 @@
|
||||
},
|
||||
"toast": {
|
||||
"addedToBoard": "Added to board",
|
||||
"baseModelChangedCleared_one": "Base model changed, cleared or disabled {{count}} incompatible submodel",
|
||||
"baseModelChangedCleared_other": "Base model changed, cleared or disabled {{count}} incompatible submodels",
|
||||
"baseModelChanged": "Base Model Changed",
|
||||
"baseModelChangedCleared_one": "Cleared or disabled {{count}} incompatible submodel",
|
||||
"baseModelChangedCleared_other": "Cleared or disabled {{count}} incompatible submodels",
|
||||
"canceled": "Processing Canceled",
|
||||
"canvasCopiedClipboard": "Canvas Copied to Clipboard",
|
||||
"canvasDownloaded": "Canvas Downloaded",
|
||||
@@ -1070,10 +1099,17 @@
|
||||
"metadataLoadFailed": "Failed to load metadata",
|
||||
"modelAddedSimple": "Model Added to Queue",
|
||||
"modelImportCanceled": "Model Import Canceled",
|
||||
"outOfMemoryError": "Out of Memory Error",
|
||||
"outOfMemoryErrorDesc": "Your current generation settings exceed system capacity. Please adjust your settings and try again.",
|
||||
"parameters": "Parameters",
|
||||
"parameterNotSet": "{{parameter}} not set",
|
||||
"parameterSet": "{{parameter}} set",
|
||||
"parametersNotSet": "Parameters Not Set",
|
||||
"parameterSet": "Parameter Recalled",
|
||||
"parameterSetDesc": "Recalled {{parameter}}",
|
||||
"parameterNotSet": "Parameter Recalled",
|
||||
"parameterNotSetDesc": "Unable to recall {{parameter}}",
|
||||
"parameterNotSetDescWithMessage": "Unable to recall {{parameter}}: {{message}}",
|
||||
"parametersSet": "Parameters Recalled",
|
||||
"parametersNotSet": "Parameters Not Recalled",
|
||||
"errorCopied": "Error Copied",
|
||||
"problemCopyingCanvas": "Problem Copying Canvas",
|
||||
"problemCopyingCanvasDesc": "Unable to export base layer",
|
||||
"problemCopyingImage": "Unable to Copy Image",
|
||||
@@ -1093,11 +1129,13 @@
|
||||
"sentToImageToImage": "Sent To Image To Image",
|
||||
"sentToUnifiedCanvas": "Sent to Unified Canvas",
|
||||
"serverError": "Server Error",
|
||||
"sessionRef": "Session: {{sessionId}}",
|
||||
"setAsCanvasInitialImage": "Set as canvas initial image",
|
||||
"setCanvasInitialImage": "Set canvas initial image",
|
||||
"setControlImage": "Set as control image",
|
||||
"setInitialImage": "Set as initial image",
|
||||
"setNodeField": "Set as node field",
|
||||
"somethingWentWrong": "Something Went Wrong",
|
||||
"uploadFailed": "Upload failed",
|
||||
"uploadFailedInvalidUploadDesc": "Must be single PNG or JPEG image",
|
||||
"uploadInitialImage": "Upload Initial Image",
|
||||
@@ -1537,7 +1575,6 @@
|
||||
"controlLayers": "Control Layers",
|
||||
"globalMaskOpacity": "Global Mask Opacity",
|
||||
"autoNegative": "Auto Negative",
|
||||
"toggleVisibility": "Toggle Layer Visibility",
|
||||
"deletePrompt": "Delete Prompt",
|
||||
"resetRegion": "Reset Region",
|
||||
"debugLayers": "Debug Layers",
|
||||
|
||||
@@ -25,7 +25,24 @@
|
||||
"areYouSure": "¿Estas seguro?",
|
||||
"batch": "Administrador de lotes",
|
||||
"modelManager": "Administrador de modelos",
|
||||
"communityLabel": "Comunidad"
|
||||
"communityLabel": "Comunidad",
|
||||
"direction": "Dirección",
|
||||
"ai": "Ia",
|
||||
"add": "Añadir",
|
||||
"auto": "Automático",
|
||||
"copyError": "Error $t(gallery.copy)",
|
||||
"details": "Detalles",
|
||||
"or": "o",
|
||||
"checkpoint": "Punto de control",
|
||||
"controlNet": "ControlNet",
|
||||
"aboutHeading": "Sea dueño de su poder creativo",
|
||||
"advanced": "Avanzado",
|
||||
"data": "Fecha",
|
||||
"delete": "Borrar",
|
||||
"copy": "Copiar",
|
||||
"beta": "Beta",
|
||||
"on": "En",
|
||||
"aboutDesc": "¿Utilizas Invoke para trabajar? Mira aquí:"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Tamaño de la imagen",
|
||||
@@ -365,7 +382,7 @@
|
||||
"canvasMerged": "Lienzo consolidado",
|
||||
"sentToImageToImage": "Enviar hacia Imagen a Imagen",
|
||||
"sentToUnifiedCanvas": "Enviar hacia Lienzo Consolidado",
|
||||
"parametersNotSet": "Parámetros no establecidos",
|
||||
"parametersNotSet": "Parámetros no recuperados",
|
||||
"metadataLoadFailed": "Error al cargar metadatos",
|
||||
"serverError": "Error en el servidor",
|
||||
"canceled": "Procesando la cancelación",
|
||||
@@ -373,7 +390,8 @@
|
||||
"uploadFailedInvalidUploadDesc": "Debe ser una sola imagen PNG o JPEG",
|
||||
"parameterSet": "Conjunto de parámetros",
|
||||
"parameterNotSet": "Parámetro no configurado",
|
||||
"problemCopyingImage": "No se puede copiar la imagen"
|
||||
"problemCopyingImage": "No se puede copiar la imagen",
|
||||
"errorCopied": "Error al copiar"
|
||||
},
|
||||
"tooltip": {
|
||||
"feature": {
|
||||
@@ -443,7 +461,13 @@
|
||||
"previousImage": "Imagen anterior",
|
||||
"nextImage": "Siguiente imagen",
|
||||
"showOptionsPanel": "Mostrar el panel lateral",
|
||||
"menu": "Menú"
|
||||
"menu": "Menú",
|
||||
"showGalleryPanel": "Mostrar panel de galería",
|
||||
"loadMore": "Cargar más",
|
||||
"about": "Acerca de",
|
||||
"createIssue": "Crear un problema",
|
||||
"resetUI": "Interfaz de usuario $t(accessibility.reset)",
|
||||
"mode": "Modo"
|
||||
},
|
||||
"nodes": {
|
||||
"zoomInNodes": "Acercar",
|
||||
@@ -456,5 +480,68 @@
|
||||
"reloadNodeTemplates": "Recargar las plantillas de nodos",
|
||||
"loadWorkflow": "Cargar el flujo de trabajo",
|
||||
"downloadWorkflow": "Descargar el flujo de trabajo en un archivo JSON"
|
||||
},
|
||||
"boards": {
|
||||
"autoAddBoard": "Agregar panel automáticamente",
|
||||
"changeBoard": "Cambiar el panel",
|
||||
"clearSearch": "Borrar la búsqueda",
|
||||
"deleteBoard": "Borrar el panel",
|
||||
"selectBoard": "Seleccionar un panel",
|
||||
"uncategorized": "Sin categoría",
|
||||
"cancel": "Cancelar",
|
||||
"addBoard": "Agregar un panel",
|
||||
"movingImagesToBoard_one": "Moviendo {{count}} imagen al panel:",
|
||||
"movingImagesToBoard_many": "Moviendo {{count}} imágenes al panel:",
|
||||
"movingImagesToBoard_other": "Moviendo {{count}} imágenes al panel:",
|
||||
"bottomMessage": "Al eliminar este panel y las imágenes que contiene, se restablecerán las funciones que los estén utilizando actualmente.",
|
||||
"deleteBoardAndImages": "Borrar el panel y las imágenes",
|
||||
"loading": "Cargando...",
|
||||
"deletedBoardsCannotbeRestored": "Los paneles eliminados no se pueden restaurar",
|
||||
"move": "Mover",
|
||||
"menuItemAutoAdd": "Agregar automáticamente a este panel",
|
||||
"searchBoard": "Buscando paneles…",
|
||||
"topMessage": "Este panel contiene imágenes utilizadas en las siguientes funciones:",
|
||||
"downloadBoard": "Descargar panel",
|
||||
"deleteBoardOnly": "Borrar solo el panel",
|
||||
"myBoard": "Mi panel",
|
||||
"noMatching": "No hay paneles que coincidan"
|
||||
},
|
||||
"accordions": {
|
||||
"compositing": {
|
||||
"title": "Composición",
|
||||
"infillTab": "Relleno"
|
||||
},
|
||||
"generation": {
|
||||
"title": "Generación"
|
||||
},
|
||||
"image": {
|
||||
"title": "Imagen"
|
||||
},
|
||||
"control": {
|
||||
"title": "Control"
|
||||
},
|
||||
"advanced": {
|
||||
"options": "$t(accordions.advanced.title) opciones",
|
||||
"title": "Avanzado"
|
||||
}
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
"generationTab": "$t(ui.tabs.generation) $t(common.tab)",
|
||||
"canvas": "Lienzo",
|
||||
"generation": "Generación",
|
||||
"queue": "Cola",
|
||||
"queueTab": "$t(ui.tabs.queue) $t(common.tab)",
|
||||
"workflows": "Flujos de trabajo",
|
||||
"models": "Modelos",
|
||||
"modelsTab": "$t(ui.tabs.models) $t(common.tab)",
|
||||
"canvasTab": "$t(ui.tabs.canvas) $t(common.tab)",
|
||||
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)"
|
||||
}
|
||||
},
|
||||
"controlLayers": {
|
||||
"layers_one": "Capa",
|
||||
"layers_many": "Capas",
|
||||
"layers_other": "Capas"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"reportBugLabel": "Segnala un errore",
|
||||
"settingsLabel": "Impostazioni",
|
||||
"img2img": "Immagine a Immagine",
|
||||
"unifiedCanvas": "Tela unificata",
|
||||
"unifiedCanvas": "Tela",
|
||||
"nodes": "Flussi di lavoro",
|
||||
"upload": "Caricamento",
|
||||
"load": "Carica",
|
||||
@@ -74,7 +74,18 @@
|
||||
"file": "File",
|
||||
"toResolve": "Da risolvere",
|
||||
"add": "Aggiungi",
|
||||
"loglevel": "Livello di log"
|
||||
"loglevel": "Livello di log",
|
||||
"beta": "Beta",
|
||||
"positivePrompt": "Prompt positivo",
|
||||
"negativePrompt": "Prompt negativo",
|
||||
"selected": "Selezionato",
|
||||
"goTo": "Vai a",
|
||||
"editor": "Editor",
|
||||
"tab": "Scheda",
|
||||
"viewing": "Visualizza",
|
||||
"viewingDesc": "Rivedi le immagini in un'ampia vista della galleria",
|
||||
"editing": "Modifica",
|
||||
"editingDesc": "Modifica nell'area Livelli di controllo"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Dimensione dell'immagine",
|
||||
@@ -180,8 +191,8 @@
|
||||
"desc": "Mostra le informazioni sui metadati dell'immagine corrente"
|
||||
},
|
||||
"sendToImageToImage": {
|
||||
"title": "Invia a Immagine a Immagine",
|
||||
"desc": "Invia l'immagine corrente a da Immagine a Immagine"
|
||||
"title": "Invia a Generazione da immagine",
|
||||
"desc": "Invia l'immagine corrente a Generazione da immagine"
|
||||
},
|
||||
"deleteImage": {
|
||||
"title": "Elimina immagine",
|
||||
@@ -334,6 +345,10 @@
|
||||
"remixImage": {
|
||||
"desc": "Utilizza tutti i parametri tranne il seme dell'immagine corrente",
|
||||
"title": "Remixa l'immagine"
|
||||
},
|
||||
"toggleViewer": {
|
||||
"title": "Attiva/disattiva il visualizzatore di immagini",
|
||||
"desc": "Passa dal Visualizzatore immagini all'area di lavoro per la scheda corrente."
|
||||
}
|
||||
},
|
||||
"modelManager": {
|
||||
@@ -471,8 +486,8 @@
|
||||
"scaledHeight": "Altezza ridimensionata",
|
||||
"infillMethod": "Metodo di riempimento",
|
||||
"tileSize": "Dimensione piastrella",
|
||||
"sendToImg2Img": "Invia a Immagine a Immagine",
|
||||
"sendToUnifiedCanvas": "Invia a Tela Unificata",
|
||||
"sendToImg2Img": "Invia a Generazione da immagine",
|
||||
"sendToUnifiedCanvas": "Invia alla Tela",
|
||||
"downloadImage": "Scarica l'immagine",
|
||||
"usePrompt": "Usa Prompt",
|
||||
"useSeed": "Usa Seme",
|
||||
@@ -508,13 +523,24 @@
|
||||
"incompatibleBaseModelForControlAdapter": "Il modello dell'adattatore di controllo #{{number}} non è compatibile con il modello principale.",
|
||||
"missingNodeTemplate": "Modello di nodo mancante",
|
||||
"missingInputForField": "{{nodeLabel}} -> {{fieldLabel}} ingresso mancante",
|
||||
"missingFieldTemplate": "Modello di campo mancante"
|
||||
"missingFieldTemplate": "Modello di campo mancante",
|
||||
"imageNotProcessedForControlAdapter": "L'immagine dell'adattatore di controllo #{{number}} non è stata elaborata",
|
||||
"layer": {
|
||||
"initialImageNoImageSelected": "Nessuna immagine iniziale selezionata",
|
||||
"t2iAdapterIncompatibleDimensions": "L'adattatore T2I richiede che la dimensione dell'immagine sia un multiplo di {{multiple}}",
|
||||
"controlAdapterNoModelSelected": "Nessun modello di Adattatore di Controllo selezionato",
|
||||
"controlAdapterIncompatibleBaseModel": "Il modello base dell'adattatore di controllo non è compatibile",
|
||||
"controlAdapterNoImageSelected": "Nessuna immagine dell'adattatore di controllo selezionata",
|
||||
"controlAdapterImageNotProcessed": "Immagine dell'adattatore di controllo non elaborata",
|
||||
"ipAdapterNoModelSelected": "Nessun adattatore IP selezionato",
|
||||
"ipAdapterIncompatibleBaseModel": "Il modello base dell'adattatore IP non è compatibile",
|
||||
"ipAdapterNoImageSelected": "Nessuna immagine dell'adattatore IP selezionata",
|
||||
"rgNoPromptsOrIPAdapters": "Nessun prompt o adattatore IP",
|
||||
"rgNoRegion": "Nessuna regione selezionata"
|
||||
}
|
||||
},
|
||||
"useCpuNoise": "Usa la CPU per generare rumore",
|
||||
"iterations": "Iterazioni",
|
||||
"iterationsWithCount_one": "{{count}} Iterazione",
|
||||
"iterationsWithCount_many": "{{count}} Iterazioni",
|
||||
"iterationsWithCount_other": "{{count}} Iterazioni",
|
||||
"isAllowedToUpscale": {
|
||||
"useX2Model": "L'immagine è troppo grande per l'ampliamento con il modello x4, utilizza il modello x2",
|
||||
"tooLarge": "L'immagine è troppo grande per l'ampliamento, seleziona un'immagine più piccola"
|
||||
@@ -534,7 +560,10 @@
|
||||
"infillMosaicMinColor": "Colore minimo",
|
||||
"infillMosaicMaxColor": "Colore massimo",
|
||||
"infillMosaicTileHeight": "Altezza piastrella",
|
||||
"infillColorValue": "Colore di riempimento"
|
||||
"infillColorValue": "Colore di riempimento",
|
||||
"globalSettings": "Impostazioni globali",
|
||||
"globalPositivePromptPlaceholder": "Prompt positivo globale",
|
||||
"globalNegativePromptPlaceholder": "Prompt negativo globale"
|
||||
},
|
||||
"settings": {
|
||||
"models": "Modelli",
|
||||
@@ -559,7 +588,7 @@
|
||||
"intermediatesCleared_one": "Cancellata {{count}} immagine intermedia",
|
||||
"intermediatesCleared_many": "Cancellate {{count}} immagini intermedie",
|
||||
"intermediatesCleared_other": "Cancellate {{count}} immagini intermedie",
|
||||
"clearIntermediatesDesc1": "La cancellazione delle immagini intermedie ripristinerà lo stato di Tela Unificata e ControlNet.",
|
||||
"clearIntermediatesDesc1": "La cancellazione delle immagini intermedie ripristinerà lo stato della Tela e degli Adattatori di Controllo.",
|
||||
"intermediatesClearedFailed": "Problema con la cancellazione delle immagini intermedie",
|
||||
"clearIntermediatesWithCount_one": "Cancella {{count}} immagine intermedia",
|
||||
"clearIntermediatesWithCount_many": "Cancella {{count}} immagini intermedie",
|
||||
@@ -575,8 +604,8 @@
|
||||
"imageCopied": "Immagine copiata",
|
||||
"imageNotLoadedDesc": "Impossibile trovare l'immagine",
|
||||
"canvasMerged": "Tela unita",
|
||||
"sentToImageToImage": "Inviato a Immagine a Immagine",
|
||||
"sentToUnifiedCanvas": "Inviato a Tela Unificata",
|
||||
"sentToImageToImage": "Inviato a Generazione da immagine",
|
||||
"sentToUnifiedCanvas": "Inviato alla Tela",
|
||||
"parametersNotSet": "Parametri non impostati",
|
||||
"metadataLoadFailed": "Impossibile caricare i metadati",
|
||||
"serverError": "Errore del Server",
|
||||
@@ -795,7 +824,7 @@
|
||||
"float": "In virgola mobile",
|
||||
"currentImageDescription": "Visualizza l'immagine corrente nell'editor dei nodi",
|
||||
"fieldTypesMustMatch": "I tipi di campo devono corrispondere",
|
||||
"edge": "Bordo",
|
||||
"edge": "Collegamento",
|
||||
"currentImage": "Immagine corrente",
|
||||
"integer": "Numero Intero",
|
||||
"inputMayOnlyHaveOneConnection": "L'ingresso può avere solo una connessione",
|
||||
@@ -808,8 +837,8 @@
|
||||
"unableToUpdateNodes_other": "Impossibile aggiornare {{count}} nodi",
|
||||
"addLinearView": "Aggiungi alla vista Lineare",
|
||||
"unknownErrorValidatingWorkflow": "Errore sconosciuto durante la convalida del flusso di lavoro",
|
||||
"collectionFieldType": "{{name}} Raccolta",
|
||||
"collectionOrScalarFieldType": "{{name}} Raccolta|Scalare",
|
||||
"collectionFieldType": "{{name}} (Raccolta)",
|
||||
"collectionOrScalarFieldType": "{{name}} (Singola o Raccolta)",
|
||||
"nodeVersion": "Versione Nodo",
|
||||
"inputFieldTypeParseError": "Impossibile analizzare il tipo di campo di input {{node}}.{{field}} ({{message}})",
|
||||
"unsupportedArrayItemType": "Tipo di elemento dell'array non supportato \"{{type}}\"",
|
||||
@@ -845,7 +874,15 @@
|
||||
"resetToDefaultValue": "Ripristina il valore predefinito",
|
||||
"noFieldsViewMode": "Questo flusso di lavoro non ha campi selezionati da visualizzare. Visualizza il flusso di lavoro completo per configurare i valori.",
|
||||
"edit": "Modifica",
|
||||
"graph": "Grafico"
|
||||
"graph": "Grafico",
|
||||
"showEdgeLabelsHelp": "Mostra etichette sui collegamenti, che indicano i nodi collegati",
|
||||
"showEdgeLabels": "Mostra le etichette del collegamento",
|
||||
"cannotMixAndMatchCollectionItemTypes": "Impossibile combinare e abbinare i tipi di elementi della raccolta",
|
||||
"noGraph": "Nessun grafico",
|
||||
"missingNode": "Nodo di invocazione mancante",
|
||||
"missingInvocationTemplate": "Modello di invocazione mancante",
|
||||
"missingFieldTemplate": "Modello di campo mancante",
|
||||
"singleFieldType": "{{name}} (Singola)"
|
||||
},
|
||||
"boards": {
|
||||
"autoAddBoard": "Aggiungi automaticamente bacheca",
|
||||
@@ -922,7 +959,7 @@
|
||||
"colorMapTileSize": "Dimensione piastrella",
|
||||
"mediapipeFaceDescription": "Rilevamento dei volti tramite Mediapipe",
|
||||
"hedDescription": "Rilevamento dei bordi nidificati olisticamente",
|
||||
"setControlImageDimensions": "Imposta le dimensioni dell'immagine di controllo su L/A",
|
||||
"setControlImageDimensions": "Copia le dimensioni in L/A (ottimizza per il modello)",
|
||||
"maxFaces": "Numero massimo di volti",
|
||||
"addT2IAdapter": "Aggiungi $t(common.t2iAdapter)",
|
||||
"addControlNet": "Aggiungi $t(common.controlNet)",
|
||||
@@ -951,12 +988,17 @@
|
||||
"mediapipeFace": "Mediapipe Volto",
|
||||
"ip_adapter": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.ipAdapter))",
|
||||
"t2i_adapter": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.t2iAdapter))",
|
||||
"selectCLIPVisionModel": "Seleziona un modello CLIP Vision"
|
||||
"selectCLIPVisionModel": "Seleziona un modello CLIP Vision",
|
||||
"ipAdapterMethod": "Metodo",
|
||||
"full": "Completo",
|
||||
"composition": "Solo la composizione",
|
||||
"style": "Solo lo stile",
|
||||
"beginEndStepPercentShort": "Inizio/Fine %",
|
||||
"setControlImageDimensionsForce": "Copia le dimensioni in L/A (ignora il modello)"
|
||||
},
|
||||
"queue": {
|
||||
"queueFront": "Aggiungi all'inizio della coda",
|
||||
"queueBack": "Aggiungi alla coda",
|
||||
"queueCountPrediction": "{{promptsCount}} prompt × {{iterations}} iterazioni -> {{count}} generazioni",
|
||||
"queue": "Coda",
|
||||
"status": "Stato",
|
||||
"pruneSucceeded": "Rimossi {{item_count}} elementi completati dalla coda",
|
||||
@@ -993,7 +1035,7 @@
|
||||
"cancelBatchSucceeded": "Lotto annullato",
|
||||
"clearTooltip": "Annulla e cancella tutti gli elementi",
|
||||
"current": "Attuale",
|
||||
"pauseTooltip": "Sospende l'elaborazione",
|
||||
"pauseTooltip": "Sospendi l'elaborazione",
|
||||
"failed": "Falliti",
|
||||
"cancelItem": "Annulla l'elemento",
|
||||
"next": "Prossimo",
|
||||
@@ -1011,7 +1053,16 @@
|
||||
"graphFailedToQueue": "Impossibile mettere in coda il grafico",
|
||||
"batchFieldValues": "Valori Campi Lotto",
|
||||
"time": "Tempo",
|
||||
"openQueue": "Apri coda"
|
||||
"openQueue": "Apri coda",
|
||||
"iterations_one": "Iterazione",
|
||||
"iterations_many": "Iterazioni",
|
||||
"iterations_other": "Iterazioni",
|
||||
"prompts_one": "Prompt",
|
||||
"prompts_many": "Prompt",
|
||||
"prompts_other": "Prompt",
|
||||
"generations_one": "Generazione",
|
||||
"generations_many": "Generazioni",
|
||||
"generations_other": "Generazioni"
|
||||
},
|
||||
"models": {
|
||||
"noMatchingModels": "Nessun modello corrispondente",
|
||||
@@ -1394,6 +1445,12 @@
|
||||
"paragraphs": [
|
||||
"La dimensione del bordo del passaggio di coerenza."
|
||||
]
|
||||
},
|
||||
"ipAdapterMethod": {
|
||||
"heading": "Metodo",
|
||||
"paragraphs": [
|
||||
"Metodo con cui applicare l'adattatore IP corrente."
|
||||
]
|
||||
}
|
||||
},
|
||||
"sdxl": {
|
||||
@@ -1522,5 +1579,55 @@
|
||||
"compatibleEmbeddings": "Incorporamenti compatibili",
|
||||
"addPromptTrigger": "Aggiungi Trigger nel prompt",
|
||||
"noMatchingTriggers": "Nessun Trigger corrispondente"
|
||||
},
|
||||
"controlLayers": {
|
||||
"opacityFilter": "Filtro opacità",
|
||||
"deleteAll": "Cancella tutto",
|
||||
"addLayer": "Aggiungi Livello",
|
||||
"moveToFront": "Sposta in primo piano",
|
||||
"moveToBack": "Sposta in fondo",
|
||||
"moveForward": "Sposta avanti",
|
||||
"moveBackward": "Sposta indietro",
|
||||
"brushSize": "Dimensioni del pennello",
|
||||
"globalMaskOpacity": "Opacità globale della maschera",
|
||||
"autoNegative": "Auto Negativo",
|
||||
"deletePrompt": "Cancella il prompt",
|
||||
"debugLayers": "Debug dei Livelli",
|
||||
"rectangle": "Rettangolo",
|
||||
"maskPreviewColor": "Colore anteprima maschera",
|
||||
"addPositivePrompt": "Aggiungi $t(common.positivePrompt)",
|
||||
"addNegativePrompt": "Aggiungi $t(common.negativePrompt)",
|
||||
"addIPAdapter": "Aggiungi $t(common.ipAdapter)",
|
||||
"regionalGuidance": "Guida regionale",
|
||||
"regionalGuidanceLayer": "$t(unifiedCanvas.layer) $t(controlLayers.regionalGuidance)",
|
||||
"opacity": "Opacità",
|
||||
"globalControlAdapter": "$t(controlnet.controlAdapter_one) Globale",
|
||||
"globalControlAdapterLayer": "$t(controlnet.controlAdapter_one) - $t(unifiedCanvas.layer) Globale",
|
||||
"globalIPAdapter": "$t(common.ipAdapter) Globale",
|
||||
"globalIPAdapterLayer": "$t(common.ipAdapter) - $t(unifiedCanvas.layer) Globale",
|
||||
"globalInitialImage": "Immagine iniziale",
|
||||
"globalInitialImageLayer": "$t(controlLayers.globalInitialImage) - $t(unifiedCanvas.layer) Globale",
|
||||
"clearProcessor": "Cancella processore",
|
||||
"resetProcessor": "Ripristina il processore alle impostazioni predefinite",
|
||||
"noLayersAdded": "Nessun livello aggiunto",
|
||||
"resetRegion": "Reimposta la regione",
|
||||
"controlLayers": "Livelli di controllo",
|
||||
"layers_one": "Livello",
|
||||
"layers_many": "Livelli",
|
||||
"layers_other": "Livelli"
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
"generation": "Generazione",
|
||||
"generationTab": "$t(ui.tabs.generation) $t(common.tab)",
|
||||
"canvas": "Tela",
|
||||
"canvasTab": "$t(ui.tabs.canvas) $t(common.tab)",
|
||||
"workflows": "Flussi di lavoro",
|
||||
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
|
||||
"models": "Modelli",
|
||||
"modelsTab": "$t(ui.tabs.models) $t(common.tab)",
|
||||
"queue": "Coda",
|
||||
"queueTab": "$t(ui.tabs.queue) $t(common.tab)"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -570,7 +570,6 @@
|
||||
"pauseSucceeded": "処理が一時停止されました",
|
||||
"queueFront": "キューの先頭へ追加",
|
||||
"queueBack": "キューに追加",
|
||||
"queueCountPrediction": "{{promptsCount}} プロンプト × {{iterations}} イテレーション -> {{count}} 枚生成",
|
||||
"pause": "一時停止",
|
||||
"queue": "キュー",
|
||||
"pauseTooltip": "処理を一時停止",
|
||||
|
||||
@@ -505,7 +505,6 @@
|
||||
"completed": "완성된",
|
||||
"queueBack": "Queue에 추가",
|
||||
"cancelFailed": "항목 취소 중 발생한 문제",
|
||||
"queueCountPrediction": "Queue에 {{predicted}} 추가",
|
||||
"batchQueued": "Batch Queued",
|
||||
"pauseFailed": "프로세서 중지 중 발생한 문제",
|
||||
"clearFailed": "Queue 제거 중 발생한 문제",
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
"settingsLabel": "Instellingen",
|
||||
"img2img": "Afbeelding naar afbeelding",
|
||||
"unifiedCanvas": "Centraal canvas",
|
||||
"nodes": "Werkstroom-editor",
|
||||
"nodes": "Werkstromen",
|
||||
"upload": "Upload",
|
||||
"load": "Laad",
|
||||
"statusDisconnected": "Niet verbonden",
|
||||
@@ -34,7 +34,60 @@
|
||||
"controlNet": "ControlNet",
|
||||
"imageFailedToLoad": "Kan afbeelding niet laden",
|
||||
"learnMore": "Meer informatie",
|
||||
"advanced": "Uitgebreid"
|
||||
"advanced": "Uitgebreid",
|
||||
"file": "Bestand",
|
||||
"installed": "Geïnstalleerd",
|
||||
"notInstalled": "Niet $t(common.installed)",
|
||||
"simple": "Eenvoudig",
|
||||
"somethingWentWrong": "Er ging iets mis",
|
||||
"add": "Voeg toe",
|
||||
"checkpoint": "Checkpoint",
|
||||
"details": "Details",
|
||||
"outputs": "Uitvoeren",
|
||||
"save": "Bewaar",
|
||||
"nextPage": "Volgende pagina",
|
||||
"blue": "Blauw",
|
||||
"alpha": "Alfa",
|
||||
"red": "Rood",
|
||||
"editor": "Editor",
|
||||
"folder": "Map",
|
||||
"format": "structuur",
|
||||
"goTo": "Ga naar",
|
||||
"template": "Sjabloon",
|
||||
"input": "Invoer",
|
||||
"loglevel": "Logboekniveau",
|
||||
"safetensors": "Safetensors",
|
||||
"saveAs": "Bewaar als",
|
||||
"created": "Gemaakt",
|
||||
"green": "Groen",
|
||||
"tab": "Tab",
|
||||
"positivePrompt": "Positieve prompt",
|
||||
"negativePrompt": "Negatieve prompt",
|
||||
"selected": "Geselecteerd",
|
||||
"orderBy": "Sorteer op",
|
||||
"prevPage": "Vorige pagina",
|
||||
"beta": "Bèta",
|
||||
"copyError": "$t(gallery.copy) Fout",
|
||||
"toResolve": "Op te lossen",
|
||||
"aboutDesc": "Gebruik je Invoke voor het werk? Kijk dan naar:",
|
||||
"aboutHeading": "Creatieve macht voor jou",
|
||||
"copy": "Kopieer",
|
||||
"data": "Gegevens",
|
||||
"or": "of",
|
||||
"updated": "Bijgewerkt",
|
||||
"outpaint": "outpainten",
|
||||
"viewing": "Bekijken",
|
||||
"viewingDesc": "Beoordeel afbeelding in een grote galerijweergave",
|
||||
"editing": "Bewerken",
|
||||
"editingDesc": "Bewerk op het canvas Stuurlagen",
|
||||
"ai": "ai",
|
||||
"inpaint": "inpainten",
|
||||
"unknown": "Onbekend",
|
||||
"delete": "Verwijder",
|
||||
"direction": "Richting",
|
||||
"error": "Fout",
|
||||
"localSystem": "Lokaal systeem",
|
||||
"unknownError": "Onbekende fout"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Afbeeldingsgrootte",
|
||||
@@ -310,10 +363,41 @@
|
||||
"modelSyncFailed": "Synchronisatie modellen mislukt",
|
||||
"modelDeleteFailed": "Model kon niet verwijderd worden",
|
||||
"convertingModelBegin": "Model aan het converteren. Even geduld.",
|
||||
"predictionType": "Soort voorspelling (voor Stable Diffusion 2.x-modellen en incidentele Stable Diffusion 1.x-modellen)",
|
||||
"predictionType": "Soort voorspelling",
|
||||
"advanced": "Uitgebreid",
|
||||
"modelType": "Soort model",
|
||||
"vaePrecision": "Nauwkeurigheid VAE"
|
||||
"vaePrecision": "Nauwkeurigheid VAE",
|
||||
"loraTriggerPhrases": "LoRA-triggerzinnen",
|
||||
"urlOrLocalPathHelper": "URL's zouden moeten wijzen naar een los bestand. Lokale paden kunnen wijzen naar een los bestand of map voor een individueel Diffusers-model.",
|
||||
"modelName": "Modelnaam",
|
||||
"path": "Pad",
|
||||
"triggerPhrases": "Triggerzinnen",
|
||||
"typePhraseHere": "Typ zin hier in",
|
||||
"useDefaultSettings": "Gebruik standaardinstellingen",
|
||||
"modelImageDeleteFailed": "Fout bij verwijderen modelafbeelding",
|
||||
"modelImageUpdated": "Modelafbeelding bijgewerkt",
|
||||
"modelImageUpdateFailed": "Fout bij bijwerken modelafbeelding",
|
||||
"noMatchingModels": "Geen overeenkomende modellen",
|
||||
"scanPlaceholder": "Pad naar een lokale map",
|
||||
"noModelsInstalled": "Geen modellen geïnstalleerd",
|
||||
"noModelsInstalledDesc1": "Installeer modellen met de",
|
||||
"noModelSelected": "Geen model geselecteerd",
|
||||
"starterModels": "Beginnermodellen",
|
||||
"textualInversions": "Tekstuele omkeringen",
|
||||
"upcastAttention": "Upcast-aandacht",
|
||||
"uploadImage": "Upload afbeelding",
|
||||
"mainModelTriggerPhrases": "Triggerzinnen hoofdmodel",
|
||||
"urlOrLocalPath": "URL of lokaal pad",
|
||||
"scanFolderHelper": "De map zal recursief worden ingelezen voor modellen. Dit kan enige tijd in beslag nemen voor erg grote mappen.",
|
||||
"simpleModelPlaceholder": "URL of pad naar een lokaal pad of Diffusers-map",
|
||||
"modelSettings": "Modelinstellingen",
|
||||
"pathToConfig": "Pad naar configuratie",
|
||||
"prune": "Snoei",
|
||||
"pruneTooltip": "Snoei voltooide importeringen uit wachtrij",
|
||||
"repoVariant": "Repovariant",
|
||||
"scanFolder": "Lees map in",
|
||||
"scanResults": "Resultaten inlezen",
|
||||
"source": "Bron"
|
||||
},
|
||||
"parameters": {
|
||||
"images": "Afbeeldingen",
|
||||
@@ -353,13 +437,13 @@
|
||||
"copyImage": "Kopieer afbeelding",
|
||||
"denoisingStrength": "Sterkte ontruisen",
|
||||
"scheduler": "Planner",
|
||||
"seamlessXAxis": "X-as",
|
||||
"seamlessYAxis": "Y-as",
|
||||
"seamlessXAxis": "Naadloze tegels in x-as",
|
||||
"seamlessYAxis": "Naadloze tegels in y-as",
|
||||
"clipSkip": "Overslaan CLIP",
|
||||
"negativePromptPlaceholder": "Negatieve prompt",
|
||||
"controlNetControlMode": "Aansturingsmodus",
|
||||
"positivePromptPlaceholder": "Positieve prompt",
|
||||
"maskBlur": "Vervaag",
|
||||
"maskBlur": "Vervaging van masker",
|
||||
"invoke": {
|
||||
"noNodesInGraph": "Geen knooppunten in graaf",
|
||||
"noModelSelected": "Geen model ingesteld",
|
||||
@@ -369,11 +453,25 @@
|
||||
"missingInputForField": "{{nodeLabel}} -> {{fieldLabel}} invoer ontbreekt",
|
||||
"noControlImageForControlAdapter": "Controle-adapter #{{number}} heeft geen controle-afbeelding",
|
||||
"noModelForControlAdapter": "Control-adapter #{{number}} heeft geen model ingesteld staan.",
|
||||
"incompatibleBaseModelForControlAdapter": "Model van controle-adapter #{{number}} is ongeldig in combinatie met het hoofdmodel.",
|
||||
"incompatibleBaseModelForControlAdapter": "Model van controle-adapter #{{number}} is niet compatibel met het hoofdmodel.",
|
||||
"systemDisconnected": "Systeem is niet verbonden",
|
||||
"missingNodeTemplate": "Knooppuntsjabloon ontbreekt",
|
||||
"missingFieldTemplate": "Veldsjabloon ontbreekt",
|
||||
"addingImagesTo": "Bezig met toevoegen van afbeeldingen aan"
|
||||
"addingImagesTo": "Bezig met toevoegen van afbeeldingen aan",
|
||||
"layer": {
|
||||
"initialImageNoImageSelected": "geen initiële afbeelding geselecteerd",
|
||||
"controlAdapterNoModelSelected": "geen controle-adaptermodel geselecteerd",
|
||||
"controlAdapterIncompatibleBaseModel": "niet-compatibele basismodel voor controle-adapter",
|
||||
"controlAdapterNoImageSelected": "geen afbeelding voor controle-adapter geselecteerd",
|
||||
"controlAdapterImageNotProcessed": "Afbeelding voor controle-adapter niet verwerkt",
|
||||
"ipAdapterIncompatibleBaseModel": "niet-compatibele basismodel voor IP-adapter",
|
||||
"ipAdapterNoImageSelected": "geen afbeelding voor IP-adapter geselecteerd",
|
||||
"rgNoRegion": "geen gebied geselecteerd",
|
||||
"rgNoPromptsOrIPAdapters": "geen tekstprompts of IP-adapters",
|
||||
"t2iAdapterIncompatibleDimensions": "T2I-adapter vereist een afbeelding met afmetingen met een veelvoud van 64",
|
||||
"ipAdapterNoModelSelected": "geen IP-adapter geselecteerd"
|
||||
},
|
||||
"imageNotProcessedForControlAdapter": "De afbeelding van controle-adapter #{{number}} is niet verwerkt"
|
||||
},
|
||||
"isAllowedToUpscale": {
|
||||
"useX2Model": "Afbeelding is te groot om te vergroten met het x4-model. Gebruik hiervoor het x2-model",
|
||||
@@ -383,9 +481,26 @@
|
||||
"useCpuNoise": "Gebruik CPU-ruis",
|
||||
"imageActions": "Afbeeldingshandeling",
|
||||
"iterations": "Iteraties",
|
||||
"iterationsWithCount_one": "{{count}} iteratie",
|
||||
"iterationsWithCount_other": "{{count}} iteraties",
|
||||
"coherenceMode": "Modus"
|
||||
"coherenceMode": "Modus",
|
||||
"infillColorValue": "Vulkleur",
|
||||
"remixImage": "Meng afbeelding opnieuw",
|
||||
"setToOptimalSize": "Optimaliseer grootte voor het model",
|
||||
"setToOptimalSizeTooSmall": "$t(parameters.setToOptimalSize) (is mogelijk te klein)",
|
||||
"aspect": "Beeldverhouding",
|
||||
"infillMosaicTileWidth": "Breedte tegel",
|
||||
"setToOptimalSizeTooLarge": "$t(parameters.setToOptimalSize) (is mogelijk te groot)",
|
||||
"lockAspectRatio": "Zet beeldverhouding vast",
|
||||
"infillMosaicTileHeight": "Hoogte tegel",
|
||||
"globalNegativePromptPlaceholder": "Globale negatieve prompt",
|
||||
"globalPositivePromptPlaceholder": "Globale positieve prompt",
|
||||
"useSize": "Gebruik grootte",
|
||||
"swapDimensions": "Wissel afmetingen om",
|
||||
"globalSettings": "Globale instellingen",
|
||||
"coherenceEdgeSize": "Randgrootte",
|
||||
"coherenceMinDenoise": "Min. ontruising",
|
||||
"infillMosaicMinColor": "Min. kleur",
|
||||
"infillMosaicMaxColor": "Max. kleur",
|
||||
"cfgRescaleMultiplier": "Vermenigvuldiger voor CFG-herschaling"
|
||||
},
|
||||
"settings": {
|
||||
"models": "Modellen",
|
||||
@@ -412,7 +527,12 @@
|
||||
"intermediatesCleared_one": "{{count}} tussentijdse afbeelding gewist",
|
||||
"intermediatesCleared_other": "{{count}} tussentijdse afbeeldingen gewist",
|
||||
"clearIntermediatesDesc1": "Als je tussentijdse afbeeldingen wist, dan wordt de staat hersteld van je canvas en van ControlNet.",
|
||||
"intermediatesClearedFailed": "Fout bij wissen van tussentijdse afbeeldingen"
|
||||
"intermediatesClearedFailed": "Fout bij wissen van tussentijdse afbeeldingen",
|
||||
"clearIntermediatesDisabled": "Wachtrij moet leeg zijn om tussentijdse afbeeldingen te kunnen leegmaken",
|
||||
"enableInformationalPopovers": "Schakel informatieve hulpballonnen in",
|
||||
"enableInvisibleWatermark": "Schakel onzichtbaar watermerk in",
|
||||
"enableNSFWChecker": "Schakel NSFW-controle in",
|
||||
"reloadingIn": "Opnieuw laden na"
|
||||
},
|
||||
"toast": {
|
||||
"uploadFailed": "Upload mislukt",
|
||||
@@ -427,8 +547,8 @@
|
||||
"connected": "Verbonden met server",
|
||||
"canceled": "Verwerking geannuleerd",
|
||||
"uploadFailedInvalidUploadDesc": "Moet een enkele PNG- of JPEG-afbeelding zijn",
|
||||
"parameterNotSet": "Parameter niet ingesteld",
|
||||
"parameterSet": "Instellen parameters",
|
||||
"parameterNotSet": "{{parameter}} niet ingesteld",
|
||||
"parameterSet": "{{parameter}} ingesteld",
|
||||
"problemCopyingImage": "Kan Afbeelding Niet Kopiëren",
|
||||
"baseModelChangedCleared_one": "Basismodel is gewijzigd: {{count}} niet-compatibel submodel weggehaald of uitgeschakeld",
|
||||
"baseModelChangedCleared_other": "Basismodel is gewijzigd: {{count}} niet-compatibele submodellen weggehaald of uitgeschakeld",
|
||||
@@ -445,11 +565,11 @@
|
||||
"maskSavedAssets": "Masker bewaard in Assets",
|
||||
"problemDownloadingCanvas": "Fout bij downloaden van canvas",
|
||||
"problemMergingCanvas": "Fout bij samenvoegen canvas",
|
||||
"setCanvasInitialImage": "Ingesteld als initiële canvasafbeelding",
|
||||
"setCanvasInitialImage": "Initiële canvasafbeelding ingesteld",
|
||||
"imageUploaded": "Afbeelding geüpload",
|
||||
"addedToBoard": "Toegevoegd aan bord",
|
||||
"workflowLoaded": "Werkstroom geladen",
|
||||
"modelAddedSimple": "Model toegevoegd",
|
||||
"modelAddedSimple": "Model toegevoegd aan wachtrij",
|
||||
"problemImportingMaskDesc": "Kan masker niet exporteren",
|
||||
"problemCopyingCanvas": "Fout bij kopiëren canvas",
|
||||
"problemSavingCanvas": "Fout bij bewaren canvas",
|
||||
@@ -461,7 +581,18 @@
|
||||
"maskSentControlnetAssets": "Masker gestuurd naar ControlNet en Assets",
|
||||
"canvasSavedGallery": "Canvas bewaard in galerij",
|
||||
"imageUploadFailed": "Fout bij uploaden afbeelding",
|
||||
"problemImportingMask": "Fout bij importeren masker"
|
||||
"problemImportingMask": "Fout bij importeren masker",
|
||||
"workflowDeleted": "Werkstroom verwijderd",
|
||||
"invalidUpload": "Ongeldige upload",
|
||||
"uploadInitialImage": "Initiële afbeelding uploaden",
|
||||
"setAsCanvasInitialImage": "Ingesteld als initiële afbeelding voor canvas",
|
||||
"problemRetrievingWorkflow": "Fout bij ophalen van werkstroom",
|
||||
"parameters": "Parameters",
|
||||
"modelImportCanceled": "Importeren model geannuleerd",
|
||||
"problemDeletingWorkflow": "Fout bij verwijderen van werkstroom",
|
||||
"prunedQueue": "Wachtrij gesnoeid",
|
||||
"problemDownloadingImage": "Fout bij downloaden afbeelding",
|
||||
"resetInitialImage": "Initiële afbeelding hersteld"
|
||||
},
|
||||
"tooltip": {
|
||||
"feature": {
|
||||
@@ -535,7 +666,11 @@
|
||||
"showOptionsPanel": "Toon zijscherm",
|
||||
"menu": "Menu",
|
||||
"showGalleryPanel": "Toon deelscherm Galerij",
|
||||
"loadMore": "Laad meer"
|
||||
"loadMore": "Laad meer",
|
||||
"about": "Over",
|
||||
"mode": "Modus",
|
||||
"resetUI": "$t(accessibility.reset) UI",
|
||||
"createIssue": "Maak probleem aan"
|
||||
},
|
||||
"nodes": {
|
||||
"zoomOutNodes": "Uitzoomen",
|
||||
@@ -549,7 +684,7 @@
|
||||
"loadWorkflow": "Laad werkstroom",
|
||||
"downloadWorkflow": "Download JSON van werkstroom",
|
||||
"scheduler": "Planner",
|
||||
"missingTemplate": "Ontbrekende sjabloon",
|
||||
"missingTemplate": "Ongeldig knooppunt: knooppunt {{node}} van het soort {{type}} heeft een ontbrekend sjabloon (niet geïnstalleerd?)",
|
||||
"workflowDescription": "Korte beschrijving",
|
||||
"versionUnknown": " Versie onbekend",
|
||||
"noNodeSelected": "Geen knooppunt gekozen",
|
||||
@@ -565,7 +700,7 @@
|
||||
"integer": "Geheel getal",
|
||||
"nodeTemplate": "Sjabloon knooppunt",
|
||||
"nodeOpacity": "Dekking knooppunt",
|
||||
"unableToLoadWorkflow": "Kan werkstroom niet valideren",
|
||||
"unableToLoadWorkflow": "Fout bij laden werkstroom",
|
||||
"snapToGrid": "Lijn uit op raster",
|
||||
"noFieldsLinearview": "Geen velden toegevoegd aan lineaire weergave",
|
||||
"nodeSearch": "Zoek naar knooppunten",
|
||||
@@ -616,11 +751,56 @@
|
||||
"unknownField": "Onbekend veld",
|
||||
"colorCodeEdges": "Kleurgecodeerde randen",
|
||||
"unknownNode": "Onbekend knooppunt",
|
||||
"mismatchedVersion": "Heeft niet-overeenkomende versie",
|
||||
"mismatchedVersion": "Ongeldig knooppunt: knooppunt {{node}} van het soort {{type}} heeft een niet-overeenkomende versie (probeer het bij te werken?)",
|
||||
"addNodeToolTip": "Voeg knooppunt toe (Shift+A, spatie)",
|
||||
"loadingNodes": "Bezig met laden van knooppunten...",
|
||||
"snapToGridHelp": "Lijn knooppunten uit op raster bij verplaatsing",
|
||||
"workflowSettings": "Instellingen werkstroomeditor"
|
||||
"workflowSettings": "Instellingen werkstroomeditor",
|
||||
"addLinearView": "Voeg toe aan lineaire weergave",
|
||||
"nodePack": "Knooppuntpakket",
|
||||
"unknownInput": "Onbekende invoer: {{name}}",
|
||||
"sourceNodeFieldDoesNotExist": "Ongeldige rand: bron-/uitvoerveld {{node}}.{{field}} bestaat niet",
|
||||
"collectionFieldType": "Verzameling {{name}}",
|
||||
"deletedInvalidEdge": "Ongeldige hoek {{source}} -> {{target}} verwijderd",
|
||||
"graph": "Grafiek",
|
||||
"targetNodeDoesNotExist": "Ongeldige rand: doel-/invoerknooppunt {{node}} bestaat niet",
|
||||
"resetToDefaultValue": "Herstel naar standaardwaarden",
|
||||
"editMode": "Bewerk in Werkstroom-editor",
|
||||
"showEdgeLabels": "Toon randlabels",
|
||||
"showEdgeLabelsHelp": "Toon labels aan randen, waarmee de verbonden knooppunten mee worden aangegeven",
|
||||
"clearWorkflowDesc2": "Je huidige werkstroom heeft niet-bewaarde wijzigingen.",
|
||||
"unableToParseFieldType": "fout bij bepalen soort veld",
|
||||
"sourceNodeDoesNotExist": "Ongeldige rand: bron-/uitvoerknooppunt {{node}} bestaat niet",
|
||||
"unsupportedArrayItemType": "niet-ondersteunde soort van het array-onderdeel \"{{type}}\"",
|
||||
"targetNodeFieldDoesNotExist": "Ongeldige rand: doel-/invoerveld {{node}}.{{field}} bestaat niet",
|
||||
"reorderLinearView": "Herorden lineaire weergave",
|
||||
"newWorkflowDesc": "Een nieuwe werkstroom aanmaken?",
|
||||
"collectionOrScalarFieldType": "Verzameling|scalair {{name}}",
|
||||
"newWorkflow": "Nieuwe werkstroom",
|
||||
"unknownErrorValidatingWorkflow": "Onbekende fout bij valideren werkstroom",
|
||||
"unsupportedAnyOfLength": "te veel union-leden ({{count}})",
|
||||
"unknownOutput": "Onbekende uitvoer: {{name}}",
|
||||
"viewMode": "Gebruik in lineaire weergave",
|
||||
"unableToExtractSchemaNameFromRef": "fout bij het extraheren van de schemanaam via de ref",
|
||||
"unsupportedMismatchedUnion": "niet-overeenkomende soort CollectionOrScalar met basissoorten {{firstType}} en {{secondType}}",
|
||||
"unknownNodeType": "Onbekend soort knooppunt",
|
||||
"edit": "Bewerk",
|
||||
"updateAllNodes": "Werk knooppunten bij",
|
||||
"allNodesUpdated": "Alle knooppunten bijgewerkt",
|
||||
"nodeVersion": "Knooppuntversie",
|
||||
"newWorkflowDesc2": "Je huidige werkstroom heeft niet-bewaarde wijzigingen.",
|
||||
"clearWorkflow": "Maak werkstroom leeg",
|
||||
"clearWorkflowDesc": "Deze werkstroom leegmaken en met een nieuwe beginnen?",
|
||||
"inputFieldTypeParseError": "Fout bij bepalen van het soort invoerveld {{node}}.{{field}} ({{message}})",
|
||||
"outputFieldTypeParseError": "Fout bij het bepalen van het soort uitvoerveld {{node}}.{{field}} ({{message}})",
|
||||
"unableToExtractEnumOptions": "fout bij extraheren enumeratie-opties",
|
||||
"unknownFieldType": "Soort $t(nodes.unknownField): {{type}}",
|
||||
"unableToGetWorkflowVersion": "Fout bij ophalen schemaversie van werkstroom",
|
||||
"betaDesc": "Deze uitvoering is in bèta. Totdat deze stabiel is kunnen er wijzigingen voorkomen gedurende app-updates die zaken kapotmaken. We zijn van plan om deze uitvoering op lange termijn te gaan ondersteunen.",
|
||||
"prototypeDesc": "Deze uitvoering is een prototype. Er kunnen wijzigingen voorkomen gedurende app-updates die zaken kapotmaken. Deze kunnen op een willekeurig moment verwijderd worden.",
|
||||
"noFieldsViewMode": "Deze werkstroom heeft geen geselecteerde velden om te tonen. Bekijk de volledige werkstroom om de waarden te configureren.",
|
||||
"unableToUpdateNodes_one": "Fout bij bijwerken van {{count}} knooppunt",
|
||||
"unableToUpdateNodes_other": "Fout bij bijwerken van {{count}} knooppunten"
|
||||
},
|
||||
"controlnet": {
|
||||
"amult": "a_mult",
|
||||
@@ -693,9 +873,28 @@
|
||||
"canny": "Canny",
|
||||
"depthZoeDescription": "Genereer diepteblad via Zoe",
|
||||
"hedDescription": "Herkenning van holistisch-geneste randen",
|
||||
"setControlImageDimensions": "Stel afmetingen controle-afbeelding in op B/H",
|
||||
"setControlImageDimensions": "Kopieer grootte naar B/H (optimaliseer voor model)",
|
||||
"scribble": "Krabbel",
|
||||
"maxFaces": "Max. gezichten"
|
||||
"maxFaces": "Max. gezichten",
|
||||
"dwOpenpose": "DW Openpose",
|
||||
"depthAnything": "Depth Anything",
|
||||
"base": "Basis",
|
||||
"hands": "Handen",
|
||||
"selectCLIPVisionModel": "Selecteer een CLIP Vision-model",
|
||||
"modelSize": "Modelgrootte",
|
||||
"small": "Klein",
|
||||
"large": "Groot",
|
||||
"resizeSimple": "Wijzig grootte (eenvoudig)",
|
||||
"beginEndStepPercentShort": "Begin-/eind-%",
|
||||
"depthAnythingDescription": "Genereren dieptekaart d.m.v. de techniek Depth Anything",
|
||||
"face": "Gezicht",
|
||||
"body": "Lichaam",
|
||||
"dwOpenposeDescription": "Schatting menselijke pose d.m.v. DW Openpose",
|
||||
"ipAdapterMethod": "Methode",
|
||||
"full": "Volledig",
|
||||
"style": "Alleen stijl",
|
||||
"composition": "Alleen samenstelling",
|
||||
"setControlImageDimensionsForce": "Kopieer grootte naar B/H (negeer model)"
|
||||
},
|
||||
"dynamicPrompts": {
|
||||
"seedBehaviour": {
|
||||
@@ -708,7 +907,10 @@
|
||||
"maxPrompts": "Max. prompts",
|
||||
"promptsWithCount_one": "{{count}} prompt",
|
||||
"promptsWithCount_other": "{{count}} prompts",
|
||||
"dynamicPrompts": "Dynamische prompts"
|
||||
"dynamicPrompts": "Dynamische prompts",
|
||||
"showDynamicPrompts": "Toon dynamische prompts",
|
||||
"loading": "Genereren van dynamische prompts...",
|
||||
"promptsPreview": "Voorvertoning prompts"
|
||||
},
|
||||
"popovers": {
|
||||
"noiseUseCPU": {
|
||||
@@ -721,7 +923,7 @@
|
||||
},
|
||||
"paramScheduler": {
|
||||
"paragraphs": [
|
||||
"De planner bepaalt hoe ruis per iteratie wordt toegevoegd aan een afbeelding of hoe een monster wordt bijgewerkt op basis van de uitvoer van een model."
|
||||
"De planner gebruikt gedurende het genereringsproces."
|
||||
],
|
||||
"heading": "Planner"
|
||||
},
|
||||
@@ -808,8 +1010,8 @@
|
||||
},
|
||||
"clipSkip": {
|
||||
"paragraphs": [
|
||||
"Kies hoeveel CLIP-modellagen je wilt overslaan.",
|
||||
"Bepaalde modellen werken beter met bepaalde Overslaan CLIP-instellingen."
|
||||
"Aantal over te slaan CLIP-modellagen.",
|
||||
"Bepaalde modellen zijn beter geschikt met bepaalde Overslaan CLIP-instellingen."
|
||||
],
|
||||
"heading": "Overslaan CLIP"
|
||||
},
|
||||
@@ -940,7 +1142,6 @@
"completed": "Voltooid",
"queueBack": "Voeg toe aan wachtrij",
"cancelFailed": "Fout bij annuleren onderdeel",
"queueCountPrediction": "Voeg {{predicted}} toe aan wachtrij",
"batchQueued": "Reeks in wachtrij geplaatst",
"pauseFailed": "Fout bij onderbreken verwerker",
"clearFailed": "Fout bij wissen van wachtrij",
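The Dutch `queueCountPrediction` string above still interpolates the old `{{predicted}}` variable, while the same key appears in the Russian and Chinese hunks later in this diff with `{{promptsCount}}`, `{{iterations}}` and `{{count}}` placeholders. A minimal sketch of how i18next fills such placeholders at the call site, assuming the standard `i18next.t` interpolation API (the actual caller in the app is not shown in this diff, and the example values are made up):

```ts
import i18next from 'i18next';

// Hypothetical values for illustration only.
const promptsCount = 3;
const iterations = 4;

// i18next replaces each {{placeholder}} with the matching key in the options object.
const prediction = i18next.t('queue.queueCountPrediction', {
  promptsCount,
  iterations,
  count: promptsCount * iterations,
});
// With the zh-CN string elsewhere in this diff this renders e.g. "3 提示词 × 4 迭代次数 -> 12 次生成".
```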
@@ -994,17 +1195,26 @@
|
||||
"denoisingStrength": "Sterkte ontruising",
|
||||
"refinermodel": "Verfijningsmodel",
|
||||
"posAestheticScore": "Positieve esthetische score",
|
||||
"concatPromptStyle": "Plak prompt- en stijltekst aan elkaar",
|
||||
"concatPromptStyle": "Koppelen van prompt en stijl",
|
||||
"loading": "Bezig met laden...",
|
||||
"steps": "Stappen",
|
||||
"posStylePrompt": "Positieve-stijlprompt"
|
||||
"posStylePrompt": "Positieve-stijlprompt",
|
||||
"freePromptStyle": "Handmatige stijlprompt",
|
||||
"refinerSteps": "Aantal stappen verfijner"
|
||||
},
|
||||
"models": {
|
||||
"noMatchingModels": "Geen overeenkomend modellen",
|
||||
"loading": "bezig met laden",
|
||||
"noMatchingLoRAs": "Geen overeenkomende LoRA's",
|
||||
"noModelsAvailable": "Geen modellen beschikbaar",
|
||||
"selectModel": "Kies een model"
|
||||
"selectModel": "Kies een model",
|
||||
"noLoRAsInstalled": "Geen LoRA's geïnstalleerd",
|
||||
"noRefinerModelsInstalled": "Geen SDXL-verfijningsmodellen geïnstalleerd",
|
||||
"defaultVAE": "Standaard-VAE",
|
||||
"lora": "LoRA",
|
||||
"esrganModel": "ESRGAN-model",
|
||||
"addLora": "Voeg LoRA toe",
|
||||
"concepts": "Concepten"
|
||||
},
|
||||
"boards": {
|
||||
"autoAddBoard": "Voeg automatisch bord toe",
|
||||
@@ -1022,7 +1232,13 @@
|
||||
"downloadBoard": "Download bord",
|
||||
"changeBoard": "Wijzig bord",
|
||||
"loading": "Bezig met laden...",
|
||||
"clearSearch": "Maak zoekopdracht leeg"
|
||||
"clearSearch": "Maak zoekopdracht leeg",
|
||||
"deleteBoard": "Verwijder bord",
|
||||
"deleteBoardAndImages": "Verwijder bord en afbeeldingen",
|
||||
"deleteBoardOnly": "Verwijder alleen bord",
|
||||
"deletedBoardsCannotbeRestored": "Verwijderde borden kunnen niet worden hersteld",
|
||||
"movingImagesToBoard_one": "Verplaatsen van {{count}} afbeelding naar bord:",
|
||||
"movingImagesToBoard_other": "Verplaatsen van {{count}} afbeeldingen naar bord:"
|
||||
},
|
||||
"invocationCache": {
|
||||
"disable": "Schakel uit",
|
||||
@@ -1039,5 +1255,39 @@
|
||||
"clear": "Wis",
|
||||
"maxCacheSize": "Max. grootte cache",
|
||||
"cacheSize": "Grootte cache"
|
||||
},
|
||||
"accordions": {
|
||||
"generation": {
|
||||
"title": "Genereren"
|
||||
},
|
||||
"image": {
|
||||
"title": "Afbeelding"
|
||||
},
|
||||
"advanced": {
|
||||
"title": "Geavanceerd",
|
||||
"options": "$t(accordions.advanced.title) Opties"
|
||||
},
|
||||
"control": {
|
||||
"title": "Besturing"
|
||||
},
|
||||
"compositing": {
|
||||
"title": "Samenstellen",
|
||||
"coherenceTab": "Coherentiefase",
|
||||
"infillTab": "Invullen"
|
||||
}
|
||||
},
|
||||
"hrf": {
|
||||
"upscaleMethod": "Opschaalmethode",
|
||||
"metadata": {
|
||||
"strength": "Sterkte oplossing voor hoge resolutie",
|
||||
"method": "Methode oplossing voor hoge resolutie",
|
||||
"enabled": "Oplossing voor hoge resolutie ingeschakeld"
|
||||
},
|
||||
"hrf": "Oplossing voor hoge resolutie",
|
||||
"enableHrf": "Schakel oplossing in voor hoge resolutie"
|
||||
},
|
||||
"prompt": {
|
||||
"addPromptTrigger": "Voeg prompttrigger toe",
|
||||
"compatibleEmbeddings": "Compatibele embeddings"
|
||||
}
|
||||
}
|
||||
|
||||
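Several values in these locale files, such as `"resetUI": "$t(accessibility.reset) UI"` above and the Russian `ui.tabs` entries below, rely on i18next's nesting syntax: a `$t(key)` reference inside a translation string is resolved recursively at lookup time. A short, self-contained sketch of the mechanism, using made-up resource keys purely for illustration:

```ts
import i18next from 'i18next';

// Minimal in-memory resources to demonstrate $t() nesting; the keys are illustrative.
await i18next.init({
  lng: 'en',
  resources: {
    en: {
      translation: {
        common: { tab: 'Tab' },
        ui: { tabs: { canvas: 'Canvas', canvasTab: '$t(ui.tabs.canvas) $t(common.tab)' } },
      },
    },
  },
});

// Nested references are expanded when the composite key is translated.
console.log(i18next.t('ui.tabs.canvasTab')); // "Canvas Tab"
```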
@@ -76,7 +76,18 @@
|
||||
"localSystem": "Локальная система",
|
||||
"aboutDesc": "Используя Invoke для работы? Проверьте это:",
|
||||
"add": "Добавить",
|
||||
"loglevel": "Уровень логов"
|
||||
"loglevel": "Уровень логов",
|
||||
"beta": "Бета",
|
||||
"selected": "Выбрано",
|
||||
"positivePrompt": "Позитивный запрос",
|
||||
"negativePrompt": "Негативный запрос",
|
||||
"editor": "Редактор",
|
||||
"goTo": "Перейти к",
|
||||
"tab": "Вкладка",
|
||||
"viewing": "Просмотр",
|
||||
"editing": "Редактирование",
|
||||
"viewingDesc": "Просмотр изображений в режиме большой галереи",
|
||||
"editingDesc": "Редактировать на холсте слоёв управления"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Размер изображений",
|
||||
@@ -87,8 +98,8 @@
"deleteImagePermanent": "Удаленные изображения невозможно восстановить.",
"deleteImageBin": "Удаленные изображения будут отправлены в корзину вашей операционной системы.",
"deleteImage_one": "Удалить изображение",
"deleteImage_few": "",
"deleteImage_many": "",
"deleteImage_few": "Удалить {{count}} изображения",
"deleteImage_many": "Удалить {{count}} изображений",
"assets": "Ресурсы",
"autoAssignBoardOnClick": "Авто-назначение доски по клику",
"deleteSelection": "Удалить выделенное",
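The `deleteImage_one` / `_few` / `_many` keys above are i18next plural forms: for Russian, the library picks the suffix from the CLDR plural category of the `count` option, which is why the previously empty `_few` and `_many` entries are being filled in here. A sketch of the caller side, assuming i18next is initialized with the `ru` locale and the resources from this file (illustrative only):

```ts
import i18next from 'i18next';

// i18next selects the _one/_few/_many variant based on the Russian plural rules for `count`.
const labelFor = (count: number) => i18next.t('gallery.deleteImage', { count });

labelFor(1);  // uses gallery.deleteImage_one  -> "Удалить изображение"
labelFor(3);  // uses gallery.deleteImage_few  -> "Удалить 3 изображения"
labelFor(10); // uses gallery.deleteImage_many -> "Удалить 10 изображений"
```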
@@ -336,6 +347,10 @@
|
||||
"remixImage": {
|
||||
"desc": "Используйте все параметры, кроме сида из текущего изображения",
|
||||
"title": "Ремикс изображения"
|
||||
},
|
||||
"toggleViewer": {
|
||||
"title": "Переключить просмотр изображений",
|
||||
"desc": "Переключение между средством просмотра изображений и рабочей областью для текущей вкладки."
|
||||
}
|
||||
},
|
||||
"modelManager": {
|
||||
@@ -512,7 +527,8 @@
|
||||
"missingNodeTemplate": "Отсутствует шаблон узла",
|
||||
"missingFieldTemplate": "Отсутствует шаблон поля",
|
||||
"addingImagesTo": "Добавление изображений в",
|
||||
"invoke": "Создать"
|
||||
"invoke": "Создать",
|
||||
"imageNotProcessedForControlAdapter": "Изображение адаптера контроля №{{number}} не обрабатывается"
|
||||
},
|
||||
"isAllowedToUpscale": {
|
||||
"useX2Model": "Изображение слишком велико для увеличения с помощью модели x4. Используйте модель x2",
|
||||
@@ -523,9 +539,6 @@
|
||||
"useCpuNoise": "Использовать шум CPU",
|
||||
"imageActions": "Действия с изображениями",
|
||||
"iterations": "Кол-во",
|
||||
"iterationsWithCount_one": "{{count}} Интеграция",
|
||||
"iterationsWithCount_few": "{{count}} Итерации",
|
||||
"iterationsWithCount_many": "{{count}} Итераций",
|
||||
"useSize": "Использовать размер",
|
||||
"coherenceMode": "Режим",
|
||||
"aspect": "Соотношение",
|
||||
@@ -541,7 +554,10 @@
|
||||
"infillMosaicTileHeight": "Высота плиток",
|
||||
"infillMosaicMinColor": "Мин цвет",
|
||||
"infillMosaicMaxColor": "Макс цвет",
|
||||
"infillColorValue": "Цвет заливки"
|
||||
"infillColorValue": "Цвет заливки",
|
||||
"globalSettings": "Глобальные настройки",
|
||||
"globalNegativePromptPlaceholder": "Глобальный негативный запрос",
|
||||
"globalPositivePromptPlaceholder": "Глобальный запрос"
|
||||
},
|
||||
"settings": {
|
||||
"models": "Модели",
|
||||
@@ -706,7 +722,9 @@
|
||||
"coherenceModeBoxBlur": "коробчатое размытие",
|
||||
"discardCurrent": "Отбросить текущее",
|
||||
"invertBrushSizeScrollDirection": "Инвертировать прокрутку для размера кисти",
|
||||
"initialFitImageSize": "Подогнать размер изображения при перебросе"
|
||||
"initialFitImageSize": "Подогнать размер изображения при перебросе",
|
||||
"hideBoundingBox": "Скрыть ограничительную рамку",
|
||||
"showBoundingBox": "Показать ограничительную рамку"
|
||||
},
|
||||
"accessibility": {
|
||||
"uploadImage": "Загрузить изображение",
|
||||
@@ -849,7 +867,10 @@
|
||||
"editMode": "Открыть в редакторе узлов",
|
||||
"resetToDefaultValue": "Сбросить к стандартному значкнию",
|
||||
"edit": "Редактировать",
|
||||
"noFieldsViewMode": "В этом рабочем процессе нет выбранных полей для отображения. Просмотрите полный рабочий процесс для настройки значений."
|
||||
"noFieldsViewMode": "В этом рабочем процессе нет выбранных полей для отображения. Просмотрите полный рабочий процесс для настройки значений.",
|
||||
"graph": "График",
|
||||
"showEdgeLabels": "Показать метки на ребрах",
|
||||
"showEdgeLabelsHelp": "Показать метки на ребрах, указывающие на соединенные узлы"
|
||||
},
|
||||
"controlnet": {
|
||||
"amult": "a_mult",
|
||||
@@ -917,8 +938,8 @@
|
||||
"lineartAnime": "Контурный рисунок в стиле аниме",
|
||||
"mediapipeFaceDescription": "Обнаружение лиц с помощью Mediapipe",
|
||||
"hedDescription": "Целостное обнаружение границ",
|
||||
"setControlImageDimensions": "Установите размеры контрольного изображения на Ш/В",
|
||||
"scribble": "каракули",
|
||||
"setControlImageDimensions": "Скопируйте размер в Ш/В (оптимизируйте для модели)",
|
||||
"scribble": "Штрихи",
|
||||
"maxFaces": "Макс Лица",
|
||||
"mlsdDescription": "Минималистичный детектор отрезков линии",
|
||||
"resizeSimple": "Изменить размер (простой)",
|
||||
@@ -933,7 +954,18 @@
|
||||
"small": "Маленький",
|
||||
"body": "Тело",
|
||||
"hands": "Руки",
|
||||
"selectCLIPVisionModel": "Выбрать модель CLIP Vision"
|
||||
"selectCLIPVisionModel": "Выбрать модель CLIP Vision",
|
||||
"ipAdapterMethod": "Метод",
|
||||
"full": "Всё",
|
||||
"mlsd": "M-LSD",
|
||||
"h": "H",
|
||||
"style": "Только стиль",
|
||||
"dwOpenpose": "DW Openpose",
|
||||
"pidi": "PIDI",
|
||||
"composition": "Только композиция",
|
||||
"hed": "HED",
|
||||
"beginEndStepPercentShort": "Начало/конец %",
|
||||
"setControlImageDimensionsForce": "Скопируйте размер в Ш/В (игнорируйте модель)"
|
||||
},
|
||||
"boards": {
|
||||
"autoAddBoard": "Авто добавление Доски",
|
||||
@@ -1312,6 +1344,12 @@
|
||||
"paragraphs": [
|
||||
"Плавно укладывайте изображение вдоль вертикальной оси."
|
||||
]
|
||||
},
|
||||
"ipAdapterMethod": {
|
||||
"heading": "Метод",
|
||||
"paragraphs": [
|
||||
"Метод, с помощью которого применяется текущий IP-адаптер."
|
||||
]
|
||||
}
|
||||
},
|
||||
"metadata": {
|
||||
@@ -1359,7 +1397,6 @@
|
||||
"completed": "Выполнено",
|
||||
"queueBack": "Добавить в очередь",
|
||||
"cancelFailed": "Проблема с отменой элемента",
|
||||
"queueCountPrediction": "{{promptsCount}} запросов × {{iterations}} изображений -> {{count}} генераций",
|
||||
"batchQueued": "Пакетная очередь",
|
||||
"pauseFailed": "Проблема с приостановкой рендеринга",
|
||||
"clearFailed": "Проблема с очисткой очереди",
|
||||
@@ -1475,7 +1512,11 @@
|
||||
"projectWorkflows": "Рабочие процессы проекта",
|
||||
"defaultWorkflows": "Стандартные рабочие процессы",
|
||||
"name": "Имя",
|
||||
"noRecentWorkflows": "Нет последних рабочих процессов"
|
||||
"noRecentWorkflows": "Нет последних рабочих процессов",
|
||||
"loadWorkflow": "Рабочий процесс $t(common.load)",
|
||||
"convertGraph": "Конвертировать график",
|
||||
"loadFromGraph": "Загрузка рабочего процесса из графика",
|
||||
"autoLayout": "Автоматическое расположение"
|
||||
},
|
||||
"hrf": {
|
||||
"enableHrf": "Включить исправление высокого разрешения",
|
||||
@@ -1528,5 +1569,55 @@
|
||||
"addPromptTrigger": "Добавить триггер запроса",
|
||||
"compatibleEmbeddings": "Совместимые встраивания",
|
||||
"noMatchingTriggers": "Нет соответствующих триггеров"
|
||||
},
|
||||
"controlLayers": {
|
||||
"moveToBack": "На задний план",
|
||||
"moveForward": "Переместить вперёд",
|
||||
"moveBackward": "Переместить назад",
|
||||
"brushSize": "Размер кисти",
|
||||
"controlLayers": "Слои управления",
|
||||
"globalMaskOpacity": "Глобальная непрозрачность маски",
|
||||
"autoNegative": "Авто негатив",
|
||||
"deletePrompt": "Удалить запрос",
|
||||
"resetRegion": "Сбросить регион",
|
||||
"debugLayers": "Слои отладки",
|
||||
"rectangle": "Прямоугольник",
|
||||
"maskPreviewColor": "Цвет предпросмотра маски",
|
||||
"addNegativePrompt": "Добавить $t(common.negativePrompt)",
|
||||
"regionalGuidance": "Региональная точность",
|
||||
"opacity": "Непрозрачность",
|
||||
"globalControlAdapter": "Глобальный $t(controlnet.controlAdapter_one)",
|
||||
"globalControlAdapterLayer": "Глобальный $t(controlnet.controlAdapter_one) $t(unifiedCanvas.layer)",
|
||||
"globalIPAdapter": "Глобальный $t(common.ipAdapter)",
|
||||
"globalIPAdapterLayer": "Глобальный $t(common.ipAdapter) $t(unifiedCanvas.layer)",
|
||||
"opacityFilter": "Фильтр непрозрачности",
|
||||
"deleteAll": "Удалить всё",
|
||||
"addLayer": "Добавить слой",
|
||||
"moveToFront": "На передний план",
|
||||
"addPositivePrompt": "Добавить $t(common.positivePrompt)",
|
||||
"addIPAdapter": "Добавить $t(common.ipAdapter)",
|
||||
"regionalGuidanceLayer": "$t(controlLayers.regionalGuidance) $t(unifiedCanvas.layer)",
|
||||
"resetProcessor": "Сброс процессора по умолчанию",
|
||||
"clearProcessor": "Чистый процессор",
|
||||
"globalInitialImage": "Глобальное исходное изображение",
|
||||
"globalInitialImageLayer": "$t(controlLayers.globalInitialImage) $t(unifiedCanvas.layer)",
|
||||
"noLayersAdded": "Без слоев",
|
||||
"layers_one": "Слой",
|
||||
"layers_few": "Слоя",
|
||||
"layers_many": "Слоев"
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
"generation": "Генерация",
|
||||
"canvas": "Холст",
|
||||
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
|
||||
"models": "Модели",
|
||||
"generationTab": "$t(ui.tabs.generation) $t(common.tab)",
|
||||
"workflows": "Рабочие процессы",
|
||||
"canvasTab": "$t(ui.tabs.canvas) $t(common.tab)",
|
||||
"queueTab": "$t(ui.tabs.queue) $t(common.tab)",
|
||||
"modelsTab": "$t(ui.tabs.models) $t(common.tab)",
|
||||
"queue": "Очередь"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -66,7 +66,7 @@
|
||||
"saveAs": "保存为",
|
||||
"ai": "ai",
|
||||
"or": "或",
|
||||
"aboutDesc": "使用 Invoke 工作?查看:",
|
||||
"aboutDesc": "使用 Invoke 工作?来看看:",
|
||||
"add": "添加",
|
||||
"loglevel": "日志级别",
|
||||
"copy": "复制",
|
||||
@@ -445,7 +445,6 @@
|
||||
"useX2Model": "图像太大,无法使用 x4 模型,使用 x2 模型作为替代",
|
||||
"tooLarge": "图像太大无法进行放大,请选择更小的图像"
|
||||
},
|
||||
"iterationsWithCount_other": "{{count}} 次迭代生成",
|
||||
"cfgRescaleMultiplier": "CFG 重缩放倍数",
|
||||
"useSize": "使用尺寸",
|
||||
"setToOptimalSize": "优化模型大小",
|
||||
@@ -853,7 +852,6 @@
|
||||
"pruneSucceeded": "从队列修剪 {{item_count}} 个已完成的项目",
|
||||
"notReady": "无法排队",
|
||||
"batchFailedToQueue": "批次加入队列失败",
|
||||
"queueCountPrediction": "{{promptsCount}} 提示词 × {{iterations}} 迭代次数 -> {{count}} 次生成",
|
||||
"batchQueued": "加入队列的批次",
|
||||
"front": "前",
|
||||
"pruneTooltip": "修剪 {{item_count}} 个已完成的项目",
|
||||
|
||||
@@ -1,3 +1,4 @@
/* eslint-disable no-console */
import fs from 'node:fs';

import openapiTS from 'openapi-typescript';

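This hunk only adds the `no-console` escape hatch and the `node:fs` import; the rest of the generator script is not part of the diff. For context, a hedged sketch of how such a script typically drives `openapi-typescript` — the URL and output path below are assumptions, and this follows the v6-style API where `openapiTS` resolves to a string of type declarations:

```ts
/* eslint-disable no-console */
import fs from 'node:fs';

import openapiTS from 'openapi-typescript';

// Assumed location of the running InvokeAI server's schema and of the generated file.
const OPENAPI_URL = 'http://127.0.0.1:9090/openapi.json';
const OUTPUT_FILE = 'src/services/api/schema.ts';

async function main() {
  console.log(`Generating types from ${OPENAPI_URL}`);
  // openapi-typescript v6 returns the full declaration file contents as a string.
  const types = await openapiTS(OPENAPI_URL);
  fs.writeFileSync(OUTPUT_FILE, types);
  console.log(`Wrote ${OUTPUT_FILE}`);
}

main();
```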
@@ -21,10 +21,10 @@ import i18n from 'i18n';
import { size } from 'lodash-es';
import { memo, useCallback, useEffect } from 'react';
import { ErrorBoundary } from 'react-error-boundary';
import { useGetOpenAPISchemaQuery } from 'services/api/endpoints/appInfo';

import AppErrorBoundaryFallback from './AppErrorBoundaryFallback';
import PreselectedImage from './PreselectedImage';
import Toaster from './Toaster';

const DEFAULT_CONFIG = {};

@@ -46,6 +46,7 @@ const App = ({ config = DEFAULT_CONFIG, selectedImage }: Props) => {
useSocketIO();
useGlobalModifiersInit();
useGlobalHotkeys();
useGetOpenAPISchemaQuery();

const { dropzone, isHandlingUpload, setIsHandlingUpload } = useFullscreenDropzone();

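Calling `useGetOpenAPISchemaQuery()` at the app root subscribes the whole app to the OpenAPI schema fetch so downstream consumers (e.g. node template parsing) can read it from the RTK Query cache. The endpoint definition itself is not shown in this diff; a hedged sketch of what such an endpoint might look like — the path, base query setup and result type are assumptions:

```ts
import { createApi, fetchBaseQuery } from '@reduxjs/toolkit/query/react';

// Assumed shape: the schema is consumed as an untyped JSON document elsewhere.
type OpenAPIDocument = Record<string, unknown>;

export const appInfoApi = createApi({
  reducerPath: 'appInfoApi',
  baseQuery: fetchBaseQuery({ baseUrl: '/' }),
  endpoints: (builder) => ({
    // RTK Query auto-generates the useGetOpenAPISchemaQuery hook from this endpoint name.
    getOpenAPISchema: builder.query<OpenAPIDocument, void>({
      query: () => 'openapi.json',
    }),
  }),
});

export const { useGetOpenAPISchemaQuery } = appInfoApi;
```

Calling the generated hook with no arguments, as in the hunk above, triggers the fetch once and caches the result for any other subscriber.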
@@ -94,7 +95,6 @@ const App = ({ config = DEFAULT_CONFIG, selectedImage }: Props) => {
<DeleteImageModal />
<ChangeBoardModal />
<DynamicPromptsModal />
<Toaster />
<PreselectedImage selectedImage={selectedImage} />
</ErrorBoundary>
);

@@ -1,5 +1,8 @@
|
||||
import { Button, Flex, Heading, Link, Text, useToast } from '@invoke-ai/ui-library';
|
||||
import { Button, Flex, Heading, Image, Link, Text } from '@invoke-ai/ui-library';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import newGithubIssueUrl from 'new-github-issue-url';
|
||||
import InvokeLogoYellow from 'public/assets/images/invoke-symbol-ylw-lrg.svg';
|
||||
import { memo, useCallback, useMemo } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { PiArrowCounterClockwiseBold, PiArrowSquareOutBold, PiCopyBold } from 'react-icons/pi';
|
||||
@@ -11,31 +14,39 @@ type Props = {
|
||||
};
|
||||
|
||||
const AppErrorBoundaryFallback = ({ error, resetErrorBoundary }: Props) => {
|
||||
const toast = useToast();
|
||||
const { t } = useTranslation();
|
||||
const isLocal = useAppSelector((s) => s.config.isLocal);
|
||||
|
||||
const handleCopy = useCallback(() => {
|
||||
const text = JSON.stringify(serializeError(error), null, 2);
|
||||
navigator.clipboard.writeText(`\`\`\`\n${text}\n\`\`\``);
|
||||
toast({
|
||||
title: 'Error Copied',
|
||||
id: 'ERROR_COPIED',
|
||||
title: t('toast.errorCopied'),
|
||||
});
|
||||
}, [error, toast]);
|
||||
}, [error, t]);
|
||||
|
||||
const url = useMemo(
|
||||
() =>
|
||||
newGithubIssueUrl({
|
||||
const url = useMemo(() => {
|
||||
if (isLocal) {
|
||||
return newGithubIssueUrl({
|
||||
user: 'invoke-ai',
|
||||
repo: 'InvokeAI',
|
||||
template: 'BUG_REPORT.yml',
|
||||
title: `[bug]: ${error.name}: ${error.message}`,
|
||||
}),
|
||||
[error.message, error.name]
|
||||
);
|
||||
});
|
||||
} else {
|
||||
return 'https://support.invoke.ai/support/tickets/new';
|
||||
}
|
||||
}, [error.message, error.name, isLocal]);
|
||||
|
||||
return (
|
||||
<Flex layerStyle="body" w="100vw" h="100vh" alignItems="center" justifyContent="center" p={4}>
|
||||
<Flex layerStyle="first" flexDir="column" borderRadius="base" justifyContent="center" gap={8} p={16}>
|
||||
<Heading>{t('common.somethingWentWrong')}</Heading>
|
||||
<Flex alignItems="center" gap="2">
|
||||
<Image src={InvokeLogoYellow} alt="invoke-logo" w="24px" h="24px" minW="24px" minH="24px" userSelect="none" />
|
||||
<Heading fontSize="2xl">{t('common.somethingWentWrong')}</Heading>
|
||||
</Flex>
|
||||
|
||||
<Flex
|
||||
layerStyle="second"
|
||||
px={8}
|
||||
@@ -57,7 +68,9 @@ const AppErrorBoundaryFallback = ({ error, resetErrorBoundary }: Props) => {
{t('common.copyError')}
</Button>
<Link href={url} isExternal>
<Button leftIcon={<PiArrowSquareOutBold />}>{t('accessibility.createIssue')}</Button>
<Button leftIcon={<PiArrowSquareOutBold />}>
{isLocal ? t('accessibility.createIssue') : t('accessibility.submitSupportTicket')}
</Button>
</Link>
</Flex>
</Flex>

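The fallback above is rendered by `react-error-boundary`'s `ErrorBoundary`, which is imported in the App diff earlier in this compare. A minimal sketch of how the two are typically wired together — the props mirror the library's documented API, while the reset behavior shown here is an assumption:

```tsx
import { memo, type ReactNode } from 'react';
import { ErrorBoundary } from 'react-error-boundary';

import AppErrorBoundaryFallback from './AppErrorBoundaryFallback';

// FallbackComponent receives { error, resetErrorBoundary }, matching the Props used above.
const AppShell = ({ children }: { children: ReactNode }) => (
  <ErrorBoundary FallbackComponent={AppErrorBoundaryFallback} onReset={() => window.location.reload()}>
    {children}
  </ErrorBoundary>
);

export default memo(AppShell);
```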
@@ -1,44 +0,0 @@
|
||||
import { useToast } from '@invoke-ai/ui-library';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { addToast, clearToastQueue } from 'features/system/store/systemSlice';
|
||||
import type { MakeToastArg } from 'features/system/util/makeToast';
|
||||
import { makeToast } from 'features/system/util/makeToast';
|
||||
import { memo, useCallback, useEffect } from 'react';
|
||||
|
||||
/**
|
||||
* Logical component. Watches the toast queue and makes toasts when the queue is not empty.
|
||||
* @returns null
|
||||
*/
|
||||
const Toaster = () => {
|
||||
const dispatch = useAppDispatch();
|
||||
const toastQueue = useAppSelector((s) => s.system.toastQueue);
|
||||
const toast = useToast();
|
||||
useEffect(() => {
|
||||
toastQueue.forEach((t) => {
|
||||
toast(t);
|
||||
});
|
||||
toastQueue.length > 0 && dispatch(clearToastQueue());
|
||||
}, [dispatch, toast, toastQueue]);
|
||||
|
||||
return null;
|
||||
};
|
||||
|
||||
/**
|
||||
* Returns a function that can be used to make a toast.
|
||||
* @example
|
||||
* const toaster = useAppToaster();
|
||||
* toaster('Hello world!');
|
||||
* toaster({ title: 'Hello world!', status: 'success' });
|
||||
* @returns A function that can be used to make a toast.
|
||||
* @see makeToast
|
||||
* @see MakeToastArg
|
||||
* @see UseToastOptions
|
||||
*/
|
||||
export const useAppToaster = () => {
|
||||
const dispatch = useAppDispatch();
|
||||
const toaster = useCallback((arg: MakeToastArg) => dispatch(addToast(makeToast(arg))), [dispatch]);
|
||||
|
||||
return toaster;
|
||||
};
|
||||
|
||||
export default memo(Toaster);
|
||||
@@ -67,6 +67,8 @@ export const useSocketIO = () => {

if ($isDebugging.get() || import.meta.env.MODE === 'development') {
window.$socketOptions = $socketOptions;
// This is only enabled manually for debugging, console is allowed.
/* eslint-disable-next-line no-console */
console.log('Socket initialized', socket);
}

@@ -75,6 +77,8 @@ export const useSocketIO = () => {
return () => {
if ($isDebugging.get() || import.meta.env.MODE === 'development') {
window.$socketOptions = undefined;
// This is only enabled manually for debugging, console is allowed.
/* eslint-disable-next-line no-console */
console.log('Socket teardown', socket);
}
socket.disconnect();

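The `$isDebugging.get()` guard suggests a nanostores atom that can be flipped at runtime to turn this logging on outside of dev mode; the store's definition is not part of this diff, so the sketch below is an assumption based on the `$`-prefixed `.get()` usage seen elsewhere in these files:

```ts
import { atom } from 'nanostores';

// Assumed definition: a simple boolean atom, off by default.
export const $isDebugging = atom(false);

// Flipping it (e.g. from a devtools console helper) enables the socket logging above.
export const toggleSocketDebugging = () => {
  $isDebugging.set(!$isDebugging.get());
};
```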
@@ -1,3 +1,6 @@
/* eslint-disable no-console */
// This is only enabled manually for debugging, console is allowed.

import type { Middleware, MiddlewareAPI } from '@reduxjs/toolkit';
import { diff } from 'jsondiffpatch';

@@ -1,7 +1,6 @@
|
||||
import type { UnknownAction } from '@reduxjs/toolkit';
|
||||
import { deepClone } from 'common/util/deepClone';
|
||||
import { isAnyGraphBuilt } from 'features/nodes/store/actions';
|
||||
import { nodeTemplatesBuilt } from 'features/nodes/store/nodesSlice';
|
||||
import { appInfoApi } from 'services/api/endpoints/appInfo';
|
||||
import type { Graph } from 'services/api/types';
|
||||
import { socketGeneratorProgress } from 'services/events/actions';
|
||||
@@ -25,13 +24,6 @@ export const actionSanitizer = <A extends UnknownAction>(action: A): A => {
};
}

if (nodeTemplatesBuilt.match(action)) {
return {
...action,
payload: '<Node templates omitted>',
};
}

if (socketGeneratorProgress.match(action)) {
const sanitized = deepClone(action);
if (sanitized.payload.data.progress_image) {

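An `actionSanitizer` like this only takes effect when it is passed to the Redux DevTools options in `configureStore`; this diff trims the node-template branch but the wiring stays the same. A hedged sketch of that wiring, with the real reducers replaced by a placeholder purely for illustration:

```ts
import { configureStore } from '@reduxjs/toolkit';

import { actionSanitizer } from './actionSanitizer';

// Illustrative store setup: large payloads are replaced before they reach the DevTools extension.
export const store = configureStore({
  reducer: {
    placeholder: (state: null = null) => state,
  },
  devTools: {
    actionSanitizer,
  },
});
```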
@@ -41,12 +41,10 @@ import { addGeneratorProgressEventListener } from 'app/store/middleware/listener
|
||||
import { addGraphExecutionStateCompleteEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketGraphExecutionStateComplete';
|
||||
import { addInvocationCompleteEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete';
|
||||
import { addInvocationErrorEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationError';
|
||||
import { addInvocationRetrievalErrorEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationRetrievalError';
|
||||
import { addInvocationStartedEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationStarted';
|
||||
import { addModelInstallEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketModelInstall';
|
||||
import { addModelLoadEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketModelLoad';
|
||||
import { addSocketQueueItemStatusChangedEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketQueueItemStatusChanged';
|
||||
import { addSessionRetrievalErrorEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketSessionRetrievalError';
|
||||
import { addSocketSubscribedEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketSubscribed';
|
||||
import { addSocketUnsubscribedEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketUnsubscribed';
|
||||
import { addStagingAreaImageSavedListener } from 'app/store/middleware/listenerMiddleware/listeners/stagingAreaImageSaved';
|
||||
@@ -114,8 +112,6 @@ addSocketSubscribedEventListener(startAppListening);
|
||||
addSocketUnsubscribedEventListener(startAppListening);
|
||||
addModelLoadEventListener(startAppListening);
|
||||
addModelInstallEventListener(startAppListening);
|
||||
addSessionRetrievalErrorEventListener(startAppListening);
|
||||
addInvocationRetrievalErrorEventListener(startAppListening);
|
||||
addSocketQueueItemStatusChangedEventListener(startAppListening);
|
||||
addBulkDownloadListeners(startAppListening);
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ import {
|
||||
resetCanvas,
|
||||
setInitialCanvasImage,
|
||||
} from 'features/canvas/store/canvasSlice';
|
||||
import { addToast } from 'features/system/store/systemSlice';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { t } from 'i18next';
|
||||
import { queueApi } from 'services/api/endpoints/queue';
|
||||
|
||||
@@ -30,22 +30,20 @@ export const addCommitStagingAreaImageListener = (startAppListening: AppStartLis
|
||||
req.reset();
|
||||
if (canceled > 0) {
|
||||
log.debug(`Canceled ${canceled} canvas batches`);
|
||||
dispatch(
|
||||
addToast({
|
||||
title: t('queue.cancelBatchSucceeded'),
|
||||
status: 'success',
|
||||
})
|
||||
);
|
||||
toast({
|
||||
id: 'CANCEL_BATCH_SUCCEEDED',
|
||||
title: t('queue.cancelBatchSucceeded'),
|
||||
status: 'success',
|
||||
});
|
||||
}
|
||||
dispatch(canvasBatchIdsReset());
|
||||
} catch {
|
||||
log.error('Failed to cancel canvas batches');
|
||||
dispatch(
|
||||
addToast({
|
||||
title: t('queue.cancelBatchFailed'),
|
||||
status: 'error',
|
||||
})
|
||||
);
|
||||
toast({
id: 'CANCEL_BATCH_FAILED',
title: t('queue.cancelBatchFailed'),
status: 'error',
});
}
},
});

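Across these listeners the old `dispatch(addToast(...))` flow is replaced by a standalone `toast({ id, title, status })` call from `features/toast/toast`, keyed by a stable id so repeated events update a single toast instead of stacking. That module's implementation is not shown in this diff; the sketch below is an assumption about its shape, using Chakra-style standalone toast semantics rather than the project's actual UI library wrapper:

```ts
import type { UseToastOptions } from '@chakra-ui/react';
import { createStandaloneToast } from '@chakra-ui/react';

const { toast: chakraToast } = createStandaloneToast();

type ToastArg = UseToastOptions & { id: string };

// De-duplicate by id: update the existing toast while it is on screen, otherwise create it.
export const toast = (arg: ToastArg) => {
  if (chakraToast.isActive(arg.id)) {
    chakraToast.update(arg.id, arg);
  } else {
    chakraToast(arg);
  }
};
```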
@@ -1,8 +1,8 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { parseify } from 'common/util/serialize';
|
||||
import { toast } from 'common/util/toast';
|
||||
import { zPydanticValidationError } from 'features/system/store/zodSchemas';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { t } from 'i18next';
|
||||
import { truncate, upperFirst } from 'lodash-es';
|
||||
import { queueApi } from 'services/api/endpoints/queue';
|
||||
@@ -16,18 +16,15 @@ export const addBatchEnqueuedListener = (startAppListening: AppStartListening) =
|
||||
const arg = action.meta.arg.originalArgs;
|
||||
logger('queue').debug({ enqueueResult: parseify(response) }, 'Batch enqueued');
|
||||
|
||||
if (!toast.isActive('batch-queued')) {
|
||||
toast({
|
||||
id: 'batch-queued',
|
||||
title: t('queue.batchQueued'),
|
||||
description: t('queue.batchQueuedDesc', {
|
||||
count: response.enqueued,
|
||||
direction: arg.prepend ? t('queue.front') : t('queue.back'),
|
||||
}),
|
||||
duration: 1000,
|
||||
status: 'success',
|
||||
});
|
||||
}
|
||||
toast({
|
||||
id: 'QUEUE_BATCH_SUCCEEDED',
|
||||
title: t('queue.batchQueued'),
|
||||
status: 'success',
|
||||
description: t('queue.batchQueuedDesc', {
|
||||
count: response.enqueued,
|
||||
direction: arg.prepend ? t('queue.front') : t('queue.back'),
|
||||
}),
|
||||
});
|
||||
},
|
||||
});
|
||||
|
||||
@@ -40,9 +37,10 @@ export const addBatchEnqueuedListener = (startAppListening: AppStartListening) =
|
||||
|
||||
if (!response) {
|
||||
toast({
|
||||
id: 'QUEUE_BATCH_FAILED',
|
||||
title: t('queue.batchFailedToQueue'),
|
||||
status: 'error',
|
||||
description: 'Unknown Error',
|
||||
description: t('common.unknownError'),
|
||||
});
|
||||
logger('queue').error({ batchConfig: parseify(arg), error: parseify(response) }, t('queue.batchFailedToQueue'));
|
||||
return;
|
||||
@@ -52,7 +50,7 @@ export const addBatchEnqueuedListener = (startAppListening: AppStartListening) =
|
||||
if (result.success) {
|
||||
result.data.data.detail.map((e) => {
|
||||
toast({
|
||||
id: 'batch-failed-to-queue',
|
||||
id: 'QUEUE_BATCH_FAILED',
|
||||
title: truncate(upperFirst(e.msg), { length: 128 }),
|
||||
status: 'error',
|
||||
description: truncate(
|
||||
@@ -64,9 +62,10 @@ export const addBatchEnqueuedListener = (startAppListening: AppStartListening) =
|
||||
});
|
||||
} else if (response.status !== 403) {
|
||||
toast({
|
||||
id: 'QUEUE_BATCH_FAILED',
|
||||
title: t('queue.batchFailedToQueue'),
|
||||
description: t('common.unknownError'),
|
||||
status: 'error',
|
||||
description: t('common.unknownError'),
|
||||
});
|
||||
}
|
||||
logger('queue').error({ batchConfig: parseify(arg), error: parseify(response) }, t('queue.batchFailedToQueue'));
|
||||
|
||||
@@ -21,7 +21,7 @@ export const addDeleteBoardAndImagesFulfilledListener = (startAppListening: AppS
|
||||
|
||||
const { canvas, nodes, controlAdapters, controlLayers } = getState();
|
||||
deleted_images.forEach((image_name) => {
|
||||
const imageUsage = getImageUsage(canvas, nodes, controlAdapters, controlLayers.present, image_name);
|
||||
const imageUsage = getImageUsage(canvas, nodes.present, controlAdapters, controlLayers.present, image_name);
|
||||
|
||||
if (imageUsage.isCanvasImage && !wasCanvasReset) {
|
||||
dispatch(resetCanvas());
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
import type { UseToastOptions } from '@invoke-ai/ui-library';
|
||||
import { ExternalLink } from '@invoke-ai/ui-library';
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { toast } from 'common/util/toast';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { t } from 'i18next';
|
||||
import { imagesApi } from 'services/api/endpoints/images';
|
||||
import {
|
||||
@@ -28,7 +27,6 @@ export const addBulkDownloadListeners = (startAppListening: AppStartListening) =
|
||||
// Show the response message if it exists, otherwise show the default message
|
||||
description: action.payload.response || t('gallery.bulkDownloadRequestedDesc'),
|
||||
duration: null,
|
||||
isClosable: true,
|
||||
});
|
||||
},
|
||||
});
|
||||
@@ -40,9 +38,9 @@ export const addBulkDownloadListeners = (startAppListening: AppStartListening) =
|
||||
|
||||
// There isn't any toast to update if we get this event.
|
||||
toast({
|
||||
id: 'BULK_DOWNLOAD_REQUEST_FAILED',
|
||||
title: t('gallery.bulkDownloadRequestFailed'),
|
||||
status: 'success',
|
||||
isClosable: true,
|
||||
status: 'error',
|
||||
});
|
||||
},
|
||||
});
|
||||
@@ -65,7 +63,7 @@ export const addBulkDownloadListeners = (startAppListening: AppStartListening) =
|
||||
// TODO(psyche): This URL may break in some environments (e.g. Nvidia workbench) but we need to test it first
|
||||
const url = `/api/v1/images/download/${bulk_download_item_name}`;
|
||||
|
||||
const toastOptions: UseToastOptions = {
|
||||
toast({
|
||||
id: bulk_download_item_name,
|
||||
title: t('gallery.bulkDownloadReady', 'Download ready'),
|
||||
status: 'success',
|
||||
@@ -77,14 +75,7 @@ export const addBulkDownloadListeners = (startAppListening: AppStartListening) =
|
||||
/>
|
||||
),
|
||||
duration: null,
|
||||
isClosable: true,
|
||||
};
|
||||
|
||||
if (toast.isActive(bulk_download_item_name)) {
|
||||
toast.update(bulk_download_item_name, toastOptions);
|
||||
} else {
|
||||
toast(toastOptions);
|
||||
}
|
||||
});
|
||||
},
|
||||
});
|
||||
|
||||
@@ -95,20 +86,13 @@ export const addBulkDownloadListeners = (startAppListening: AppStartListening) =
|
||||
|
||||
const { bulk_download_item_name } = action.payload.data;
|
||||
|
||||
const toastOptions: UseToastOptions = {
|
||||
toast({
|
||||
id: bulk_download_item_name,
|
||||
title: t('gallery.bulkDownloadFailed'),
|
||||
status: 'error',
|
||||
description: action.payload.data.error,
|
||||
duration: null,
|
||||
isClosable: true,
|
||||
};
|
||||
|
||||
if (toast.isActive(bulk_download_item_name)) {
|
||||
toast.update(bulk_download_item_name, toastOptions);
|
||||
} else {
|
||||
toast(toastOptions);
|
||||
}
|
||||
});
|
||||
},
|
||||
});
|
||||
};
|
||||
|
||||
@@ -2,14 +2,14 @@ import { $logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { canvasCopiedToClipboard } from 'features/canvas/store/actions';
|
||||
import { getBaseLayerBlob } from 'features/canvas/util/getBaseLayerBlob';
|
||||
import { addToast } from 'features/system/store/systemSlice';
|
||||
import { copyBlobToClipboard } from 'features/system/util/copyBlobToClipboard';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { t } from 'i18next';
|
||||
|
||||
export const addCanvasCopiedToClipboardListener = (startAppListening: AppStartListening) => {
|
||||
startAppListening({
|
||||
actionCreator: canvasCopiedToClipboard,
|
||||
effect: async (action, { dispatch, getState }) => {
|
||||
effect: async (action, { getState }) => {
|
||||
const moduleLog = $logger.get().child({ namespace: 'canvasCopiedToClipboardListener' });
|
||||
const state = getState();
|
||||
|
||||
@@ -19,22 +19,20 @@ export const addCanvasCopiedToClipboardListener = (startAppListening: AppStartLi
|
||||
copyBlobToClipboard(blob);
|
||||
} catch (err) {
|
||||
moduleLog.error(String(err));
|
||||
dispatch(
|
||||
addToast({
|
||||
title: t('toast.problemCopyingCanvas'),
|
||||
description: t('toast.problemCopyingCanvasDesc'),
|
||||
status: 'error',
|
||||
})
|
||||
);
|
||||
toast({
|
||||
id: 'CANVAS_COPY_FAILED',
|
||||
title: t('toast.problemCopyingCanvas'),
|
||||
description: t('toast.problemCopyingCanvasDesc'),
|
||||
status: 'error',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
dispatch(
|
||||
addToast({
|
||||
title: t('toast.canvasCopiedClipboard'),
|
||||
status: 'success',
|
||||
})
|
||||
);
|
||||
toast({
|
||||
id: 'CANVAS_COPY_SUCCEEDED',
|
||||
title: t('toast.canvasCopiedClipboard'),
|
||||
status: 'success',
|
||||
});
|
||||
},
|
||||
});
|
||||
};
|
||||
|
||||
@@ -3,13 +3,13 @@ import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'
|
||||
import { canvasDownloadedAsImage } from 'features/canvas/store/actions';
|
||||
import { downloadBlob } from 'features/canvas/util/downloadBlob';
|
||||
import { getBaseLayerBlob } from 'features/canvas/util/getBaseLayerBlob';
|
||||
import { addToast } from 'features/system/store/systemSlice';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { t } from 'i18next';
|
||||
|
||||
export const addCanvasDownloadedAsImageListener = (startAppListening: AppStartListening) => {
|
||||
startAppListening({
|
||||
actionCreator: canvasDownloadedAsImage,
|
||||
effect: async (action, { dispatch, getState }) => {
|
||||
effect: async (action, { getState }) => {
|
||||
const moduleLog = $logger.get().child({ namespace: 'canvasSavedToGalleryListener' });
|
||||
const state = getState();
|
||||
|
||||
@@ -18,18 +18,17 @@ export const addCanvasDownloadedAsImageListener = (startAppListening: AppStartLi
|
||||
blob = await getBaseLayerBlob(state);
|
||||
} catch (err) {
|
||||
moduleLog.error(String(err));
|
||||
dispatch(
|
||||
addToast({
|
||||
title: t('toast.problemDownloadingCanvas'),
|
||||
description: t('toast.problemDownloadingCanvasDesc'),
|
||||
status: 'error',
|
||||
})
|
||||
);
|
||||
toast({
|
||||
id: 'CANVAS_DOWNLOAD_FAILED',
|
||||
title: t('toast.problemDownloadingCanvas'),
|
||||
description: t('toast.problemDownloadingCanvasDesc'),
|
||||
status: 'error',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
downloadBlob(blob, 'canvas.png');
|
||||
dispatch(addToast({ title: t('toast.canvasDownloaded'), status: 'success' }));
|
||||
toast({ id: 'CANVAS_DOWNLOAD_SUCCEEDED', title: t('toast.canvasDownloaded'), status: 'success' });
|
||||
},
|
||||
});
|
||||
};
|
||||
|
||||
@@ -3,7 +3,7 @@ import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'
|
||||
import { canvasImageToControlAdapter } from 'features/canvas/store/actions';
|
||||
import { getBaseLayerBlob } from 'features/canvas/util/getBaseLayerBlob';
|
||||
import { controlAdapterImageChanged } from 'features/controlAdapters/store/controlAdaptersSlice';
|
||||
import { addToast } from 'features/system/store/systemSlice';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { t } from 'i18next';
|
||||
import { imagesApi } from 'services/api/endpoints/images';
|
||||
|
||||
@@ -20,13 +20,12 @@ export const addCanvasImageToControlNetListener = (startAppListening: AppStartLi
|
||||
blob = await getBaseLayerBlob(state, true);
|
||||
} catch (err) {
|
||||
log.error(String(err));
|
||||
dispatch(
|
||||
addToast({
|
||||
title: t('toast.problemSavingCanvas'),
|
||||
description: t('toast.problemSavingCanvasDesc'),
|
||||
status: 'error',
|
||||
})
|
||||
);
|
||||
toast({
|
||||
id: 'PROBLEM_SAVING_CANVAS',
|
||||
title: t('toast.problemSavingCanvas'),
|
||||
description: t('toast.problemSavingCanvasDesc'),
|
||||
status: 'error',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -43,7 +42,7 @@ export const addCanvasImageToControlNetListener = (startAppListening: AppStartLi
|
||||
crop_visible: false,
|
||||
postUploadAction: {
|
||||
type: 'TOAST',
|
||||
toastOptions: { title: t('toast.canvasSentControlnetAssets') },
|
||||
title: t('toast.canvasSentControlnetAssets'),
|
||||
},
|
||||
})
|
||||
).unwrap();
|
||||
|
||||
@@ -2,7 +2,7 @@ import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { canvasMaskSavedToGallery } from 'features/canvas/store/actions';
|
||||
import { getCanvasData } from 'features/canvas/util/getCanvasData';
|
||||
import { addToast } from 'features/system/store/systemSlice';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { t } from 'i18next';
|
||||
import { imagesApi } from 'services/api/endpoints/images';
|
||||
|
||||
@@ -29,13 +29,12 @@ export const addCanvasMaskSavedToGalleryListener = (startAppListening: AppStartL
|
||||
|
||||
if (!maskBlob) {
|
||||
log.error('Problem getting mask layer blob');
|
||||
dispatch(
|
||||
addToast({
|
||||
title: t('toast.problemSavingMask'),
|
||||
description: t('toast.problemSavingMaskDesc'),
|
||||
status: 'error',
|
||||
})
|
||||
);
|
||||
toast({
|
||||
id: 'PROBLEM_SAVING_MASK',
|
||||
title: t('toast.problemSavingMask'),
|
||||
description: t('toast.problemSavingMaskDesc'),
|
||||
status: 'error',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -52,7 +51,7 @@ export const addCanvasMaskSavedToGalleryListener = (startAppListening: AppStartL
|
||||
crop_visible: true,
|
||||
postUploadAction: {
|
||||
type: 'TOAST',
|
||||
toastOptions: { title: t('toast.maskSavedAssets') },
|
||||
title: t('toast.maskSavedAssets'),
|
||||
},
|
||||
})
|
||||
);
|
||||
|
||||
@@ -3,7 +3,7 @@ import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'
|
||||
import { canvasMaskToControlAdapter } from 'features/canvas/store/actions';
|
||||
import { getCanvasData } from 'features/canvas/util/getCanvasData';
|
||||
import { controlAdapterImageChanged } from 'features/controlAdapters/store/controlAdaptersSlice';
|
||||
import { addToast } from 'features/system/store/systemSlice';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { t } from 'i18next';
|
||||
import { imagesApi } from 'services/api/endpoints/images';
|
||||
|
||||
@@ -30,13 +30,12 @@ export const addCanvasMaskToControlNetListener = (startAppListening: AppStartLis
|
||||
|
||||
if (!maskBlob) {
|
||||
log.error('Problem getting mask layer blob');
|
||||
dispatch(
|
||||
addToast({
|
||||
title: t('toast.problemImportingMask'),
|
||||
description: t('toast.problemImportingMaskDesc'),
|
||||
status: 'error',
|
||||
})
|
||||
);
|
||||
toast({
|
||||
id: 'PROBLEM_IMPORTING_MASK',
|
||||
title: t('toast.problemImportingMask'),
|
||||
description: t('toast.problemImportingMaskDesc'),
|
||||
status: 'error',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -53,7 +52,7 @@ export const addCanvasMaskToControlNetListener = (startAppListening: AppStartLis
|
||||
crop_visible: false,
|
||||
postUploadAction: {
|
||||
type: 'TOAST',
|
||||
toastOptions: { title: t('toast.maskSentControlnetAssets') },
|
||||
title: t('toast.maskSentControlnetAssets'),
|
||||
},
|
||||
})
|
||||
).unwrap();
|
||||
|
||||
@@ -4,7 +4,7 @@ import { canvasMerged } from 'features/canvas/store/actions';
|
||||
import { $canvasBaseLayer } from 'features/canvas/store/canvasNanostore';
|
||||
import { setMergedCanvas } from 'features/canvas/store/canvasSlice';
|
||||
import { getFullBaseLayerBlob } from 'features/canvas/util/getFullBaseLayerBlob';
|
||||
import { addToast } from 'features/system/store/systemSlice';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { t } from 'i18next';
|
||||
import { imagesApi } from 'services/api/endpoints/images';
|
||||
|
||||
@@ -17,13 +17,12 @@ export const addCanvasMergedListener = (startAppListening: AppStartListening) =>
|
||||
|
||||
if (!blob) {
|
||||
moduleLog.error('Problem getting base layer blob');
|
||||
dispatch(
|
||||
addToast({
|
||||
title: t('toast.problemMergingCanvas'),
|
||||
description: t('toast.problemMergingCanvasDesc'),
|
||||
status: 'error',
|
||||
})
|
||||
);
|
||||
toast({
|
||||
id: 'PROBLEM_MERGING_CANVAS',
|
||||
title: t('toast.problemMergingCanvas'),
|
||||
description: t('toast.problemMergingCanvasDesc'),
|
||||
status: 'error',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -31,13 +30,12 @@ export const addCanvasMergedListener = (startAppListening: AppStartListening) =>
|
||||
|
||||
if (!canvasBaseLayer) {
|
||||
moduleLog.error('Problem getting canvas base layer');
|
||||
dispatch(
|
||||
addToast({
|
||||
title: t('toast.problemMergingCanvas'),
|
||||
description: t('toast.problemMergingCanvasDesc'),
|
||||
status: 'error',
|
||||
})
|
||||
);
|
||||
toast({
|
||||
id: 'PROBLEM_MERGING_CANVAS',
|
||||
title: t('toast.problemMergingCanvas'),
|
||||
description: t('toast.problemMergingCanvasDesc'),
|
||||
status: 'error',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -54,7 +52,7 @@ export const addCanvasMergedListener = (startAppListening: AppStartListening) =>
|
||||
is_intermediate: true,
|
||||
postUploadAction: {
|
||||
type: 'TOAST',
|
||||
toastOptions: { title: t('toast.canvasMerged') },
|
||||
title: t('toast.canvasMerged'),
|
||||
},
|
||||
})
|
||||
).unwrap();
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { parseify } from 'common/util/serialize';
|
||||
import { canvasSavedToGallery } from 'features/canvas/store/actions';
|
||||
import { getBaseLayerBlob } from 'features/canvas/util/getBaseLayerBlob';
|
||||
import { addToast } from 'features/system/store/systemSlice';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { t } from 'i18next';
|
||||
import { imagesApi } from 'services/api/endpoints/images';
|
||||
|
||||
@@ -18,13 +19,12 @@ export const addCanvasSavedToGalleryListener = (startAppListening: AppStartListe
|
||||
blob = await getBaseLayerBlob(state);
|
||||
} catch (err) {
|
||||
log.error(String(err));
|
||||
dispatch(
|
||||
addToast({
|
||||
title: t('toast.problemSavingCanvas'),
|
||||
description: t('toast.problemSavingCanvasDesc'),
|
||||
status: 'error',
|
||||
})
|
||||
);
|
||||
toast({
|
||||
id: 'CANVAS_SAVE_FAILED',
|
||||
title: t('toast.problemSavingCanvas'),
|
||||
description: t('toast.problemSavingCanvasDesc'),
|
||||
status: 'error',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -41,7 +41,10 @@ export const addCanvasSavedToGalleryListener = (startAppListening: AppStartListe
|
||||
crop_visible: true,
|
||||
postUploadAction: {
|
||||
type: 'TOAST',
|
||||
toastOptions: { title: t('toast.canvasSavedGallery') },
|
||||
title: t('toast.canvasSavedGallery'),
|
||||
},
|
||||
metadata: {
|
||||
_canvas_objects: parseify(state.canvas.layerState.objects),
|
||||
},
|
||||
})
|
||||
);
|
||||
|
||||
@@ -1,61 +1,57 @@
|
||||
import { isAnyOf } from '@reduxjs/toolkit';
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import type { AppDispatch } from 'app/store/store';
|
||||
import { parseify } from 'common/util/serialize';
|
||||
import {
|
||||
caLayerImageChanged,
|
||||
caLayerIsProcessingImageChanged,
|
||||
caLayerModelChanged,
|
||||
caLayerProcessedImageChanged,
|
||||
caLayerProcessorConfigChanged,
|
||||
caLayerProcessorPendingBatchIdChanged,
|
||||
caLayerRecalled,
|
||||
isControlAdapterLayer,
|
||||
} from 'features/controlLayers/store/controlLayersSlice';
|
||||
import { CA_PROCESSOR_DATA } from 'features/controlLayers/util/controlAdapters';
|
||||
import { isImageOutput } from 'features/nodes/types/common';
|
||||
import { addToast } from 'features/system/store/systemSlice';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { t } from 'i18next';
|
||||
import { isEqual } from 'lodash-es';
|
||||
import { imagesApi } from 'services/api/endpoints/images';
|
||||
import { getImageDTO } from 'services/api/endpoints/images';
|
||||
import { queueApi } from 'services/api/endpoints/queue';
|
||||
import type { BatchConfig, ImageDTO } from 'services/api/types';
|
||||
import type { BatchConfig } from 'services/api/types';
|
||||
import { socketInvocationComplete } from 'services/events/actions';
|
||||
import { assert } from 'tsafe';
|
||||
|
||||
const matcher = isAnyOf(caLayerImageChanged, caLayerProcessorConfigChanged, caLayerModelChanged, caLayerRecalled);
|
||||
|
||||
const DEBOUNCE_MS = 300;
|
||||
const log = logger('session');
|
||||
|
||||
/**
|
||||
* Simple helper to cancel a batch and reset the pending batch ID
|
||||
*/
|
||||
const cancelProcessorBatch = async (dispatch: AppDispatch, layerId: string, batchId: string) => {
|
||||
const req = dispatch(queueApi.endpoints.cancelByBatchIds.initiate({ batch_ids: [batchId] }));
|
||||
log.trace({ batchId }, 'Cancelling existing preprocessor batch');
|
||||
try {
|
||||
await req.unwrap();
|
||||
} catch {
|
||||
// no-op
|
||||
} finally {
|
||||
req.reset();
|
||||
// Always reset the pending batch ID - the cancel req could fail if the batch doesn't exist
|
||||
dispatch(caLayerProcessorPendingBatchIdChanged({ layerId, batchId: null }));
|
||||
}
|
||||
};
|
||||
|
||||
export const addControlAdapterPreprocessor = (startAppListening: AppStartListening) => {
|
||||
startAppListening({
|
||||
matcher,
|
||||
effect: async (action, { dispatch, getState, getOriginalState, cancelActiveListeners, delay, take }) => {
|
||||
effect: async (action, { dispatch, getState, getOriginalState, cancelActiveListeners, delay, take, signal }) => {
const layerId = caLayerRecalled.match(action) ? action.payload.id : action.payload.layerId;
const precheckLayerOriginal = getOriginalState()
.controlLayers.present.layers.filter(isControlAdapterLayer)
.find((l) => l.id === layerId);
const precheckLayer = getState()
.controlLayers.present.layers.filter(isControlAdapterLayer)
.find((l) => l.id === layerId);

// Conditions to bail
const layerDoesNotExist = !precheckLayer;
const layerHasNoImage = !precheckLayer?.controlAdapter.image;
const layerHasNoProcessorConfig = !precheckLayer?.controlAdapter.processorConfig;
const layerIsAlreadyProcessingImage = precheckLayer?.controlAdapter.isProcessingImage;
const areImageAndProcessorUnchanged =
isEqual(precheckLayer?.controlAdapter.image, precheckLayerOriginal?.controlAdapter.image) &&
isEqual(precheckLayer?.controlAdapter.processorConfig, precheckLayerOriginal?.controlAdapter.processorConfig);

if (
layerDoesNotExist ||
layerHasNoImage ||
layerHasNoProcessorConfig ||
areImageAndProcessorUnchanged ||
layerIsAlreadyProcessingImage
) {
return;
}
const state = getState();
const originalState = getOriginalState();

// Cancel any in-progress instances of this listener
cancelActiveListeners();
@@ -63,27 +59,55 @@ export const addControlAdapterPreprocessor = (startAppListening: AppStartListeni

// Delay before starting actual work
await delay(DEBOUNCE_MS);
dispatch(caLayerIsProcessingImageChanged({ layerId, isProcessingImage: true }));

// Double-check that we are still eligible for processing
const state = getState();
const layer = state.controlLayers.present.layers.filter(isControlAdapterLayer).find((l) => l.id === layerId);
const image = layer?.controlAdapter.image;
const config = layer?.controlAdapter.processorConfig;

// If we have no image or there is no processor config, bail
if (!layer || !image || !config) {
if (!layer) {
return;
}

// @ts-expect-error: TS isn't able to narrow the typing of buildNode and `config` will error...
const processorNode = CA_PROCESSOR_DATA[config.type].buildNode(image, config);
// We should only process if the processor settings or image have changed
const originalLayer = originalState.controlLayers.present.layers
.filter(isControlAdapterLayer)
.find((l) => l.id === layerId);
const originalImage = originalLayer?.controlAdapter.image;
const originalConfig = originalLayer?.controlAdapter.processorConfig;

const image = layer.controlAdapter.image;
const config = layer.controlAdapter.processorConfig;

if (isEqual(config, originalConfig) && isEqual(image, originalImage)) {
// Neither config nor image have changed, we can bail
return;
}

if (!image || !config) {
// - If we have no image, we have nothing to process
// - If we have no processor config, we have nothing to process
// Clear the processed image and bail
dispatch(caLayerProcessedImageChanged({ layerId, imageDTO: null }));
return;
}

// At this point, the user has stopped fiddling with the processor settings and there is a processor selected.

// If there is a pending processor batch, cancel it.
if (layer.controlAdapter.processorPendingBatchId) {
cancelProcessorBatch(dispatch, layerId, layer.controlAdapter.processorPendingBatchId);
}

// TODO(psyche): I can't get TS to be happy, it thinks `config` is `never` but it should be inferred from the generic... I'll just cast it for now
const processorNode = CA_PROCESSOR_DATA[config.type].buildNode(image, config as never);
const enqueueBatchArg: BatchConfig = {
prepend: true,
batch: {
graph: {
nodes: {
[processorNode.id]: { ...processorNode, is_intermediate: true },
[processorNode.id]: {
...processorNode,
// Control images are always intermediate - do not save to gallery
is_intermediate: true,
},
},
edges: [],
},
@@ -91,16 +115,21 @@ export const addControlAdapterPreprocessor = (startAppListening: AppStartListeni
},
};

// Kick off the processor batch
const req = dispatch(
queueApi.endpoints.enqueueBatch.initiate(enqueueBatchArg, {
fixedCacheKey: 'enqueueBatch',
})
);

try {
const req = dispatch(
queueApi.endpoints.enqueueBatch.initiate(enqueueBatchArg, {
fixedCacheKey: 'enqueueBatch',
})
);
const enqueueResult = await req.unwrap();
req.reset();
// TODO(psyche): Update the pydantic models, pretty sure we will _always_ have a batch_id here, but the model says it's optional
assert(enqueueResult.batch.batch_id, 'Batch ID not returned from queue');
dispatch(caLayerProcessorPendingBatchIdChanged({ layerId, batchId: enqueueResult.batch.batch_id }));
log.debug({ enqueueResult: parseify(enqueueResult) }, t('queue.graphQueued'));

// Wait for the processor node to complete
const [invocationCompleteAction] = await take(
(action): action is ReturnType<typeof socketInvocationComplete> =>
socketInvocationComplete.match(action) &&
@@ -109,47 +138,50 @@ export const addControlAdapterPreprocessor = (startAppListening: AppStartListeni
);

// We still have to check the output type
if (isImageOutput(invocationCompleteAction.payload.data.result)) {
const { image_name } = invocationCompleteAction.payload.data.result.image;
assert(
isImageOutput(invocationCompleteAction.payload.data.result),
`Processor did not return an image output, got: ${invocationCompleteAction.payload.data.result}`
);
const { image_name } = invocationCompleteAction.payload.data.result.image;

// Wait for the ImageDTO to be received
const [{ payload }] = await take(
(action) =>
imagesApi.endpoints.getImageDTO.matchFulfilled(action) && action.payload.image_name === image_name
);
const imageDTO = await getImageDTO(image_name);
assert(imageDTO, "Failed to fetch processor output's image DTO");

const imageDTO = payload as ImageDTO;

log.debug({ layerId, imageDTO }, 'ControlNet image processed');

// Update the processed image in the store
dispatch(
caLayerProcessedImageChanged({
layerId,
imageDTO,
})
);
dispatch(caLayerIsProcessingImageChanged({ layerId, isProcessingImage: false }));
}
// Whew! We made it. Update the layer with the processed image
log.debug({ layerId, imageDTO }, 'ControlNet image processed');
dispatch(caLayerProcessedImageChanged({ layerId, imageDTO }));
dispatch(caLayerProcessorPendingBatchIdChanged({ layerId, batchId: null }));
} catch (error) {
log.error({ enqueueBatchArg: parseify(enqueueBatchArg) }, t('queue.graphFailedToQueue'));
dispatch(caLayerIsProcessingImageChanged({ layerId, isProcessingImage: false }));
if (signal.aborted) {
// The listener was canceled - we need to cancel the pending processor batch, if there is one (could have changed by now).
const pendingBatchId = getState()
.controlLayers.present.layers.filter(isControlAdapterLayer)
.find((l) => l.id === layerId)?.controlAdapter.processorPendingBatchId;
if (pendingBatchId) {
cancelProcessorBatch(dispatch, layerId, pendingBatchId);
}
log.trace('Control Adapter preprocessor cancelled');
} else {
// Some other error condition...
log.error({ enqueueBatchArg: parseify(enqueueBatchArg) }, t('queue.graphFailedToQueue'));

if (error instanceof Object) {
if ('data' in error && 'status' in error) {
if (error.status === 403) {
dispatch(caLayerImageChanged({ layerId, imageDTO: null }));
return;
if (error instanceof Object) {
if ('data' in error && 'status' in error) {
if (error.status === 403) {
dispatch(caLayerImageChanged({ layerId, imageDTO: null }));
return;
}
}
}
}

dispatch(
addToast({
toast({
id: 'GRAPH_QUEUE_FAILED',
title: t('queue.graphFailedToQueue'),
status: 'error',
})
);
});
}
} finally {
req.reset();
}
},
});

@@ -10,7 +10,7 @@ import {
} from 'features/controlAdapters/store/controlAdaptersSlice';
import { isControlNetOrT2IAdapter } from 'features/controlAdapters/store/types';
import { isImageOutput } from 'features/nodes/types/common';
import { addToast } from 'features/system/store/systemSlice';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { imagesApi } from 'services/api/endpoints/images';
import { queueApi } from 'services/api/endpoints/queue';
@@ -108,12 +108,11 @@ export const addControlNetImageProcessedListener = (startAppListening: AppStartL
}
}

dispatch(
addToast({
title: t('queue.graphFailedToQueue'),
status: 'error',
})
);
toast({
id: 'GRAPH_QUEUE_FAILED',
title: t('queue.graphFailedToQueue'),
status: 'error',
});
}
},
});

@@ -8,8 +8,8 @@ import { blobToDataURL } from 'features/canvas/util/blobToDataURL';
import { getCanvasData } from 'features/canvas/util/getCanvasData';
import { getCanvasGenerationMode } from 'features/canvas/util/getCanvasGenerationMode';
import { canvasGraphBuilt } from 'features/nodes/store/actions';
import { buildCanvasGraph } from 'features/nodes/util/graph/buildCanvasGraph';
import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
import { buildCanvasGraph } from 'features/nodes/util/graph/canvas/buildCanvasGraph';
import { imagesApi } from 'services/api/endpoints/images';
import { queueApi } from 'services/api/endpoints/queue';
import type { ImageDTO } from 'services/api/types';

@@ -1,9 +1,9 @@
import { enqueueRequested } from 'app/store/actions';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { isImageViewerOpenChanged } from 'features/gallery/store/gallerySlice';
import { buildGenerationTabGraph } from 'features/nodes/util/graph/buildGenerationTabGraph';
import { buildGenerationTabSDXLGraph } from 'features/nodes/util/graph/buildGenerationTabSDXLGraph';
import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
import { buildGenerationTabGraph } from 'features/nodes/util/graph/generation/buildGenerationTabGraph';
import { buildGenerationTabSDXLGraph } from 'features/nodes/util/graph/generation/buildGenerationTabSDXLGraph';
import { queueApi } from 'services/api/endpoints/queue';

export const addEnqueueRequestedLinear = (startAppListening: AppStartListening) => {
@@ -18,7 +18,7 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)

let graph;

if (model && model.base === 'sdxl') {
if (model?.base === 'sdxl') {
graph = await buildGenerationTabSDXLGraph(state);
} else {
graph = await buildGenerationTabGraph(state);
@@ -32,7 +32,7 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)
})
);
try {
req.unwrap();
await req.unwrap();
if (shouldShowProgressInViewer) {
dispatch(isImageViewerOpenChanged(true));
}

@@ -11,9 +11,9 @@ export const addEnqueueRequestedNodes = (startAppListening: AppStartListening) =
enqueueRequested.match(action) && action.payload.tabName === 'workflows',
effect: async (action, { getState, dispatch }) => {
const state = getState();
const { nodes, edges } = state.nodes;
const { nodes, edges } = state.nodes.present;
const workflow = state.workflow;
const graph = buildNodesGraph(state.nodes);
const graph = buildNodesGraph(state.nodes.present);
const builtWorkflow = buildWorkflowWithValidation({
nodes,
edges,
@@ -39,7 +39,11 @@ export const addEnqueueRequestedNodes = (startAppListening: AppStartListening) =
fixedCacheKey: 'enqueueBatch',
})
);
req.reset();
try {
await req.unwrap();
} finally {
req.reset();
}
},
});
};

@@ -1,7 +1,7 @@
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { parseify } from 'common/util/serialize';
import { nodeTemplatesBuilt } from 'features/nodes/store/nodesSlice';
import { $templates } from 'features/nodes/store/nodesSlice';
import { parseSchema } from 'features/nodes/util/schema/parseSchema';
import { size } from 'lodash-es';
import { appInfoApi } from 'services/api/endpoints/appInfo';
@@ -9,7 +9,7 @@ import { appInfoApi } from 'services/api/endpoints/appInfo';
export const addGetOpenAPISchemaListener = (startAppListening: AppStartListening) => {
startAppListening({
matcher: appInfoApi.endpoints.getOpenAPISchema.matchFulfilled,
effect: (action, { dispatch, getState }) => {
effect: (action, { getState }) => {
const log = logger('system');
const schemaJSON = action.payload;

@@ -20,7 +20,7 @@ export const addGetOpenAPISchemaListener = (startAppListening: AppStartListening

log.debug({ nodeTemplates: parseify(nodeTemplates) }, `Built ${size(nodeTemplates)} node templates`);

dispatch(nodeTemplatesBuilt(nodeTemplates));
$templates.set(nodeTemplates);
},
});


@@ -29,7 +29,7 @@ import type { ImageDTO } from 'services/api/types';
import { imagesSelectors } from 'services/api/util';

const deleteNodesImages = (state: RootState, dispatch: AppDispatch, imageDTO: ImageDTO) => {
state.nodes.nodes.forEach((node) => {
state.nodes.present.nodes.forEach((node) => {
if (!isInvocationNode(node)) {
return;
}

@@ -1,4 +1,3 @@
import type { UseToastOptions } from '@invoke-ai/ui-library';
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { setInitialCanvasImage } from 'features/canvas/store/canvasSlice';
@@ -14,7 +13,7 @@ import {
} from 'features/controlLayers/store/controlLayersSlice';
import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice';
import { selectOptimalDimension } from 'features/parameters/store/generationSlice';
import { addToast } from 'features/system/store/systemSlice';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { omit } from 'lodash-es';
import { boardsApi } from 'services/api/endpoints/boards';
@@ -42,16 +41,17 @@ export const addImageUploadedFulfilledListener = (startAppListening: AppStartLis
return;
}

const DEFAULT_UPLOADED_TOAST: UseToastOptions = {
const DEFAULT_UPLOADED_TOAST = {
id: 'IMAGE_UPLOADED',
title: t('toast.imageUploaded'),
status: 'success',
};
} as const;

// default action - just upload and alert user
|
||||
if (postUploadAction?.type === 'TOAST') {
|
||||
const { toastOptions } = postUploadAction;
|
||||
if (!autoAddBoardId || autoAddBoardId === 'none') {
|
||||
dispatch(addToast({ ...DEFAULT_UPLOADED_TOAST, ...toastOptions }));
|
||||
const title = postUploadAction.title || DEFAULT_UPLOADED_TOAST.title;
|
||||
toast({ ...DEFAULT_UPLOADED_TOAST, title });
|
||||
} else {
|
||||
// Add this image to the board
|
||||
dispatch(
|
||||
@@ -70,24 +70,20 @@ export const addImageUploadedFulfilledListener = (startAppListening: AppStartLis
|
||||
? `${t('toast.addedToBoard')} ${board.board_name}`
|
||||
: `${t('toast.addedToBoard')} ${autoAddBoardId}`;
|
||||
|
||||
dispatch(
|
||||
addToast({
|
||||
...DEFAULT_UPLOADED_TOAST,
|
||||
description,
|
||||
})
|
||||
);
|
||||
toast({
|
||||
...DEFAULT_UPLOADED_TOAST,
|
||||
description,
|
||||
});
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
if (postUploadAction?.type === 'SET_CANVAS_INITIAL_IMAGE') {
|
||||
dispatch(setInitialCanvasImage(imageDTO, selectOptimalDimension(state)));
|
||||
dispatch(
|
||||
addToast({
|
||||
...DEFAULT_UPLOADED_TOAST,
|
||||
description: t('toast.setAsCanvasInitialImage'),
|
||||
})
|
||||
);
|
||||
toast({
|
||||
...DEFAULT_UPLOADED_TOAST,
|
||||
description: t('toast.setAsCanvasInitialImage'),
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -105,68 +101,56 @@ export const addImageUploadedFulfilledListener = (startAppListening: AppStartLis
|
||||
controlImage: imageDTO.image_name,
|
||||
})
|
||||
);
|
||||
dispatch(
|
||||
addToast({
|
||||
...DEFAULT_UPLOADED_TOAST,
|
||||
description: t('toast.setControlImage'),
|
||||
})
|
||||
);
|
||||
toast({
|
||||
...DEFAULT_UPLOADED_TOAST,
|
||||
description: t('toast.setControlImage'),
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
if (postUploadAction?.type === 'SET_CA_LAYER_IMAGE') {
|
||||
const { layerId } = postUploadAction;
|
||||
dispatch(caLayerImageChanged({ layerId, imageDTO }));
|
||||
dispatch(
|
||||
addToast({
|
||||
...DEFAULT_UPLOADED_TOAST,
|
||||
description: t('toast.setControlImage'),
|
||||
})
|
||||
);
|
||||
toast({
|
||||
...DEFAULT_UPLOADED_TOAST,
|
||||
description: t('toast.setControlImage'),
|
||||
});
|
||||
}
|
||||
|
||||
if (postUploadAction?.type === 'SET_IPA_LAYER_IMAGE') {
|
||||
const { layerId } = postUploadAction;
|
||||
dispatch(ipaLayerImageChanged({ layerId, imageDTO }));
|
||||
dispatch(
|
||||
addToast({
|
||||
...DEFAULT_UPLOADED_TOAST,
|
||||
description: t('toast.setControlImage'),
|
||||
})
|
||||
);
|
||||
toast({
|
||||
...DEFAULT_UPLOADED_TOAST,
|
||||
description: t('toast.setControlImage'),
|
||||
});
|
||||
}
|
||||
|
||||
if (postUploadAction?.type === 'SET_RG_LAYER_IP_ADAPTER_IMAGE') {
|
||||
const { layerId, ipAdapterId } = postUploadAction;
|
||||
dispatch(rgLayerIPAdapterImageChanged({ layerId, ipAdapterId, imageDTO }));
|
||||
dispatch(
|
||||
addToast({
|
||||
...DEFAULT_UPLOADED_TOAST,
|
||||
description: t('toast.setControlImage'),
|
||||
})
|
||||
);
|
||||
toast({
|
||||
...DEFAULT_UPLOADED_TOAST,
|
||||
description: t('toast.setControlImage'),
|
||||
});
|
||||
}
|
||||
|
||||
if (postUploadAction?.type === 'SET_II_LAYER_IMAGE') {
|
||||
const { layerId } = postUploadAction;
|
||||
dispatch(iiLayerImageChanged({ layerId, imageDTO }));
|
||||
dispatch(
|
||||
addToast({
|
||||
...DEFAULT_UPLOADED_TOAST,
|
||||
description: t('toast.setControlImage'),
|
||||
})
|
||||
);
|
||||
toast({
|
||||
...DEFAULT_UPLOADED_TOAST,
|
||||
description: t('toast.setControlImage'),
|
||||
});
|
||||
}
|
||||
|
||||
if (postUploadAction?.type === 'SET_NODES_IMAGE') {
|
||||
const { nodeId, fieldName } = postUploadAction;
|
||||
dispatch(fieldImageValueChanged({ nodeId, fieldName, value: imageDTO }));
|
||||
dispatch(
|
||||
addToast({
|
||||
...DEFAULT_UPLOADED_TOAST,
|
||||
description: `${t('toast.setNodeField')} ${fieldName}`,
|
||||
})
|
||||
);
|
||||
toast({
|
||||
...DEFAULT_UPLOADED_TOAST,
|
||||
description: `${t('toast.setNodeField')} ${fieldName}`,
|
||||
});
|
||||
return;
|
||||
}
|
||||
},
|
||||
@@ -174,7 +158,7 @@ export const addImageUploadedFulfilledListener = (startAppListening: AppStartLis
|
||||
|
||||
startAppListening({
|
||||
matcher: imagesApi.endpoints.uploadImage.matchRejected,
|
||||
effect: (action, { dispatch }) => {
|
||||
effect: (action) => {
|
||||
const log = logger('images');
|
||||
const sanitizedData = {
|
||||
arg: {
|
||||
@@ -183,13 +167,11 @@ export const addImageUploadedFulfilledListener = (startAppListening: AppStartLis
|
||||
},
|
||||
};
|
||||
log.error({ ...sanitizedData }, 'Image upload failed');
|
||||
dispatch(
|
||||
addToast({
|
||||
title: t('toast.imageUploadFailed'),
|
||||
description: action.error.message,
|
||||
status: 'error',
|
||||
})
|
||||
);
|
||||
toast({
|
||||
title: t('toast.imageUploadFailed'),
|
||||
description: action.error.message,
|
||||
status: 'error',
|
||||
});
|
||||
},
|
||||
});
|
||||
};
|
||||
|
||||
@@ -8,8 +8,7 @@ import { loraRemoved } from 'features/lora/store/loraSlice';
|
||||
import { modelSelected } from 'features/parameters/store/actions';
|
||||
import { modelChanged, vaeSelected } from 'features/parameters/store/generationSlice';
|
||||
import { zParameterModel } from 'features/parameters/types/parameterSchemas';
|
||||
import { addToast } from 'features/system/store/systemSlice';
|
||||
import { makeToast } from 'features/system/util/makeToast';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { t } from 'i18next';
|
||||
import { forEach } from 'lodash-es';
|
||||
|
||||
@@ -60,16 +59,14 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) =
|
||||
});
|
||||
|
||||
if (modelsCleared > 0) {
|
||||
dispatch(
|
||||
addToast(
|
||||
makeToast({
|
||||
title: t('toast.baseModelChangedCleared', {
|
||||
count: modelsCleared,
|
||||
}),
|
||||
status: 'warning',
|
||||
})
|
||||
)
|
||||
);
|
||||
toast({
|
||||
id: 'BASE_MODEL_CHANGED',
|
||||
title: t('toast.baseModelChanged'),
|
||||
description: t('toast.baseModelChangedCleared', {
|
||||
count: modelsCleared,
|
||||
}),
|
||||
status: 'warning',
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -19,8 +19,7 @@ import {
|
||||
isParameterWidth,
|
||||
zParameterVAEModel,
|
||||
} from 'features/parameters/types/parameterSchemas';
|
||||
import { addToast } from 'features/system/store/systemSlice';
|
||||
import { makeToast } from 'features/system/util/makeToast';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { t } from 'i18next';
|
||||
import { modelConfigsAdapterSelectors, modelsApi } from 'services/api/endpoints/models';
|
||||
import { isNonRefinerMainModelConfig } from 'services/api/types';
|
||||
@@ -109,7 +108,7 @@ export const addSetDefaultSettingsListener = (startAppListening: AppStartListeni
|
||||
}
|
||||
}
|
||||
|
||||
dispatch(addToast(makeToast({ title: t('toast.parameterSet', { parameter: 'Default settings' }) })));
|
||||
toast({ id: 'PARAMETER_SET', title: t('toast.parameterSet', { parameter: 'Default settings' }) });
|
||||
}
|
||||
},
|
||||
});
|
||||
|
||||
@@ -1,5 +1,8 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { deepClone } from 'common/util/deepClone';
|
||||
import { $nodeExecutionStates, upsertExecutionState } from 'features/nodes/hooks/useExecutionState';
|
||||
import { zNodeStatus } from 'features/nodes/types/invocation';
|
||||
import { socketGeneratorProgress } from 'services/events/actions';
|
||||
|
||||
const log = logger('socketio');
|
||||
@@ -9,6 +12,14 @@ export const addGeneratorProgressEventListener = (startAppListening: AppStartLis
|
||||
actionCreator: socketGeneratorProgress,
|
||||
effect: (action) => {
|
||||
log.trace(action.payload, `Generator progress`);
|
||||
const { source_node_id, step, total_steps, progress_image } = action.payload.data;
|
||||
const nes = deepClone($nodeExecutionStates.get()[source_node_id]);
|
||||
if (nes) {
|
||||
nes.status = zNodeStatus.enum.IN_PROGRESS;
|
||||
nes.progress = (step + 1) / total_steps;
|
||||
nes.progressImage = progress_image ?? null;
|
||||
upsertExecutionState(nes.nodeId, nes);
|
||||
}
|
||||
},
|
||||
});
|
||||
};
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { deepClone } from 'common/util/deepClone';
|
||||
import { parseify } from 'common/util/serialize';
|
||||
import { addImageToStagingArea } from 'features/canvas/store/canvasSlice';
|
||||
import {
|
||||
@@ -9,7 +10,9 @@ import {
|
||||
isImageViewerOpenChanged,
|
||||
} from 'features/gallery/store/gallerySlice';
|
||||
import { IMAGE_CATEGORIES } from 'features/gallery/store/types';
|
||||
import { $nodeExecutionStates, upsertExecutionState } from 'features/nodes/hooks/useExecutionState';
|
||||
import { isImageOutput } from 'features/nodes/types/common';
|
||||
import { zNodeStatus } from 'features/nodes/types/invocation';
|
||||
import { CANVAS_OUTPUT } from 'features/nodes/util/graph/constants';
|
||||
import { boardsApi } from 'services/api/endpoints/boards';
|
||||
import { imagesApi } from 'services/api/endpoints/images';
|
||||
@@ -28,7 +31,7 @@ export const addInvocationCompleteEventListener = (startAppListening: AppStartLi
|
||||
const { data } = action.payload;
|
||||
log.debug({ data: parseify(data) }, `Invocation complete (${action.payload.data.node.type})`);
|
||||
|
||||
const { result, node, queue_batch_id } = data;
|
||||
const { result, node, queue_batch_id, source_node_id } = data;
|
||||
// This complete event has an associated image output
|
||||
if (isImageOutput(result) && !nodeTypeDenylist.includes(node.type)) {
|
||||
const { image_name } = result.image;
|
||||
@@ -110,6 +113,16 @@ export const addInvocationCompleteEventListener = (startAppListening: AppStartLi
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const nes = deepClone($nodeExecutionStates.get()[source_node_id]);
|
||||
if (nes) {
|
||||
nes.status = zNodeStatus.enum.COMPLETED;
|
||||
if (nes.progress !== null) {
|
||||
nes.progress = 1;
|
||||
}
|
||||
nes.outputs.push(result);
|
||||
upsertExecutionState(nes.nodeId, nes);
|
||||
}
|
||||
},
|
||||
});
|
||||
};
|
||||
|
||||
@@ -1,14 +1,72 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { deepClone } from 'common/util/deepClone';
|
||||
import { $nodeExecutionStates, upsertExecutionState } from 'features/nodes/hooks/useExecutionState';
|
||||
import { zNodeStatus } from 'features/nodes/types/invocation';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import ToastWithSessionRefDescription from 'features/toast/ToastWithSessionRefDescription';
|
||||
import { t } from 'i18next';
|
||||
import { startCase } from 'lodash-es';
|
||||
import { socketInvocationError } from 'services/events/actions';
|
||||
|
||||
const log = logger('socketio');
|
||||
|
||||
const getTitle = (errorType: string) => {
|
||||
if (errorType === 'OutOfMemoryError') {
|
||||
return t('toast.outOfMemoryError');
|
||||
}
|
||||
return t('toast.serverError');
|
||||
};
|
||||
|
||||
const getDescription = (errorType: string, sessionId: string, isLocal?: boolean) => {
|
||||
if (!isLocal) {
|
||||
if (errorType === 'OutOfMemoryError') {
|
||||
return ToastWithSessionRefDescription({
|
||||
message: t('toast.outOfMemoryDescription'),
|
||||
sessionId,
|
||||
});
|
||||
}
|
||||
return ToastWithSessionRefDescription({
|
||||
message: errorType,
|
||||
sessionId,
|
||||
});
|
||||
}
|
||||
return errorType;
|
||||
};
|
||||
|
||||
export const addInvocationErrorEventListener = (startAppListening: AppStartListening) => {
|
||||
startAppListening({
|
||||
actionCreator: socketInvocationError,
|
||||
effect: (action) => {
|
||||
effect: (action, { getState }) => {
|
||||
log.error(action.payload, `Invocation error (${action.payload.data.node.type})`);
|
||||
const { source_node_id, error_type, error_message, error_traceback, graph_execution_state_id } =
|
||||
action.payload.data;
|
||||
const nes = deepClone($nodeExecutionStates.get()[source_node_id]);
|
||||
if (nes) {
|
||||
nes.status = zNodeStatus.enum.FAILED;
|
||||
nes.progress = null;
|
||||
nes.progressImage = null;
|
||||
|
||||
nes.error = {
|
||||
error_type,
|
||||
error_message,
|
||||
error_traceback,
|
||||
};
|
||||
upsertExecutionState(nes.nodeId, nes);
|
||||
}
|
||||
|
||||
const errorType = startCase(error_type);
|
||||
const sessionId = graph_execution_state_id;
|
||||
const { isLocal } = getState().config;
|
||||
|
||||
toast({
|
||||
id: `INVOCATION_ERROR_${errorType}`,
|
||||
title: getTitle(errorType),
|
||||
status: 'error',
|
||||
duration: null,
|
||||
description: getDescription(errorType, sessionId, isLocal),
|
||||
updateDescription: isLocal ? true : false,
|
||||
});
|
||||
},
|
||||
});
|
||||
};
|
||||
|
||||
@@ -1,14 +0,0 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { socketInvocationRetrievalError } from 'services/events/actions';
|
||||
|
||||
const log = logger('socketio');
|
||||
|
||||
export const addInvocationRetrievalErrorEventListener = (startAppListening: AppStartListening) => {
|
||||
startAppListening({
|
||||
actionCreator: socketInvocationRetrievalError,
|
||||
effect: (action) => {
|
||||
log.error(action.payload, `Invocation retrieval error (${action.payload.data.graph_execution_state_id})`);
|
||||
},
|
||||
});
|
||||
};
|
||||
@@ -1,5 +1,8 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { deepClone } from 'common/util/deepClone';
|
||||
import { $nodeExecutionStates, upsertExecutionState } from 'features/nodes/hooks/useExecutionState';
|
||||
import { zNodeStatus } from 'features/nodes/types/invocation';
|
||||
import { socketInvocationStarted } from 'services/events/actions';
|
||||
|
||||
const log = logger('socketio');
|
||||
@@ -9,6 +12,12 @@ export const addInvocationStartedEventListener = (startAppListening: AppStartLis
|
||||
actionCreator: socketInvocationStarted,
|
||||
effect: (action) => {
|
||||
log.debug(action.payload, `Invocation started (${action.payload.data.node.type})`);
|
||||
const { source_node_id } = action.payload.data;
|
||||
const nes = deepClone($nodeExecutionStates.get()[source_node_id]);
|
||||
if (nes) {
|
||||
nes.status = zNodeStatus.enum.IN_PROGRESS;
|
||||
upsertExecutionState(nes.nodeId, nes);
|
||||
}
|
||||
},
|
||||
});
|
||||
};
|
||||
|
||||
@@ -1,5 +1,9 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { deepClone } from 'common/util/deepClone';
|
||||
import { $nodeExecutionStates } from 'features/nodes/hooks/useExecutionState';
|
||||
import { zNodeStatus } from 'features/nodes/types/invocation';
|
||||
import { forEach } from 'lodash-es';
|
||||
import { queueApi, queueItemsAdapter } from 'services/api/endpoints/queue';
|
||||
import { socketQueueItemStatusChanged } from 'services/events/actions';
|
||||
|
||||
@@ -39,21 +43,31 @@ export const addSocketQueueItemStatusChangedEventListener = (startAppListening:
|
||||
queueApi.util.updateQueryData('getBatchStatus', { batch_id: batch_status.batch_id }, () => batch_status)
|
||||
);
|
||||
|
||||
// Update the queue item status (this is the full queue item, including the session)
|
||||
dispatch(
|
||||
queueApi.util.updateQueryData('getQueueItem', queue_item.item_id, (draft) => {
|
||||
if (!draft) {
|
||||
return;
|
||||
}
|
||||
Object.assign(draft, queue_item);
|
||||
})
|
||||
);
|
||||
|
||||
// Invalidate caches for things we cannot update
|
||||
// TODO: technically, we could possibly update the current session queue item, but feels safer to just request it again
|
||||
dispatch(
|
||||
queueApi.util.invalidateTags(['CurrentSessionQueueItem', 'NextSessionQueueItem', 'InvocationCacheStatus'])
|
||||
queueApi.util.invalidateTags([
|
||||
'CurrentSessionQueueItem',
|
||||
'NextSessionQueueItem',
|
||||
'InvocationCacheStatus',
|
||||
{ type: 'SessionQueueItem', id: queue_item.item_id },
|
||||
])
|
||||
);
|
||||
|
||||
if (['in_progress'].includes(action.payload.data.queue_item.status)) {
|
||||
forEach($nodeExecutionStates.get(), (nes) => {
|
||||
if (!nes) {
|
||||
return;
|
||||
}
|
||||
const clone = deepClone(nes);
|
||||
clone.status = zNodeStatus.enum.PENDING;
|
||||
clone.error = null;
|
||||
clone.progress = null;
|
||||
clone.progressImage = null;
|
||||
clone.outputs = [];
|
||||
$nodeExecutionStates.setKey(clone.nodeId, clone);
|
||||
});
|
||||
}
|
||||
},
|
||||
});
|
||||
};
|
||||
|
||||
@@ -1,14 +0,0 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { socketSessionRetrievalError } from 'services/events/actions';
|
||||
|
||||
const log = logger('socketio');
|
||||
|
||||
export const addSessionRetrievalErrorEventListener = (startAppListening: AppStartListening) => {
|
||||
startAppListening({
|
||||
actionCreator: socketSessionRetrievalError,
|
||||
effect: (action) => {
|
||||
log.error(action.payload, `Session retrieval error (${action.payload.data.graph_execution_state_id})`);
|
||||
},
|
||||
});
|
||||
};
|
||||
@@ -1,6 +1,6 @@
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { stagingAreaImageSaved } from 'features/canvas/store/actions';
|
||||
import { addToast } from 'features/system/store/systemSlice';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { t } from 'i18next';
|
||||
import { imagesApi } from 'services/api/endpoints/images';
|
||||
|
||||
@@ -29,15 +29,14 @@ export const addStagingAreaImageSavedListener = (startAppListening: AppStartList
|
||||
})
|
||||
);
|
||||
}
|
||||
dispatch(addToast({ title: t('toast.imageSaved'), status: 'success' }));
|
||||
toast({ id: 'IMAGE_SAVED', title: t('toast.imageSaved'), status: 'success' });
|
||||
} catch (error) {
|
||||
dispatch(
|
||||
addToast({
|
||||
title: t('toast.imageSavingFailed'),
|
||||
description: (error as Error)?.message,
|
||||
status: 'error',
|
||||
})
|
||||
);
|
||||
toast({
|
||||
id: 'IMAGE_SAVE_FAILED',
|
||||
title: t('toast.imageSavingFailed'),
|
||||
description: (error as Error)?.message,
|
||||
status: 'error',
|
||||
});
|
||||
}
|
||||
},
|
||||
});
|
||||
|
||||
@@ -1,12 +1,11 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { updateAllNodesRequested } from 'features/nodes/store/actions';
|
||||
import { nodeReplaced } from 'features/nodes/store/nodesSlice';
|
||||
import { $templates, nodesChanged } from 'features/nodes/store/nodesSlice';
|
||||
import { NodeUpdateError } from 'features/nodes/types/error';
|
||||
import { isInvocationNode } from 'features/nodes/types/invocation';
|
||||
import { getNeedsUpdate, updateNode } from 'features/nodes/util/node/nodeUpdate';
|
||||
import { addToast } from 'features/system/store/systemSlice';
|
||||
import { makeToast } from 'features/system/util/makeToast';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { t } from 'i18next';
|
||||
|
||||
export const addUpdateAllNodesRequestedListener = (startAppListening: AppStartListening) => {
|
||||
@@ -14,7 +13,8 @@ export const addUpdateAllNodesRequestedListener = (startAppListening: AppStartLi
|
||||
actionCreator: updateAllNodesRequested,
|
||||
effect: (action, { dispatch, getState }) => {
|
||||
const log = logger('nodes');
|
||||
const { nodes, templates } = getState().nodes;
|
||||
const { nodes } = getState().nodes.present;
|
||||
const templates = $templates.get();
|
||||
|
||||
let unableToUpdateCount = 0;
|
||||
|
||||
@@ -24,13 +24,18 @@ export const addUpdateAllNodesRequestedListener = (startAppListening: AppStartLi
|
||||
unableToUpdateCount++;
|
||||
return;
|
||||
}
|
||||
if (!getNeedsUpdate(node, template)) {
|
||||
if (!getNeedsUpdate(node.data, template)) {
|
||||
// No need to increment the count here, since we're not actually updating
|
||||
return;
|
||||
}
|
||||
try {
|
||||
const updatedNode = updateNode(node, template);
|
||||
dispatch(nodeReplaced({ nodeId: updatedNode.id, node: updatedNode }));
|
||||
dispatch(
|
||||
nodesChanged([
|
||||
{ type: 'remove', id: updatedNode.id },
|
||||
{ type: 'add', item: updatedNode },
|
||||
])
|
||||
);
|
||||
} catch (e) {
|
||||
if (e instanceof NodeUpdateError) {
|
||||
unableToUpdateCount++;
|
||||
@@ -44,24 +49,18 @@ export const addUpdateAllNodesRequestedListener = (startAppListening: AppStartLi
|
||||
count: unableToUpdateCount,
|
||||
})
|
||||
);
|
||||
dispatch(
|
||||
addToast(
|
||||
makeToast({
|
||||
title: t('nodes.unableToUpdateNodes', {
|
||||
count: unableToUpdateCount,
|
||||
}),
|
||||
})
|
||||
)
|
||||
);
|
||||
toast({
|
||||
id: 'UNABLE_TO_UPDATE_NODES',
|
||||
title: t('nodes.unableToUpdateNodes', {
|
||||
count: unableToUpdateCount,
|
||||
}),
|
||||
});
|
||||
} else {
|
||||
dispatch(
|
||||
addToast(
|
||||
makeToast({
|
||||
title: t('nodes.allNodesUpdated'),
|
||||
status: 'success',
|
||||
})
|
||||
)
|
||||
);
|
||||
toast({
|
||||
id: 'ALL_NODES_UPDATED',
|
||||
title: t('nodes.allNodesUpdated'),
|
||||
status: 'success',
|
||||
});
|
||||
}
|
||||
},
|
||||
});
|
||||
|
||||
@@ -4,7 +4,7 @@ import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'
|
||||
import { parseify } from 'common/util/serialize';
|
||||
import { buildAdHocUpscaleGraph } from 'features/nodes/util/graph/buildAdHocUpscaleGraph';
|
||||
import { createIsAllowedToUpscaleSelector } from 'features/parameters/hooks/useIsAllowedToUpscale';
|
||||
import { addToast } from 'features/system/store/systemSlice';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { t } from 'i18next';
|
||||
import { queueApi } from 'services/api/endpoints/queue';
|
||||
import type { BatchConfig, ImageDTO } from 'services/api/types';
|
||||
@@ -29,12 +29,11 @@ export const addUpscaleRequestedListener = (startAppListening: AppStartListening
|
||||
{ imageDTO },
|
||||
t(detailTKey ?? 'parameters.isAllowedToUpscale.tooLarge') // should never coalesce
|
||||
);
|
||||
dispatch(
|
||||
addToast({
|
||||
title: t(detailTKey ?? 'parameters.isAllowedToUpscale.tooLarge'), // should never coalesce
|
||||
status: 'error',
|
||||
})
|
||||
);
|
||||
toast({
|
||||
id: 'NOT_ALLOWED_TO_UPSCALE',
|
||||
title: t(detailTKey ?? 'parameters.isAllowedToUpscale.tooLarge'), // should never coalesce
|
||||
status: 'error',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -65,12 +64,11 @@ export const addUpscaleRequestedListener = (startAppListening: AppStartListening
|
||||
if (error instanceof Object && 'status' in error && error.status === 403) {
|
||||
return;
|
||||
} else {
|
||||
dispatch(
|
||||
addToast({
|
||||
title: t('queue.graphFailedToQueue'),
|
||||
status: 'error',
|
||||
})
|
||||
);
|
||||
toast({
|
||||
id: 'GRAPH_QUEUE_FAILED',
|
||||
title: t('queue.graphFailedToQueue'),
|
||||
status: 'error',
|
||||
});
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
@@ -2,50 +2,64 @@ import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { parseify } from 'common/util/serialize';
|
||||
import { workflowLoaded, workflowLoadRequested } from 'features/nodes/store/actions';
|
||||
import { $templates } from 'features/nodes/store/nodesSlice';
|
||||
import { $flow } from 'features/nodes/store/reactFlowInstance';
|
||||
import type { Templates } from 'features/nodes/store/types';
|
||||
import { WorkflowMigrationError, WorkflowVersionError } from 'features/nodes/types/error';
|
||||
import { graphToWorkflow } from 'features/nodes/util/workflow/graphToWorkflow';
|
||||
import { validateWorkflow } from 'features/nodes/util/workflow/validateWorkflow';
|
||||
import { addToast } from 'features/system/store/systemSlice';
|
||||
import { makeToast } from 'features/system/util/makeToast';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { t } from 'i18next';
|
||||
import { checkBoardAccess, checkImageAccess, checkModelAccess } from 'services/api/hooks/accessChecks';
|
||||
import type { GraphAndWorkflowResponse, NonNullableGraph } from 'services/api/types';
|
||||
import { z } from 'zod';
|
||||
import { fromZodError } from 'zod-validation-error';
|
||||
|
||||
const getWorkflow = async (data: GraphAndWorkflowResponse, templates: Templates) => {
|
||||
if (data.workflow) {
|
||||
// Prefer to load the workflow if it's available - it has more information
|
||||
const parsed = JSON.parse(data.workflow);
|
||||
return await validateWorkflow(parsed, templates, checkImageAccess, checkBoardAccess, checkModelAccess);
|
||||
} else if (data.graph) {
|
||||
// Else we fall back on the graph, using the graphToWorkflow function to convert and do layout
|
||||
const parsed = JSON.parse(data.graph);
|
||||
const workflow = graphToWorkflow(parsed as NonNullableGraph, true);
|
||||
return await validateWorkflow(workflow, templates, checkImageAccess, checkBoardAccess, checkModelAccess);
|
||||
} else {
|
||||
throw new Error('No workflow or graph provided');
|
||||
}
|
||||
};
|
||||
|
||||
export const addWorkflowLoadRequestedListener = (startAppListening: AppStartListening) => {
|
||||
startAppListening({
|
||||
actionCreator: workflowLoadRequested,
|
||||
effect: (action, { dispatch, getState }) => {
|
||||
effect: async (action, { dispatch }) => {
|
||||
const log = logger('nodes');
|
||||
const { workflow, asCopy } = action.payload;
|
||||
const nodeTemplates = getState().nodes.templates;
|
||||
const { data, asCopy } = action.payload;
|
||||
const nodeTemplates = $templates.get();
|
||||
|
||||
try {
|
||||
const { workflow: validatedWorkflow, warnings } = validateWorkflow(workflow, nodeTemplates);
|
||||
const { workflow, warnings } = await getWorkflow(data, nodeTemplates);
|
||||
|
||||
if (asCopy) {
|
||||
// If we're loading a copy, we need to remove the ID so that the backend will create a new workflow
|
||||
delete validatedWorkflow.id;
|
||||
delete workflow.id;
|
||||
}
|
||||
|
||||
dispatch(workflowLoaded(validatedWorkflow));
|
||||
dispatch(workflowLoaded(workflow));
|
||||
if (!warnings.length) {
|
||||
dispatch(
|
||||
addToast(
|
||||
makeToast({
|
||||
title: t('toast.workflowLoaded'),
|
||||
status: 'success',
|
||||
})
|
||||
)
|
||||
);
|
||||
toast({
|
||||
id: 'WORKFLOW_LOADED',
|
||||
title: t('toast.workflowLoaded'),
|
||||
status: 'success',
|
||||
});
|
||||
} else {
|
||||
dispatch(
|
||||
addToast(
|
||||
makeToast({
|
||||
title: t('toast.loadedWithWarnings'),
|
||||
status: 'warning',
|
||||
})
|
||||
)
|
||||
);
|
||||
toast({
|
||||
id: 'WORKFLOW_LOADED',
|
||||
title: t('toast.loadedWithWarnings'),
|
||||
status: 'warning',
|
||||
});
|
||||
|
||||
warnings.forEach(({ message, ...rest }) => {
|
||||
log.warn(rest, message);
|
||||
});
|
||||
@@ -58,54 +72,42 @@ export const addWorkflowLoadRequestedListener = (startAppListening: AppStartList
|
||||
if (e instanceof WorkflowVersionError) {
|
||||
// The workflow version was not recognized in the valid list of versions
|
||||
log.error({ error: parseify(e) }, e.message);
|
||||
dispatch(
|
||||
addToast(
|
||||
makeToast({
|
||||
title: t('nodes.unableToValidateWorkflow'),
|
||||
status: 'error',
|
||||
description: e.message,
|
||||
})
|
||||
)
|
||||
);
|
||||
toast({
|
||||
id: 'UNABLE_TO_VALIDATE_WORKFLOW',
|
||||
title: t('nodes.unableToValidateWorkflow'),
|
||||
status: 'error',
|
||||
description: e.message,
|
||||
});
|
||||
} else if (e instanceof WorkflowMigrationError) {
|
||||
// There was a problem migrating the workflow to the latest version
|
||||
log.error({ error: parseify(e) }, e.message);
|
||||
dispatch(
|
||||
addToast(
|
||||
makeToast({
|
||||
title: t('nodes.unableToValidateWorkflow'),
|
||||
status: 'error',
|
||||
description: e.message,
|
||||
})
|
||||
)
|
||||
);
|
||||
toast({
|
||||
id: 'UNABLE_TO_VALIDATE_WORKFLOW',
|
||||
title: t('nodes.unableToValidateWorkflow'),
|
||||
status: 'error',
|
||||
description: e.message,
|
||||
});
|
||||
} else if (e instanceof z.ZodError) {
|
||||
// There was a problem validating the workflow itself
|
||||
const { message } = fromZodError(e, {
|
||||
prefix: t('nodes.workflowValidation'),
|
||||
});
|
||||
log.error({ error: parseify(e) }, message);
|
||||
dispatch(
|
||||
addToast(
|
||||
makeToast({
|
||||
title: t('nodes.unableToValidateWorkflow'),
|
||||
status: 'error',
|
||||
description: message,
|
||||
})
|
||||
)
|
||||
);
|
||||
toast({
|
||||
id: 'UNABLE_TO_VALIDATE_WORKFLOW',
|
||||
title: t('nodes.unableToValidateWorkflow'),
|
||||
status: 'error',
|
||||
description: message,
|
||||
});
|
||||
} else {
|
||||
// Some other error occurred
|
||||
log.error({ error: parseify(e) }, t('nodes.unknownErrorValidatingWorkflow'));
|
||||
dispatch(
|
||||
addToast(
|
||||
makeToast({
|
||||
title: t('nodes.unableToValidateWorkflow'),
|
||||
status: 'error',
|
||||
description: t('nodes.unknownErrorValidatingWorkflow'),
|
||||
})
|
||||
)
|
||||
);
|
||||
toast({
|
||||
id: 'UNABLE_TO_VALIDATE_WORKFLOW',
|
||||
title: t('nodes.unableToValidateWorkflow'),
|
||||
status: 'error',
|
||||
description: t('nodes.unknownErrorValidatingWorkflow'),
|
||||
});
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
@@ -21,7 +21,8 @@ import { galleryPersistConfig, gallerySlice } from 'features/gallery/store/galle
|
||||
import { hrfPersistConfig, hrfSlice } from 'features/hrf/store/hrfSlice';
|
||||
import { loraPersistConfig, loraSlice } from 'features/lora/store/loraSlice';
|
||||
import { modelManagerV2PersistConfig, modelManagerV2Slice } from 'features/modelManagerV2/store/modelManagerV2Slice';
|
||||
import { nodesPersistConfig, nodesSlice } from 'features/nodes/store/nodesSlice';
|
||||
import { nodesPersistConfig, nodesSlice, nodesUndoableConfig } from 'features/nodes/store/nodesSlice';
|
||||
import { workflowSettingsPersistConfig, workflowSettingsSlice } from 'features/nodes/store/workflowSettingsSlice';
|
||||
import { workflowPersistConfig, workflowSlice } from 'features/nodes/store/workflowSlice';
|
||||
import { generationPersistConfig, generationSlice } from 'features/parameters/store/generationSlice';
|
||||
import { postprocessingPersistConfig, postprocessingSlice } from 'features/parameters/store/postprocessingSlice';
|
||||
@@ -50,7 +51,7 @@ const allReducers = {
|
||||
[canvasSlice.name]: canvasSlice.reducer,
|
||||
[gallerySlice.name]: gallerySlice.reducer,
|
||||
[generationSlice.name]: generationSlice.reducer,
|
||||
[nodesSlice.name]: nodesSlice.reducer,
|
||||
[nodesSlice.name]: undoable(nodesSlice.reducer, nodesUndoableConfig),
|
||||
[postprocessingSlice.name]: postprocessingSlice.reducer,
|
||||
[systemSlice.name]: systemSlice.reducer,
|
||||
[configSlice.name]: configSlice.reducer,
|
||||
@@ -66,6 +67,7 @@ const allReducers = {
|
||||
[workflowSlice.name]: workflowSlice.reducer,
|
||||
[hrfSlice.name]: hrfSlice.reducer,
|
||||
[controlLayersSlice.name]: undoable(controlLayersSlice.reducer, controlLayersUndoableConfig),
|
||||
[workflowSettingsSlice.name]: workflowSettingsSlice.reducer,
|
||||
[api.reducerPath]: api.reducer,
|
||||
};
|
||||
|
||||
@@ -111,6 +113,7 @@ const persistConfigs: { [key in keyof typeof allReducers]?: PersistConfig } = {
|
||||
[modelManagerV2PersistConfig.name]: modelManagerV2PersistConfig,
|
||||
[hrfPersistConfig.name]: hrfPersistConfig,
|
||||
[controlLayersPersistConfig.name]: controlLayersPersistConfig,
|
||||
[workflowSettingsPersistConfig.name]: workflowSettingsPersistConfig,
|
||||
};
|
||||
|
||||
const unserialize: UnserializeFunction = (data, key) => {
|
||||
|
||||
@@ -74,6 +74,7 @@ export type AppConfig = {
|
||||
maxUpscalePixels?: number;
|
||||
metadataFetchDebounce?: number;
|
||||
workflowFetchDebounce?: number;
|
||||
isLocal?: boolean;
|
||||
sd: {
|
||||
defaultModel?: string;
|
||||
disabledControlNetModels: string[];
|
||||
|
||||
@@ -1,11 +1,10 @@
|
||||
import { useAppToaster } from 'app/components/Toaster';
|
||||
import { useImageUrlToBlob } from 'common/hooks/useImageUrlToBlob';
|
||||
import { copyBlobToClipboard } from 'features/system/util/copyBlobToClipboard';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { useCallback, useMemo } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
|
||||
export const useCopyImageToClipboard = () => {
|
||||
const toaster = useAppToaster();
|
||||
const { t } = useTranslation();
|
||||
const imageUrlToBlob = useImageUrlToBlob();
|
||||
|
||||
@@ -16,12 +15,11 @@ export const useCopyImageToClipboard = () => {
|
||||
const copyImageToClipboard = useCallback(
|
||||
async (image_url: string) => {
|
||||
if (!isClipboardAPIAvailable) {
|
||||
toaster({
|
||||
toast({
|
||||
id: 'PROBLEM_COPYING_IMAGE',
|
||||
title: t('toast.problemCopyingImage'),
|
||||
description: "Your browser doesn't support the Clipboard API.",
|
||||
status: 'error',
|
||||
duration: 2500,
|
||||
isClosable: true,
|
||||
});
|
||||
}
|
||||
try {
|
||||
@@ -33,23 +31,21 @@ export const useCopyImageToClipboard = () => {
|
||||
|
||||
copyBlobToClipboard(blob);
|
||||
|
||||
toaster({
|
||||
toast({
|
||||
id: 'IMAGE_COPIED',
|
||||
title: t('toast.imageCopied'),
|
||||
status: 'success',
|
||||
duration: 2500,
|
||||
isClosable: true,
|
||||
});
|
||||
} catch (err) {
|
||||
toaster({
|
||||
toast({
|
||||
id: 'PROBLEM_COPYING_IMAGE',
|
||||
title: t('toast.problemCopyingImage'),
|
||||
description: String(err),
|
||||
status: 'error',
|
||||
duration: 2500,
|
||||
isClosable: true,
|
||||
});
|
||||
}
|
||||
},
|
||||
[imageUrlToBlob, isClipboardAPIAvailable, t, toaster]
|
||||
[imageUrlToBlob, isClipboardAPIAvailable, t]
|
||||
);
|
||||
|
||||
return { isClipboardAPIAvailable, copyImageToClipboard };
|
||||
|
||||
@@ -1,13 +1,12 @@
|
||||
import { useStore } from '@nanostores/react';
|
||||
import { useAppToaster } from 'app/components/Toaster';
|
||||
import { $authToken } from 'app/store/nanostores/authToken';
|
||||
import { useAppDispatch } from 'app/store/storeHooks';
|
||||
import { imageDownloaded } from 'features/gallery/store/actions';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { useCallback } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
|
||||
export const useDownloadImage = () => {
|
||||
const toaster = useAppToaster();
|
||||
const { t } = useTranslation();
|
||||
const dispatch = useAppDispatch();
|
||||
const authToken = useStore($authToken);
|
||||
@@ -37,16 +36,15 @@ export const useDownloadImage = () => {
|
||||
window.URL.revokeObjectURL(url);
|
||||
dispatch(imageDownloaded());
|
||||
} catch (err) {
|
||||
toaster({
|
||||
toast({
|
||||
id: 'PROBLEM_DOWNLOADING_IMAGE',
|
||||
title: t('toast.problemDownloadingImage'),
|
||||
description: String(err),
|
||||
status: 'error',
|
||||
duration: 2500,
|
||||
isClosable: true,
|
||||
});
|
||||
}
|
||||
},
|
||||
[t, toaster, dispatch, authToken]
|
||||
[t, dispatch, authToken]
|
||||
);
|
||||
|
||||
return { downloadImage };
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import { useAppToaster } from 'app/components/Toaster';
|
||||
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
|
||||
import { useCallback, useEffect, useState } from 'react';
|
||||
import type { Accept, FileRejection } from 'react-dropzone';
|
||||
@@ -26,7 +26,6 @@ const selectPostUploadAction = createMemoizedSelector(activeTabNameSelector, (ac
|
||||
|
||||
export const useFullscreenDropzone = () => {
|
||||
const { t } = useTranslation();
|
||||
const toaster = useAppToaster();
|
||||
const postUploadAction = useAppSelector(selectPostUploadAction);
|
||||
const autoAddBoardId = useAppSelector((s) => s.gallery.autoAddBoardId);
|
||||
const [isHandlingUpload, setIsHandlingUpload] = useState<boolean>(false);
|
||||
@@ -37,13 +36,14 @@ export const useFullscreenDropzone = () => {
|
||||
(rejection: FileRejection) => {
|
||||
setIsHandlingUpload(true);
|
||||
|
||||
toaster({
|
||||
toast({
|
||||
id: 'UPLOAD_FAILED',
|
||||
title: t('toast.uploadFailed'),
|
||||
description: rejection.errors.map((error) => error.message).join('\n'),
|
||||
status: 'error',
|
||||
});
|
||||
},
|
||||
[t, toaster]
|
||||
[t]
|
||||
);
|
||||
|
||||
const fileAcceptedCallback = useCallback(
|
||||
@@ -62,7 +62,8 @@ export const useFullscreenDropzone = () => {
|
||||
const onDrop = useCallback(
|
||||
(acceptedFiles: Array<File>, fileRejections: Array<FileRejection>) => {
|
||||
if (fileRejections.length > 1) {
|
||||
toaster({
|
||||
toast({
|
||||
id: 'UPLOAD_FAILED',
|
||||
title: t('toast.uploadFailed'),
|
||||
description: t('toast.uploadFailedInvalidUploadDesc'),
|
||||
status: 'error',
|
||||
@@ -78,7 +79,7 @@ export const useFullscreenDropzone = () => {
|
||||
fileAcceptedCallback(file);
|
||||
});
|
||||
},
|
||||
[t, toaster, fileAcceptedCallback, fileRejectionCallback]
|
||||
[t, fileAcceptedCallback, fileRejectionCallback]
|
||||
);
|
||||
|
||||
const onDragOver = useCallback(() => {
|
||||
|
||||
@@ -13,6 +13,7 @@ type UseGroupedModelComboboxArg<T extends AnyModelConfig> = {
|
||||
onChange: (value: T | null) => void;
|
||||
getIsDisabled?: (model: T) => boolean;
|
||||
isLoading?: boolean;
|
||||
groupByType?: boolean;
|
||||
};
|
||||
|
||||
type UseGroupedModelComboboxReturn = {
|
||||
@@ -23,17 +24,21 @@ type UseGroupedModelComboboxReturn = {
|
||||
noOptionsMessage: () => string;
|
||||
};
|
||||
|
||||
const groupByBaseFunc = <T extends AnyModelConfig>(model: T) => model.base.toUpperCase();
|
||||
const groupByBaseAndTypeFunc = <T extends AnyModelConfig>(model: T) =>
|
||||
`${model.base.toUpperCase()} / ${model.type.replaceAll('_', ' ').toUpperCase()}`;
|
||||
|
||||
export const useGroupedModelCombobox = <T extends AnyModelConfig>(
|
||||
arg: UseGroupedModelComboboxArg<T>
|
||||
): UseGroupedModelComboboxReturn => {
|
||||
const { t } = useTranslation();
|
||||
const base_model = useAppSelector((s) => s.generation.model?.base ?? 'sdxl');
|
||||
const { modelConfigs, selectedModel, getIsDisabled, onChange, isLoading } = arg;
|
||||
const { modelConfigs, selectedModel, getIsDisabled, onChange, isLoading, groupByType = false } = arg;
|
||||
const options = useMemo<GroupBase<ComboboxOption>[]>(() => {
|
||||
if (!modelConfigs) {
|
||||
return [];
|
||||
}
|
||||
const groupedModels = groupBy(modelConfigs, 'base');
|
||||
const groupedModels = groupBy(modelConfigs, groupByType ? groupByBaseAndTypeFunc : groupByBaseFunc);
|
||||
const _options = reduce(
|
||||
groupedModels,
|
||||
(acc, val, label) => {
|
||||
@@ -49,9 +54,9 @@ export const useGroupedModelCombobox = <T extends AnyModelConfig>(
|
||||
},
|
||||
[] as GroupBase<ComboboxOption>[]
|
||||
);
|
||||
_options.sort((a) => (a.label === base_model ? -1 : 1));
|
||||
_options.sort((a) => (a.label?.split('/')[0]?.toLowerCase().includes(base_model) ? -1 : 1));
|
||||
return _options;
|
||||
}, [getIsDisabled, modelConfigs, base_model]);
|
||||
}, [modelConfigs, groupByType, getIsDisabled, base_model]);
|
||||
|
||||
const value = useMemo(
|
||||
() =>
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import { useStore } from '@nanostores/react';
|
||||
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import {
|
||||
@@ -6,187 +7,230 @@ import {
|
||||
} from 'features/controlAdapters/store/controlAdaptersSlice';
|
||||
import { isControlNetOrT2IAdapter } from 'features/controlAdapters/store/types';
|
||||
import { selectControlLayersSlice } from 'features/controlLayers/store/controlLayersSlice';
|
||||
import type { Layer } from 'features/controlLayers/store/types';
|
||||
import { selectDynamicPromptsSlice } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
|
||||
import { getShouldProcessPrompt } from 'features/dynamicPrompts/util/getShouldProcessPrompt';
|
||||
import { selectNodesSlice } from 'features/nodes/store/nodesSlice';
|
||||
import { $templates, selectNodesSlice } from 'features/nodes/store/nodesSlice';
|
||||
import type { Templates } from 'features/nodes/store/types';
|
||||
import { selectWorkflowSettingsSlice } from 'features/nodes/store/workflowSettingsSlice';
|
||||
import { isInvocationNode } from 'features/nodes/types/invocation';
|
||||
import { selectGenerationSlice } from 'features/parameters/store/generationSlice';
|
||||
import { selectSystemSlice } from 'features/system/store/systemSlice';
|
||||
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
|
||||
import i18n from 'i18next';
|
||||
import { forEach } from 'lodash-es';
|
||||
import { forEach, upperFirst } from 'lodash-es';
|
||||
import { useMemo } from 'react';
|
||||
import { getConnectedEdges } from 'reactflow';
|
||||
|
||||
Old selector:

const selector = createMemoizedSelector(
  [
    selectControlAdaptersSlice,
    selectGenerationSlice,
    selectSystemSlice,
    selectNodesSlice,
    selectDynamicPromptsSlice,
    selectControlLayersSlice,
    activeTabNameSelector,
  ],
  (controlAdapters, generation, system, nodes, dynamicPrompts, controlLayers, activeTabName) => {
    const { model } = generation;
    const { positivePrompt } = controlLayers.present;
    const { isConnected } = system;

    const reasons: string[] = [];

    // Cannot generate if not connected
    if (!isConnected) {
      reasons.push(i18n.t('parameters.invoke.systemDisconnected'));
    }

    if (activeTabName === 'workflows') {
      if (nodes.shouldValidateGraph) {
        if (!nodes.nodes.length) {
          reasons.push(i18n.t('parameters.invoke.noNodesInGraph'));
        }

        nodes.nodes.forEach((node) => {
          if (!isInvocationNode(node)) {
            return;
          }

          const nodeTemplate = nodes.templates[node.data.type];

          if (!nodeTemplate) {
            // Node type not found
            reasons.push(i18n.t('parameters.invoke.missingNodeTemplate'));
            return;
          }

          const connectedEdges = getConnectedEdges([node], nodes.edges);

          forEach(node.data.inputs, (field) => {
            const fieldTemplate = nodeTemplate.inputs[field.name];
            const hasConnection = connectedEdges.some(
              (edge) => edge.target === node.id && edge.targetHandle === field.name
            );

            if (!fieldTemplate) {
              reasons.push(i18n.t('parameters.invoke.missingFieldTemplate'));
              return;
            }

            if (fieldTemplate.required && field.value === undefined && !hasConnection) {
              reasons.push(
                i18n.t('parameters.invoke.missingInputForField', {
                  nodeLabel: node.data.label || nodeTemplate.title,
                  fieldLabel: field.label || fieldTemplate.title,
                })
              );
              return;
            }
          });
        });
      }
    } else {
      if (dynamicPrompts.prompts.length === 0 && getShouldProcessPrompt(positivePrompt)) {
        reasons.push(i18n.t('parameters.invoke.noPrompts'));
      }

      if (!model) {
        reasons.push(i18n.t('parameters.invoke.noModelSelected'));
      }

      if (activeTabName === 'generation') {
        // Handling for generation tab
        controlLayers.present.layers
          .filter((l) => l.isEnabled)
          .flatMap((l) => {
            if (l.type === 'control_adapter_layer') {
              return l.controlAdapter;
            } else if (l.type === 'ip_adapter_layer') {
              return l.ipAdapter;
            } else if (l.type === 'regional_guidance_layer') {
              return l.ipAdapters;
            }
            return [];
          })
          .forEach((ca, i) => {
            const hasNoModel = !ca.model;
            const mismatchedModelBase = ca.model?.base !== model?.base;
            const hasNoImage = !ca.image;
            const imageNotProcessed =
              (ca.type === 'controlnet' || ca.type === 't2i_adapter') && !ca.processedImage && ca.processorConfig;

            if (hasNoModel) {
              reasons.push(
                i18n.t('parameters.invoke.noModelForControlAdapter', {
                  number: i + 1,
                })
              );
            }
            if (mismatchedModelBase) {
              // This should never happen, just a sanity check
              reasons.push(
                i18n.t('parameters.invoke.incompatibleBaseModelForControlAdapter', {
                  number: i + 1,
                })
              );
            }
            if (hasNoImage) {
              reasons.push(
                i18n.t('parameters.invoke.noControlImageForControlAdapter', {
                  number: i + 1,
                })
              );
            }
            if (imageNotProcessed) {
              reasons.push(
                i18n.t('parameters.invoke.imageNotProcessedForControlAdapter', {
                  number: i + 1,
                })
              );
            }
          });
      } else {
        // Handling for all other tabs
        selectControlAdapterAll(controlAdapters)
          .filter((ca) => ca.isEnabled)
          .forEach((ca, i) => {
            if (!ca.isEnabled) {
              return;
            }

            if (!ca.model) {
              reasons.push(
                i18n.t('parameters.invoke.noModelForControlAdapter', {
                  number: i + 1,
                })
              );
            } else if (ca.model.base !== model?.base) {
              // This should never happen, just a sanity check
              reasons.push(
                i18n.t('parameters.invoke.incompatibleBaseModelForControlAdapter', {
                  number: i + 1,
                })
              );
            }

            if (
              !ca.controlImage ||
              (isControlNetOrT2IAdapter(ca) && !ca.processedControlImage && ca.processorType !== 'none')
            ) {
              reasons.push(
                i18n.t('parameters.invoke.noControlImageForControlAdapter', {
                  number: i + 1,
                })
              );
            }
          });
      }
    }

    return { isReady: !reasons.length, reasons };
  }
);

New module constant and selector factory (parameterized by templates):

const LAYER_TYPE_TO_TKEY: Record<Layer['type'], string> = {
  initial_image_layer: 'controlLayers.globalInitialImage',
  control_adapter_layer: 'controlLayers.globalControlAdapter',
  ip_adapter_layer: 'controlLayers.globalIPAdapter',
  regional_guidance_layer: 'controlLayers.regionalGuidance',
};

const createSelector = (templates: Templates) =>
  createMemoizedSelector(
    [
      selectControlAdaptersSlice,
      selectGenerationSlice,
      selectSystemSlice,
      selectNodesSlice,
      selectWorkflowSettingsSlice,
      selectDynamicPromptsSlice,
      selectControlLayersSlice,
      activeTabNameSelector,
    ],
    (controlAdapters, generation, system, nodes, workflowSettings, dynamicPrompts, controlLayers, activeTabName) => {
      const { model } = generation;
      const { size } = controlLayers.present;
      const { positivePrompt } = controlLayers.present;
      const { isConnected } = system;

      const reasons: { prefix?: string; content: string }[] = [];

      // Cannot generate if not connected
      if (!isConnected) {
        reasons.push({ content: i18n.t('parameters.invoke.systemDisconnected') });
      }

      if (activeTabName === 'workflows') {
        if (workflowSettings.shouldValidateGraph) {
          if (!nodes.nodes.length) {
            reasons.push({ content: i18n.t('parameters.invoke.noNodesInGraph') });
          }

          nodes.nodes.forEach((node) => {
            if (!isInvocationNode(node)) {
              return;
            }

            const nodeTemplate = templates[node.data.type];

            if (!nodeTemplate) {
              // Node type not found
              reasons.push({ content: i18n.t('parameters.invoke.missingNodeTemplate') });
              return;
            }

            const connectedEdges = getConnectedEdges([node], nodes.edges);

            forEach(node.data.inputs, (field) => {
              const fieldTemplate = nodeTemplate.inputs[field.name];
              const hasConnection = connectedEdges.some(
                (edge) => edge.target === node.id && edge.targetHandle === field.name
              );

              if (!fieldTemplate) {
                reasons.push({ content: i18n.t('parameters.invoke.missingFieldTemplate') });
                return;
              }

              if (fieldTemplate.required && field.value === undefined && !hasConnection) {
                reasons.push({
                  content: i18n.t('parameters.invoke.missingInputForField', {
                    nodeLabel: node.data.label || nodeTemplate.title,
                    fieldLabel: field.label || fieldTemplate.title,
                  }),
                });
                return;
              }
            });
          });
        }
      } else {
        if (dynamicPrompts.prompts.length === 0 && getShouldProcessPrompt(positivePrompt)) {
          reasons.push({ content: i18n.t('parameters.invoke.noPrompts') });
        }

        if (!model) {
          reasons.push({ content: i18n.t('parameters.invoke.noModelSelected') });
        }

        if (activeTabName === 'generation') {
          // Handling for generation tab
          controlLayers.present.layers
            .filter((l) => l.isEnabled)
            .forEach((l, i) => {
              const layerLiteral = i18n.t('controlLayers.layers_one');
              const layerNumber = i + 1;
              const layerType = i18n.t(LAYER_TYPE_TO_TKEY[l.type]);
              const prefix = `${layerLiteral} #${layerNumber} (${layerType})`;
              const problems: string[] = [];

              if (l.type === 'control_adapter_layer') {
                // Must have model
                if (!l.controlAdapter.model) {
                  problems.push(i18n.t('parameters.invoke.layer.controlAdapterNoModelSelected'));
                }
                // Model base must match
                if (l.controlAdapter.model?.base !== model?.base) {
                  problems.push(i18n.t('parameters.invoke.layer.controlAdapterIncompatibleBaseModel'));
                }
                // Must have a control image OR, if it has a processor, it must have a processed image
                if (!l.controlAdapter.image) {
                  problems.push(i18n.t('parameters.invoke.layer.controlAdapterNoImageSelected'));
                } else if (l.controlAdapter.processorConfig && !l.controlAdapter.processedImage) {
                  problems.push(i18n.t('parameters.invoke.layer.controlAdapterImageNotProcessed'));
                }
                // T2I Adapters require images have dimensions that are multiples of 64 (SD1.5) or 32 (SDXL)
                if (l.controlAdapter.type === 't2i_adapter') {
                  const multiple = model?.base === 'sdxl' ? 32 : 64;
                  if (size.width % multiple !== 0 || size.height % multiple !== 0) {
                    problems.push(i18n.t('parameters.invoke.layer.t2iAdapterIncompatibleDimensions', { multiple }));
                  }
                }
              }

              if (l.type === 'ip_adapter_layer') {
                // Must have model
                if (!l.ipAdapter.model) {
                  problems.push(i18n.t('parameters.invoke.layer.ipAdapterNoModelSelected'));
                }
                // Model base must match
                if (l.ipAdapter.model?.base !== model?.base) {
                  problems.push(i18n.t('parameters.invoke.layer.ipAdapterIncompatibleBaseModel'));
                }
                // Must have an image
                if (!l.ipAdapter.image) {
                  problems.push(i18n.t('parameters.invoke.layer.ipAdapterNoImageSelected'));
                }
              }

              if (l.type === 'initial_image_layer') {
                // Must have an image
                if (!l.image) {
                  problems.push(i18n.t('parameters.invoke.layer.initialImageNoImageSelected'));
                }
              }

              if (l.type === 'regional_guidance_layer') {
                // Must have a region
                if (l.maskObjects.length === 0) {
                  problems.push(i18n.t('parameters.invoke.layer.rgNoRegion'));
                }
                // Must have at least 1 prompt or IP Adapter
                if (l.positivePrompt === null && l.negativePrompt === null && l.ipAdapters.length === 0) {
                  problems.push(i18n.t('parameters.invoke.layer.rgNoPromptsOrIPAdapters'));
                }
                l.ipAdapters.forEach((ipAdapter) => {
                  // Must have model
                  if (!ipAdapter.model) {
                    problems.push(i18n.t('parameters.invoke.layer.ipAdapterNoModelSelected'));
                  }
                  // Model base must match
                  if (ipAdapter.model?.base !== model?.base) {
                    problems.push(i18n.t('parameters.invoke.layer.ipAdapterIncompatibleBaseModel'));
                  }
                  // Must have an image
                  if (!ipAdapter.image) {
                    problems.push(i18n.t('parameters.invoke.layer.ipAdapterNoImageSelected'));
                  }
                });
              }

              if (problems.length) {
                const content = upperFirst(problems.join(', '));
                reasons.push({ prefix, content });
              }
            });
        } else {
          // Handling for all other tabs
          selectControlAdapterAll(controlAdapters)
            .filter((ca) => ca.isEnabled)
            .forEach((ca, i) => {
              if (!ca.isEnabled) {
                return;
              }

              if (!ca.model) {
                reasons.push({ content: i18n.t('parameters.invoke.noModelForControlAdapter', { number: i + 1 }) });
              } else if (ca.model.base !== model?.base) {
                // This should never happen, just a sanity check
                reasons.push({
                  content: i18n.t('parameters.invoke.incompatibleBaseModelForControlAdapter', { number: i + 1 }),
                });
              }

              if (
                !ca.controlImage ||
                (isControlNetOrT2IAdapter(ca) && !ca.processedControlImage && ca.processorType !== 'none')
              ) {
                reasons.push({
                  content: i18n.t('parameters.invoke.noControlImageForControlAdapter', { number: i + 1 }),
                });
              }
            });
        }
      }

      return { isReady: !reasons.length, reasons };
    }
  );
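The new selector collects each blocker as a { prefix?: string; content: string } object instead of a bare string, which lets layer-specific problems carry a prefix such as the layer number and type. Below is a minimal TypeScript sketch of how such entries could be flattened for display; the formatReason helper and the sample values are illustrative assumptions, not code from this diff:

type Reason = { prefix?: string; content: string };

// Join an optional prefix (e.g. a layer label) with the problem text.
const formatReason = ({ prefix, content }: Reason): string =>
  prefix ? `${prefix}: ${content}` : content;

// Hypothetical sample entries, mirroring the shape pushed by the selector above.
const sample: Reason[] = [
  { content: 'System disconnected' },
  { prefix: 'Layer #1 (Global Control Adapter)', content: 'No model selected, no control image selected' },
];

// e.g. lines for a tooltip or toast body
console.log(sample.map(formatReason).join('\n'));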
Old hook:

export const useIsReadyToEnqueue = () => {
  const { isReady, reasons } = useAppSelector(selector);
  return { isReady, reasons };
};

New hook:

export const useIsReadyToEnqueue = () => {
  const templates = useStore($templates);
  const selector = useMemo(() => createSelector(templates), [templates]);
  const value = useAppSelector(selector);
  return value;
};
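The reworked hook reads the templates from the $templates atom and rebuilds the memoized selector with useMemo, so existing consumers keep the same call site and return shape. A hypothetical consumer is sketched below; the import path, component name, and markup are assumptions for illustration only, not code from this diff:

import { memo } from 'react';

import { useIsReadyToEnqueue } from 'common/hooks/useIsReadyToEnqueue'; // path assumed

const EnqueueButton = memo(() => {
  const { isReady, reasons } = useIsReadyToEnqueue();
  // Disable the button while there are blocking reasons and surface them in the tooltip.
  return (
    <button type="button" disabled={!isReady} title={reasons.map((r) => r.content).join('\n')}>
      Invoke
    </button>
  );
});
EnqueueButton.displayName = 'EnqueueButton';

export default EnqueueButton;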
Some files were not shown because too many files have changed in this diff.