Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-15 09:18:00 -05:00)

Compare commits: v4.2.0 ... v4.2.2post (216 commits)
Per-commit details (author, date, and message) were not captured in this mirror view; only abbreviated SHA1 hashes are listed, running from aa0c59bb51 (first listed) through 63e62c5720 (last listed).
@@ -117,13 +117,13 @@ Stateless fields do not store their value in the node, so their field instances

"Custom" fields will always be treated as stateless fields.

-##### Collection and Scalar Fields
+##### Single and Collection Fields

-Field types have a name and two flags which may identify it as a **collection** or **collection or scalar** field.
+Field types have a name and cardinality property which may identify it as a **SINGLE**, **COLLECTION** or **SINGLE_OR_COLLECTION** field.

-If a field is annotated in python as a list, its field type is parsed and flagged as a **collection** type (e.g. `list[int]`).

-If it is annotated as a union of a type and list, the type will be flagged as a **collection or scalar** type (e.g. `Union[int, list[int]]`). Fields may not be unions of different types (e.g. `Union[int, list[str]]` and `Union[int, str]` are not allowed).
+- If a field is annotated in python as a singular value or class, its field type is parsed as a **SINGLE** type (e.g. `int`, `ImageField`, `str`).
+- If a field is annotated in python as a list, its field type is parsed as a **COLLECTION** type (e.g. `list[int]`).
+- If it is annotated as a union of a type and list, the type will be parsed as a **SINGLE_OR_COLLECTION** type (e.g. `Union[int, list[int]]`). Fields may not be unions of different types (e.g. `Union[int, list[str]]` and `Union[int, str]` are not allowed).
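Illustrative aside (not part of the diff): the three annotation shapes described above, as they would appear on hypothetical fields in Python.

```python
from typing import Union

# SINGLE: a plain annotation (e.g. int, str, ImageField)
width: int

# COLLECTION: a list annotation
widths: list[int]

# SINGLE_OR_COLLECTION: a union of a type and a list of that same type
width_or_widths: Union[int, list[int]]

# Not allowed: unions of different types, e.g. Union[int, str] or Union[int, list[str]]
```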
## Implementation

@@ -173,8 +173,7 @@ Field types are represented as structured objects:

```ts
type FieldType = {
  name: string;
-  isCollection: boolean;
-  isCollectionOrScalar: boolean;
+  cardinality: 'SINGLE' | 'COLLECTION' | 'SINGLE_OR_COLLECTION';
};
```

@@ -186,7 +185,7 @@ There are 4 general cases for field type parsing.

When a field is annotated as a primitive values (e.g. `int`, `str`, `float`), the field type parsing is fairly straightforward. The field is represented by a simple OpenAPI **schema object**, which has a `type` property.

-We create a field type name from this `type` string (e.g. `string` -> `StringField`).
+We create a field type name from this `type` string (e.g. `string` -> `StringField`). The cardinality is `"SINGLE"`.

##### Complex Types
@@ -200,13 +199,13 @@ We need to **dereference** the schema to pull these out. Dereferencing may requi

When a field is annotated as a list of a single type, the schema object has an `items` property. They may be a schema object or reference object and must be parsed to determine the item type.

-We use the item type for field type name, adding `isCollection: true` to the field type.
+We use the item type for field type name. The cardinality is `"COLLECTION"`.

-##### Collection or Scalar Types
+##### Single or Collection Types

When a field is annotated as a union of a type and list of that type, the schema object has an `anyOf` property, which holds a list of valid types for the union.

-After verifying that the union has two members (a type and list of the same type), we use the type for field type name, adding `isCollectionOrScalar: true` to the field type.
+After verifying that the union has two members (a type and list of the same type), we use the type for field type name, with cardinality `"SINGLE_OR_COLLECTION"`.
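Illustrative aside (not part of the diff): a minimal Python sketch of the classification rule described above, assuming the property schema has already been dereferenced. The real parser lives in the frontend; this only restates the logic.

```python
def parse_cardinality(schema: dict) -> str:
    """Classify a dereferenced OpenAPI property schema (illustrative sketch)."""
    if "anyOf" in schema:
        members = schema["anyOf"]
        # Expect exactly two members: a type and a list of that same type.
        if len(members) != 2:
            raise ValueError("unions must have exactly two members")
        item_types = set()
        for member in members:
            if member.get("type") == "array":
                item_types.add(member["items"]["type"])
            else:
                item_types.add(member["type"])
        if len(item_types) != 1:
            raise ValueError("unions of different types are not allowed")
        return "SINGLE_OR_COLLECTION"
    if schema.get("type") == "array":
        return "COLLECTION"
    return "SINGLE"


assert parse_cardinality({"type": "integer"}) == "SINGLE"
assert parse_cardinality({"type": "array", "items": {"type": "integer"}}) == "COLLECTION"
assert (
    parse_cardinality({"anyOf": [{"type": "integer"}, {"type": "array", "items": {"type": "integer"}}]})
    == "SINGLE_OR_COLLECTION"
)
```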
##### Optional Fields
@@ -98,7 +98,7 @@ Updating is exactly the same as installing - download the latest installer, choo

If you have installation issues, please review the [FAQ]. You can also [create an issue] or ask for help on [discord].

-[installation requirements]: INSTALLATION.md#installation-requirements
+[installation requirements]: INSTALL_REQUIREMENTS.md
[FAQ]: ../help/FAQ.md
[install some models]: 050_INSTALLING_MODELS.md
[configuration docs]: ../features/CONFIGURATION.md

@@ -10,7 +10,7 @@ InvokeAI is distributed as a python package on PyPI, installable with `pip`. The

### Requirements

-Before you start, go through the [installation requirements].
+Before you start, go through the [installation requirements](./INSTALL_REQUIREMENTS.md).

### Installation Walkthrough

@@ -79,7 +79,7 @@ Before you start, go through the [installation requirements].

1. Install the InvokeAI Package. The base command is `pip install InvokeAI --use-pep517`, but you may need to change this depending on your system and the desired features.

-    - You may need to provide an [extra index URL]. Select your platform configuration using [this tool on the PyTorch website]. Copy the `--extra-index-url` string from this and append it to your install command.
+    - You may need to provide an [extra index URL](https://pip.pypa.io/en/stable/cli/pip_install/#cmdoption-extra-index-url). Select your platform configuration using [this tool on the PyTorch website](https://pytorch.org/get-started/locally/). Copy the `--extra-index-url` string from this and append it to your install command.

    !!! example "Install with an extra index URL"

@@ -116,4 +116,4 @@ Before you start, go through the [installation requirements].

!!! warning

-    If the virtual environment is _not_ inside the root directory, then you _must_ specify the path to the root directory with `--root_dir \path\to\invokeai` or the `INVOKEAI_ROOT` environment variable.
+    If the virtual environment is _not_ inside the root directory, then you _must_ specify the path to the root directory with `--root \path\to\invokeai` or the `INVOKEAI_ROOT` environment variable.

@@ -37,13 +37,13 @@ Invoke runs best with a dedicated GPU, but will fall back to running on CPU, alb

=== "Nvidia"

    ```
-    Any GPU with at least 8GB VRAM. Linux only.
+    Any GPU with at least 8GB VRAM.
    ```

=== "AMD"

    ```
-    Any GPU with at least 16GB VRAM.
+    Any GPU with at least 16GB VRAM. Linux only.
    ```

=== "Mac"
@@ -13,7 +13,6 @@ from pydantic import BaseModel, Field
from invokeai.app.invocations.upscale import ESRGAN_MODELS
from invokeai.app.services.invocation_cache.invocation_cache_common import InvocationCacheStatus
from invokeai.backend.image_util.infill_methods.patchmatch import PatchMatch
-from invokeai.backend.image_util.safety_checker import SafetyChecker
from invokeai.backend.util.logging import logging
from invokeai.version import __version__

@@ -109,9 +108,7 @@ async def get_config() -> AppConfig:
        upscaling_models.append(str(Path(model).stem))
    upscaler = Upscaler(upscaling_method="esrgan", upscaling_models=upscaling_models)

-    nsfw_methods = []
-    if SafetyChecker.safety_checker_available():
-        nsfw_methods.append("nsfw_checker")
+    nsfw_methods = ["nsfw_checker"]

    watermarking_methods = ["invisible_watermark"]
@@ -6,13 +6,12 @@ from fastapi import BackgroundTasks, Body, HTTPException, Path, Query, Request,
from fastapi.responses import FileResponse
from fastapi.routing import APIRouter
from PIL import Image
-from pydantic import BaseModel, Field, ValidationError
+from pydantic import BaseModel, Field, JsonValue

-from invokeai.app.invocations.fields import MetadataField, MetadataFieldValidator
+from invokeai.app.invocations.fields import MetadataField
from invokeai.app.services.image_records.image_records_common import ImageCategory, ImageRecordChanges, ResourceOrigin
from invokeai.app.services.images.images_common import ImageDTO, ImageUrlsDTO
from invokeai.app.services.shared.pagination import OffsetPaginatedResults
from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID, WorkflowWithoutIDValidator

from ..dependencies import ApiDependencies

@@ -42,13 +41,17 @@ async def upload_image(
    board_id: Optional[str] = Query(default=None, description="The board to add this image to, if any"),
    session_id: Optional[str] = Query(default=None, description="The session ID associated with this upload, if any"),
    crop_visible: Optional[bool] = Query(default=False, description="Whether to crop the image"),
+    metadata: Optional[JsonValue] = Body(
+        default=None, description="The metadata to associate with the image", embed=True
+    ),
) -> ImageDTO:
    """Uploads an image"""
    if not file.content_type or not file.content_type.startswith("image"):
        raise HTTPException(status_code=415, detail="Not an image")

-    metadata = None
-    workflow = None
+    _metadata = None
+    _workflow = None
+    _graph = None

    contents = await file.read()
    try:

@@ -62,22 +65,28 @@

    # TODO: retain non-invokeai metadata on upload?
    # attempt to parse metadata from image
-    metadata_raw = pil_image.info.get("invokeai_metadata", None)
-    if metadata_raw:
-        try:
-            metadata = MetadataFieldValidator.validate_json(metadata_raw)
-        except ValidationError:
-            ApiDependencies.invoker.services.logger.warn("Failed to parse metadata for uploaded image")
-            pass
+    metadata_raw = metadata if isinstance(metadata, str) else pil_image.info.get("invokeai_metadata", None)
+    if isinstance(metadata_raw, str):
+        _metadata = metadata_raw
+    else:
+        ApiDependencies.invoker.services.logger.warn("Failed to parse metadata for uploaded image")
+        pass

    # attempt to parse workflow from image
    workflow_raw = pil_image.info.get("invokeai_workflow", None)
-    if workflow_raw is not None:
-        try:
-            workflow = WorkflowWithoutIDValidator.validate_json(workflow_raw)
-        except ValidationError:
-            ApiDependencies.invoker.services.logger.warn("Failed to parse metadata for uploaded image")
-            pass
+    if isinstance(workflow_raw, str):
+        _workflow = workflow_raw
+    else:
+        ApiDependencies.invoker.services.logger.warn("Failed to parse workflow for uploaded image")
+        pass

+    # attempt to extract graph from image
+    graph_raw = pil_image.info.get("invokeai_graph", None)
+    if isinstance(graph_raw, str):
+        _graph = graph_raw
+    else:
+        ApiDependencies.invoker.services.logger.warn("Failed to parse graph for uploaded image")
+        pass

    try:
        image_dto = ApiDependencies.invoker.services.images.create(

@@ -86,8 +95,9 @@
            image_category=image_category,
            session_id=session_id,
            board_id=board_id,
-            metadata=metadata,
-            workflow=workflow,
+            metadata=_metadata,
+            workflow=_workflow,
+            graph=_graph,
            is_intermediate=is_intermediate,
        )
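The upload handler above reads the metadata, workflow, and graph that Invoke embeds as PNG text chunks. As a rough illustration (outside the diff), those same keys can be inspected from a generated image with Pillow; the file name here is hypothetical.

```python
from PIL import Image

# Hypothetical path to an image generated by Invoke
image = Image.open("output.png")

# Each value, if present, is a JSON string stored as a PNG text chunk
metadata_raw = image.info.get("invokeai_metadata", None)
workflow_raw = image.info.get("invokeai_workflow", None)
graph_raw = image.info.get("invokeai_graph", None)

for name, value in [("metadata", metadata_raw), ("workflow", workflow_raw), ("graph", graph_raw)]:
    print(f"{name}: {'present' if isinstance(value, str) else 'absent'}")
```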
@@ -185,14 +195,21 @@ async def get_image_metadata(
        raise HTTPException(status_code=404)


+class WorkflowAndGraphResponse(BaseModel):
+    workflow: Optional[str] = Field(description="The workflow used to generate the image, as stringified JSON")
+    graph: Optional[str] = Field(description="The graph used to generate the image, as stringified JSON")


@images_router.get(
-    "/i/{image_name}/workflow", operation_id="get_image_workflow", response_model=Optional[WorkflowWithoutID]
+    "/i/{image_name}/workflow", operation_id="get_image_workflow", response_model=WorkflowAndGraphResponse
)
async def get_image_workflow(
    image_name: str = Path(description="The name of image whose workflow to get"),
-) -> Optional[WorkflowWithoutID]:
+) -> WorkflowAndGraphResponse:
    try:
-        return ApiDependencies.invoker.services.images.get_workflow(image_name)
+        workflow = ApiDependencies.invoker.services.images.get_workflow(image_name)
+        graph = ApiDependencies.invoker.services.images.get_graph(image_name)
+        return WorkflowAndGraphResponse(workflow=workflow, graph=graph)
    except Exception:
        raise HTTPException(status_code=404)
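For context, a rough sketch of calling the updated endpoint from a client. The local address, port, and API prefix are assumptions; adjust them for your deployment.

```python
import requests

# Assumed local server and API prefix
base_url = "http://127.0.0.1:9090/api/v1"
image_name = "some-image.png"  # hypothetical image name

response = requests.get(f"{base_url}/images/i/{image_name}/workflow")
response.raise_for_status()
payload = response.json()

# Both fields are stringified JSON (or null) per WorkflowAndGraphResponse
print(payload["workflow"] is not None, payload["graph"] is not None)
```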
@@ -6,7 +6,7 @@ import pathlib
import shutil
import traceback
from copy import deepcopy
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Optional, Type

from fastapi import Body, Path, Query, Response, UploadFile
from fastapi.responses import FileResponse

@@ -16,6 +16,7 @@ from pydantic import AnyHttpUrl, BaseModel, ConfigDict, Field
from starlette.exceptions import HTTPException
from typing_extensions import Annotated

+from invokeai.app.services.model_images.model_images_common import ModelImageFileNotFoundException
from invokeai.app.services.model_install import ModelInstallJob
from invokeai.app.services.model_records import (
    DuplicateModelException,

@@ -52,6 +53,13 @@ class ModelsList(BaseModel):
    model_config = ConfigDict(use_enum_values=True)


+def add_cover_image_to_model_config(config: AnyModelConfig, dependencies: Type[ApiDependencies]) -> AnyModelConfig:
+    """Add a cover image URL to a model configuration."""
+    cover_image = dependencies.invoker.services.model_images.get_url(config.key)
+    config.cover_image = cover_image
+    return config


##############################################################################
# These are example inputs and outputs that are used in places where Swagger
# is unable to generate a correct example.

@@ -118,8 +126,7 @@ async def list_model_records(
            record_store.search_by_attr(model_type=model_type, model_name=model_name, model_format=model_format)
        )
    for model in found_models:
-        cover_image = ApiDependencies.invoker.services.model_images.get_url(model.key)
-        model.cover_image = cover_image
+        model = add_cover_image_to_model_config(model, ApiDependencies)
    return ModelsList(models=found_models)


@@ -160,12 +167,9 @@
    key: str = Path(description="Key of the model record to fetch."),
) -> AnyModelConfig:
    """Get a model record"""
-    record_store = ApiDependencies.invoker.services.model_manager.store
    try:
-        config: AnyModelConfig = record_store.get_model(key)
-        cover_image = ApiDependencies.invoker.services.model_images.get_url(key)
-        config.cover_image = cover_image
-        return config
+        config = ApiDependencies.invoker.services.model_manager.store.get_model(key)
+        return add_cover_image_to_model_config(config, ApiDependencies)
    except UnknownModelException as e:
        raise HTTPException(status_code=404, detail=str(e))

@@ -294,14 +298,15 @@
    installer = ApiDependencies.invoker.services.model_manager.install
    try:
        record_store.update_model(key, changes=changes)
-        model_response: AnyModelConfig = installer.sync_model_path(key)
+        config = installer.sync_model_path(key)
+        config = add_cover_image_to_model_config(config, ApiDependencies)
        logger.info(f"Updated model: {key}")
    except UnknownModelException as e:
        raise HTTPException(status_code=404, detail=str(e))
    except ValueError as e:
        logger.error(str(e))
        raise HTTPException(status_code=409, detail=str(e))
-    return model_response
+    return config


@model_manager_router.get(

@@ -648,6 +653,14 @@ async def convert_model(
        logger.error(str(e))
        raise HTTPException(status_code=409, detail=str(e))

+    # Update the model image if the model had one
+    try:
+        model_image = ApiDependencies.invoker.services.model_images.get(key)
+        ApiDependencies.invoker.services.model_images.save(model_image, new_key)
+        ApiDependencies.invoker.services.model_images.delete(key)
+    except ModelImageFileNotFoundException:
+        pass

    # delete the original safetensors file
    installer.delete(key)

@@ -655,7 +668,8 @@ async def convert_model(
        shutil.rmtree(cache_path)

    # return the config record for the new diffusers directory
-    new_config: AnyModelConfig = store.get_model(new_key)
+    new_config = store.get_model(new_key)
+    new_config = add_cover_image_to_model_config(new_config, ApiDependencies)
    return new_config
@@ -164,6 +164,12 @@ def custom_openapi() -> dict[str, Any]:
        for schema_key, schema_json in additional_schemas[1]["$defs"].items():
            openapi_schema["components"]["schemas"][schema_key] = schema_json

+    openapi_schema["components"]["schemas"]["InvocationOutputMap"] = {
+        "type": "object",
+        "properties": {},
+        "required": [],
+    }

    # Add a reference to the output type to additionalProperties of the invoker schema
    for invoker in all_invocations:
        invoker_name = invoker.__name__ # type: ignore [attr-defined] # this is a valid attribute

@@ -172,6 +178,8 @@
        invoker_schema = openapi_schema["components"]["schemas"][f"{invoker_name}"]
        outputs_ref = {"$ref": f"#/components/schemas/{output_type_title}"}
        invoker_schema["output"] = outputs_ref
+        openapi_schema["components"]["schemas"]["InvocationOutputMap"]["properties"][invoker.get_type()] = outputs_ref
+        openapi_schema["components"]["schemas"]["InvocationOutputMap"]["required"].append(invoker.get_type())
        invoker_schema["class"] = "invocation"

    # This code no longer seems to be necessary?
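The loop above builds a single `InvocationOutputMap` schema keyed by invocation type. Roughly, the resulting fragment of the OpenAPI document has the shape sketched below; the two entries shown are examples drawn from elsewhere in this diff, not an exhaustive or literal listing.

```python
# Illustrative shape of the generated schema (not literal output)
invocation_output_map = {
    "type": "object",
    "properties": {
        # one entry per invocation type, pointing at its output schema
        "controlnet": {"$ref": "#/components/schemas/ControlOutput"},
        "lora_selector": {"$ref": "#/components/schemas/LoRASelectorOutput"},
    },
    "required": ["controlnet", "lora_selector"],
}
```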
@@ -24,7 +24,6 @@ from pydantic import BaseModel, Field, field_validator, model_validator
from invokeai.app.invocations.fields import (
    FieldDescriptions,
    ImageField,
-    Input,
    InputField,
    OutputField,
    UIType,

@@ -80,13 +79,13 @@ class ControlOutput(BaseInvocationOutput):
    control: ControlField = OutputField(description=FieldDescriptions.control)


-@invocation("controlnet", title="ControlNet", tags=["controlnet"], category="controlnet", version="1.1.1")
+@invocation("controlnet", title="ControlNet", tags=["controlnet"], category="controlnet", version="1.1.2")
class ControlNetInvocation(BaseInvocation):
    """Collects ControlNet info to pass to other nodes"""

    image: ImageField = InputField(description="The control image")
    control_model: ModelIdentifierField = InputField(
-        description=FieldDescriptions.controlnet_model, input=Input.Direct, ui_type=UIType.ControlNetModel
+        description=FieldDescriptions.controlnet_model, ui_type=UIType.ControlNetModel
    )
    control_weight: Union[float, List[float]] = InputField(
        default=1.0, ge=-1, le=2, description="The weight given to the ControlNet"
@@ -1,6 +1,5 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

-from pathlib import Path
from typing import Literal, Optional

import cv2

@@ -504,7 +503,7 @@ class ImageInverseLerpInvocation(BaseInvocation, WithMetadata, WithBoard):
    title="Blur NSFW Image",
    tags=["image", "nsfw"],
    category="image",
-    version="1.2.2",
+    version="1.2.3",
)
class ImageNSFWBlurInvocation(BaseInvocation, WithMetadata, WithBoard):
    """Add blur to NSFW-flagged images"""

@@ -516,23 +515,12 @@ class ImageNSFWBlurInvocation(BaseInvocation, WithMetadata, WithBoard):

        logger = context.logger
        logger.debug("Running NSFW checker")
-        if SafetyChecker.has_nsfw_concept(image):
-            logger.info("A potentially NSFW image has been detected. Image will be blurred.")
-            blurry_image = image.filter(filter=ImageFilter.GaussianBlur(radius=32))
-            caution = self._get_caution_img()
-            blurry_image.paste(caution, (0, 0), caution)
-            image = blurry_image
+        image = SafetyChecker.blur_if_nsfw(image)

        image_dto = context.images.save(image=image)

        return ImageOutput.build(image_dto)

-    def _get_caution_img(self) -> Image.Image:
-        import invokeai.app.assets.images as image_assets
-
-        caution = Image.open(Path(image_assets.__path__[0]) / "caution.png")
-        return caution.resize((caution.width // 2, caution.height // 2))


@invocation(
    "img_watermark",
@@ -5,7 +5,7 @@ from pydantic import BaseModel, Field, field_validator, model_validator
from typing_extensions import Self

from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
-from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, TensorField, UIType
+from invokeai.app.invocations.fields import FieldDescriptions, InputField, OutputField, TensorField, UIType
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.invocations.primitives import ImageField
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights

@@ -58,7 +58,7 @@ class IPAdapterOutput(BaseInvocationOutput):
CLIP_VISION_MODEL_MAP = {"ViT-H": "ip_adapter_sd_image_encoder", "ViT-G": "ip_adapter_sdxl_image_encoder"}


-@invocation("ip_adapter", title="IP-Adapter", tags=["ip_adapter", "control"], category="ip_adapter", version="1.4.0")
+@invocation("ip_adapter", title="IP-Adapter", tags=["ip_adapter", "control"], category="ip_adapter", version="1.4.1")
class IPAdapterInvocation(BaseInvocation):
    """Collects IP-Adapter info to pass to other nodes."""

@@ -67,7 +67,6 @@ class IPAdapterInvocation(BaseInvocation):
    ip_adapter_model: ModelIdentifierField = InputField(
        description="The IP-Adapter model.",
        title="IP-Adapter Model",
-        input=Input.Direct,
        ui_order=-1,
        ui_type=UIType.IPAdapterModel,
    )

@@ -586,13 +586,6 @@ class DenoiseLatentsInvocation(BaseInvocation):
        unet: UNet2DConditionModel,
        scheduler: Scheduler,
    ) -> StableDiffusionGeneratorPipeline:
-        # TODO:
-        # configure_model_padding(
-        #    unet,
-        #    self.seamless,
-        #    self.seamless_axes,
-        # )

        class FakeVae:
            class FakeVaeConfig:
                def __init__(self) -> None:
@@ -11,6 +11,7 @@ from invokeai.backend.model_manager.config import AnyModelConfig, BaseModelType,
from .baseinvocation import (
    BaseInvocation,
    BaseInvocationOutput,
+    Classification,
    invocation,
    invocation_output,
)

@@ -93,19 +94,46 @@ class ModelLoaderOutput(UNetOutput, CLIPOutput, VAEOutput):
    pass


+@invocation_output("model_identifier_output")
+class ModelIdentifierOutput(BaseInvocationOutput):
+    """Model identifier output"""
+
+    model: ModelIdentifierField = OutputField(description="Model identifier", title="Model")
+
+
+@invocation(
+    "model_identifier",
+    title="Model identifier",
+    tags=["model"],
+    category="model",
+    version="1.0.0",
+    classification=Classification.Prototype,
+)
+class ModelIdentifierInvocation(BaseInvocation):
+    """Selects any model, outputting it its identifier. Be careful with this one! The identifier will be accepted as
+    input for any model, even if the model types don't match. If you connect this to a mismatched input, you'll get an
+    error."""
+
+    model: ModelIdentifierField = InputField(description="The model to select", title="Model")
+
+    def invoke(self, context: InvocationContext) -> ModelIdentifierOutput:
+        if not context.models.exists(self.model.key):
+            raise Exception(f"Unknown model {self.model.key}")
+
+        return ModelIdentifierOutput(model=self.model)


@invocation(
    "main_model_loader",
    title="Main Model",
    tags=["model"],
    category="model",
-    version="1.0.2",
+    version="1.0.3",
)
class MainModelLoaderInvocation(BaseInvocation):
    """Loads a main model, outputting its submodels."""

-    model: ModelIdentifierField = InputField(
-        description=FieldDescriptions.main_model, input=Input.Direct, ui_type=UIType.MainModel
-    )
+    model: ModelIdentifierField = InputField(description=FieldDescriptions.main_model, ui_type=UIType.MainModel)
    # TODO: precision?

    def invoke(self, context: InvocationContext) -> ModelLoaderOutput:
@@ -134,12 +162,12 @@ class LoRALoaderOutput(BaseInvocationOutput):
    clip: Optional[CLIPField] = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP")


-@invocation("lora_loader", title="LoRA", tags=["model"], category="model", version="1.0.2")
+@invocation("lora_loader", title="LoRA", tags=["model"], category="model", version="1.0.3")
class LoRALoaderInvocation(BaseInvocation):
    """Apply selected lora to unet and text_encoder."""

    lora: ModelIdentifierField = InputField(
-        description=FieldDescriptions.lora_model, input=Input.Direct, title="LoRA", ui_type=UIType.LoRAModel
+        description=FieldDescriptions.lora_model, title="LoRA", ui_type=UIType.LoRAModel
    )
    weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight)
    unet: Optional[UNetField] = InputField(

@@ -190,6 +218,75 @@ class LoRALoaderInvocation(BaseInvocation):
        return output


+@invocation_output("lora_selector_output")
+class LoRASelectorOutput(BaseInvocationOutput):
+    """Model loader output"""
+
+    lora: LoRAField = OutputField(description="LoRA model and weight", title="LoRA")
+
+
+@invocation("lora_selector", title="LoRA Selector", tags=["model"], category="model", version="1.0.1")
+class LoRASelectorInvocation(BaseInvocation):
+    """Selects a LoRA model and weight."""
+
+    lora: ModelIdentifierField = InputField(
+        description=FieldDescriptions.lora_model, title="LoRA", ui_type=UIType.LoRAModel
+    )
+    weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight)
+
+    def invoke(self, context: InvocationContext) -> LoRASelectorOutput:
+        return LoRASelectorOutput(lora=LoRAField(lora=self.lora, weight=self.weight))
+
+
+@invocation("lora_collection_loader", title="LoRA Collection Loader", tags=["model"], category="model", version="1.0.0")
+class LoRACollectionLoader(BaseInvocation):
+    """Applies a collection of LoRAs to the provided UNet and CLIP models."""
+
+    loras: LoRAField | list[LoRAField] = InputField(
+        description="LoRA models and weights. May be a single LoRA or collection.", title="LoRAs"
+    )
+    unet: Optional[UNetField] = InputField(
+        default=None,
+        description=FieldDescriptions.unet,
+        input=Input.Connection,
+        title="UNet",
+    )
+    clip: Optional[CLIPField] = InputField(
+        default=None,
+        description=FieldDescriptions.clip,
+        input=Input.Connection,
+        title="CLIP",
+    )
+
+    def invoke(self, context: InvocationContext) -> LoRALoaderOutput:
+        output = LoRALoaderOutput()
+        loras = self.loras if isinstance(self.loras, list) else [self.loras]
+        added_loras: list[str] = []
+
+        for lora in loras:
+            if lora.lora.key in added_loras:
+                continue
+
+            if not context.models.exists(lora.lora.key):
+                raise Exception(f"Unknown lora: {lora.lora.key}!")
+
+            assert lora.lora.base in (BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2)
+
+            added_loras.append(lora.lora.key)
+
+            if self.unet is not None:
+                if output.unet is None:
+                    output.unet = self.unet.model_copy(deep=True)
+                output.unet.loras.append(lora)
+
+            if self.clip is not None:
+                if output.clip is None:
+                    output.clip = self.clip.model_copy(deep=True)
+                output.clip.loras.append(lora)
+
+        return output


@invocation_output("sdxl_lora_loader_output")
class SDXLLoRALoaderOutput(BaseInvocationOutput):
    """SDXL LoRA Loader Output"""
@@ -204,13 +301,13 @@ class SDXLLoRALoaderOutput(BaseInvocationOutput):
    title="SDXL LoRA",
    tags=["lora", "model"],
    category="model",
-    version="1.0.2",
+    version="1.0.3",
)
class SDXLLoRALoaderInvocation(BaseInvocation):
    """Apply selected lora to unet and text_encoder."""

    lora: ModelIdentifierField = InputField(
-        description=FieldDescriptions.lora_model, input=Input.Direct, title="LoRA", ui_type=UIType.LoRAModel
+        description=FieldDescriptions.lora_model, title="LoRA", ui_type=UIType.LoRAModel
    )
    weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight)
    unet: Optional[UNetField] = InputField(

@@ -279,12 +376,78 @@ class SDXLLoRALoaderInvocation(BaseInvocation):
        return output


-@invocation("vae_loader", title="VAE", tags=["vae", "model"], category="model", version="1.0.2")
+@invocation(
+    "sdxl_lora_collection_loader",
+    title="SDXL LoRA Collection Loader",
+    tags=["model"],
+    category="model",
+    version="1.0.0",
+)
+class SDXLLoRACollectionLoader(BaseInvocation):
+    """Applies a collection of SDXL LoRAs to the provided UNet and CLIP models."""
+
+    loras: LoRAField | list[LoRAField] = InputField(
+        description="LoRA models and weights. May be a single LoRA or collection.", title="LoRAs"
+    )
+    unet: Optional[UNetField] = InputField(
+        default=None,
+        description=FieldDescriptions.unet,
+        input=Input.Connection,
+        title="UNet",
+    )
+    clip: Optional[CLIPField] = InputField(
+        default=None,
+        description=FieldDescriptions.clip,
+        input=Input.Connection,
+        title="CLIP",
+    )
+    clip2: Optional[CLIPField] = InputField(
+        default=None,
+        description=FieldDescriptions.clip,
+        input=Input.Connection,
+        title="CLIP 2",
+    )
+
+    def invoke(self, context: InvocationContext) -> SDXLLoRALoaderOutput:
+        output = SDXLLoRALoaderOutput()
+        loras = self.loras if isinstance(self.loras, list) else [self.loras]
+        added_loras: list[str] = []
+
+        for lora in loras:
+            if lora.lora.key in added_loras:
+                continue
+
+            if not context.models.exists(lora.lora.key):
+                raise Exception(f"Unknown lora: {lora.lora.key}!")
+
+            assert lora.lora.base is BaseModelType.StableDiffusionXL
+
+            added_loras.append(lora.lora.key)
+
+            if self.unet is not None:
+                if output.unet is None:
+                    output.unet = self.unet.model_copy(deep=True)
+                output.unet.loras.append(lora)
+
+            if self.clip is not None:
+                if output.clip is None:
+                    output.clip = self.clip.model_copy(deep=True)
+                output.clip.loras.append(lora)
+
+            if self.clip2 is not None:
+                if output.clip2 is None:
+                    output.clip2 = self.clip2.model_copy(deep=True)
+                output.clip2.loras.append(lora)
+
+        return output
+
+
+@invocation("vae_loader", title="VAE", tags=["vae", "model"], category="model", version="1.0.3")
class VAELoaderInvocation(BaseInvocation):
    """Loads a VAE model, outputting a VaeLoaderOutput"""

    vae_model: ModelIdentifierField = InputField(
-        description=FieldDescriptions.vae_model, input=Input.Direct, title="VAE", ui_type=UIType.VAEModel
+        description=FieldDescriptions.vae_model, title="VAE", ui_type=UIType.VAEModel
    )

    def invoke(self, context: InvocationContext) -> VAEOutput:
@@ -1,4 +1,4 @@
-from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
+from invokeai.app.invocations.fields import FieldDescriptions, InputField, OutputField, UIType
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager import SubModelType

@@ -30,12 +30,12 @@ class SDXLRefinerModelLoaderOutput(BaseInvocationOutput):
    vae: VAEField = OutputField(description=FieldDescriptions.vae, title="VAE")


-@invocation("sdxl_model_loader", title="SDXL Main Model", tags=["model", "sdxl"], category="model", version="1.0.2")
+@invocation("sdxl_model_loader", title="SDXL Main Model", tags=["model", "sdxl"], category="model", version="1.0.3")
class SDXLModelLoaderInvocation(BaseInvocation):
    """Loads an sdxl base model, outputting its submodels."""

    model: ModelIdentifierField = InputField(
-        description=FieldDescriptions.sdxl_main_model, input=Input.Direct, ui_type=UIType.SDXLMainModel
+        description=FieldDescriptions.sdxl_main_model, ui_type=UIType.SDXLMainModel
    )
    # TODO: precision?

@@ -67,13 +67,13 @@ class SDXLModelLoaderInvocation(BaseInvocation):
    title="SDXL Refiner Model",
    tags=["model", "sdxl", "refiner"],
    category="model",
-    version="1.0.2",
+    version="1.0.3",
)
class SDXLRefinerModelLoaderInvocation(BaseInvocation):
    """Loads an sdxl refiner model, outputting its submodels."""

    model: ModelIdentifierField = InputField(
-        description=FieldDescriptions.sdxl_refiner_model, input=Input.Direct, ui_type=UIType.SDXLRefinerModel
+        description=FieldDescriptions.sdxl_refiner_model, ui_type=UIType.SDXLRefinerModel
    )
    # TODO: precision?
@@ -8,7 +8,7 @@ from invokeai.app.invocations.baseinvocation import (
    invocation,
    invocation_output,
)
-from invokeai.app.invocations.fields import FieldDescriptions, ImageField, Input, InputField, OutputField, UIType
+from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, OutputField, UIType
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
from invokeai.app.services.shared.invocation_context import InvocationContext

@@ -45,7 +45,7 @@ class T2IAdapterOutput(BaseInvocationOutput):


@invocation(
-    "t2i_adapter", title="T2I-Adapter", tags=["t2i_adapter", "control"], category="t2i_adapter", version="1.0.2"
+    "t2i_adapter", title="T2I-Adapter", tags=["t2i_adapter", "control"], category="t2i_adapter", version="1.0.3"
)
class T2IAdapterInvocation(BaseInvocation):
    """Collects T2I-Adapter info to pass to other nodes."""

@@ -55,7 +55,6 @@ class T2IAdapterInvocation(BaseInvocation):
    t2i_adapter_model: ModelIdentifierField = InputField(
        description="The T2I-Adapter model.",
        title="T2I-Adapter Model",
-        input=Input.Direct,
        ui_order=-1,
        ui_type=UIType.T2IAdapterModel,
    )
@@ -122,6 +122,8 @@ class EventServiceBase:
        source_node_id: str,
        error_type: str,
        error: str,
+        user_id: str | None,
+        project_id: str | None,
    ) -> None:
        """Emitted when an invocation has completed"""
        self.__emit_queue_event(

@@ -135,6 +137,8 @@ class EventServiceBase:
                "source_node_id": source_node_id,
                "error_type": error_type,
                "error": error,
+                "user_id": user_id,
+                "project_id": project_id,
            },
        )
@@ -4,9 +4,6 @@ from typing import Optional

from PIL.Image import Image as PILImageType

-from invokeai.app.invocations.fields import MetadataField
-from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID


class ImageFileStorageBase(ABC):
    """Low-level service responsible for storing and retrieving image files."""

@@ -33,8 +30,9 @@ class ImageFileStorageBase(ABC):
        self,
        image: PILImageType,
        image_name: str,
-        metadata: Optional[MetadataField] = None,
-        workflow: Optional[WorkflowWithoutID] = None,
+        metadata: Optional[str] = None,
+        workflow: Optional[str] = None,
+        graph: Optional[str] = None,
        thumbnail_size: int = 256,
    ) -> None:
        """Saves an image and a 256x256 WEBP thumbnail. Returns a tuple of the image name, thumbnail name, and created timestamp."""

@@ -46,6 +44,11 @@ class ImageFileStorageBase(ABC):
        pass

    @abstractmethod
-    def get_workflow(self, image_name: str) -> Optional[WorkflowWithoutID]:
+    def get_workflow(self, image_name: str) -> Optional[str]:
        """Gets the workflow of an image."""
        pass

+    @abstractmethod
+    def get_graph(self, image_name: str) -> Optional[str]:
+        """Gets the graph of an image."""
+        pass
@@ -7,9 +7,7 @@ from PIL import Image, PngImagePlugin
from PIL.Image import Image as PILImageType
from send2trash import send2trash

-from invokeai.app.invocations.fields import MetadataField
from invokeai.app.services.invoker import Invoker
-from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID
from invokeai.app.util.thumbnails import get_thumbnail_name, make_thumbnail

from .image_files_base import ImageFileStorageBase

@@ -56,8 +54,9 @@ class DiskImageFileStorage(ImageFileStorageBase):
        self,
        image: PILImageType,
        image_name: str,
-        metadata: Optional[MetadataField] = None,
-        workflow: Optional[WorkflowWithoutID] = None,
+        metadata: Optional[str] = None,
+        workflow: Optional[str] = None,
+        graph: Optional[str] = None,
        thumbnail_size: int = 256,
    ) -> None:
        try:

@@ -68,13 +67,14 @@ class DiskImageFileStorage(ImageFileStorageBase):
            info_dict = {}

            if metadata is not None:
-                metadata_json = metadata.model_dump_json()
-                info_dict["invokeai_metadata"] = metadata_json
-                pnginfo.add_text("invokeai_metadata", metadata_json)
+                info_dict["invokeai_metadata"] = metadata
+                pnginfo.add_text("invokeai_metadata", metadata)
            if workflow is not None:
-                workflow_json = workflow.model_dump_json()
-                info_dict["invokeai_workflow"] = workflow_json
-                pnginfo.add_text("invokeai_workflow", workflow_json)
+                info_dict["invokeai_workflow"] = workflow
+                pnginfo.add_text("invokeai_workflow", workflow)
+            if graph is not None:
+                info_dict["invokeai_graph"] = graph
+                pnginfo.add_text("invokeai_graph", graph)

            # When saving the image, the image object's info field is not populated. We need to set it
            image.info = info_dict
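For reference, PNG text chunks like these are written with Pillow's PngInfo. A rough standalone sketch of the same mechanism (not the storage service itself; the payload is a hypothetical JSON string):

```python
from PIL import Image, PngImagePlugin

# Hypothetical image and already-serialized metadata payload
image = Image.new("RGB", (64, 64))
metadata_json = '{"seed": 123}'

pnginfo = PngImagePlugin.PngInfo()
pnginfo.add_text("invokeai_metadata", metadata_json)

# The text chunks are only persisted when passed to save()
image.save("example.png", pnginfo=pnginfo)

# Reading the file back exposes the chunks via Image.info
assert Image.open("example.png").info.get("invokeai_metadata") == metadata_json
```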
@@ -129,11 +129,18 @@ class DiskImageFileStorage(ImageFileStorageBase):
        path = path if isinstance(path, Path) else Path(path)
        return path.exists()

-    def get_workflow(self, image_name: str) -> WorkflowWithoutID | None:
+    def get_workflow(self, image_name: str) -> str | None:
        image = self.get(image_name)
        workflow = image.info.get("invokeai_workflow", None)
-        if workflow is not None:
-            return WorkflowWithoutID.model_validate_json(workflow)
+        if isinstance(workflow, str):
+            return workflow
        return None

+    def get_graph(self, image_name: str) -> str | None:
+        image = self.get(image_name)
+        graph = image.info.get("invokeai_graph", None)
+        if isinstance(graph, str):
+            return graph
+        return None

    def __validate_storage_folders(self) -> None:
@@ -80,7 +80,7 @@ class ImageRecordStorageBase(ABC):
        starred: Optional[bool] = False,
        session_id: Optional[str] = None,
        node_id: Optional[str] = None,
-        metadata: Optional[MetadataField] = None,
+        metadata: Optional[str] = None,
    ) -> datetime:
        """Saves an image record."""
        pass

@@ -328,10 +328,9 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
        starred: Optional[bool] = False,
        session_id: Optional[str] = None,
        node_id: Optional[str] = None,
-        metadata: Optional[MetadataField] = None,
+        metadata: Optional[str] = None,
    ) -> datetime:
        try:
-            metadata_json = metadata.model_dump_json() if metadata is not None else None
            self._lock.acquire()
            self._cursor.execute(
                """--sql

@@ -358,7 +357,7 @@
                    height,
                    node_id,
                    session_id,
-                    metadata_json,
+                    metadata,
                    is_intermediate,
                    starred,
                    has_workflow,
@@ -12,7 +12,6 @@ from invokeai.app.services.image_records.image_records_common import (
)
from invokeai.app.services.images.images_common import ImageDTO
from invokeai.app.services.shared.pagination import OffsetPaginatedResults
-from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID


class ImageServiceABC(ABC):

@@ -51,8 +50,9 @@ class ImageServiceABC(ABC):
        session_id: Optional[str] = None,
        board_id: Optional[str] = None,
        is_intermediate: Optional[bool] = False,
-        metadata: Optional[MetadataField] = None,
-        workflow: Optional[WorkflowWithoutID] = None,
+        metadata: Optional[str] = None,
+        workflow: Optional[str] = None,
+        graph: Optional[str] = None,
    ) -> ImageDTO:
        """Creates an image, storing the file and its metadata."""
        pass

@@ -87,7 +87,12 @@ class ImageServiceABC(ABC):
        pass

    @abstractmethod
-    def get_workflow(self, image_name: str) -> Optional[WorkflowWithoutID]:
+    def get_workflow(self, image_name: str) -> Optional[str]:
        """Gets an image's workflow."""
        pass

+    @abstractmethod
+    def get_graph(self, image_name: str) -> Optional[str]:
+        """Gets an image's workflow."""
+        pass
@@ -5,7 +5,6 @@ from PIL.Image import Image as PILImageType

from invokeai.app.invocations.fields import MetadataField
from invokeai.app.services.invoker import Invoker
from invokeai.app.services.shared.pagination import OffsetPaginatedResults
-from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID

from ..image_files.image_files_common import (
    ImageFileDeleteException,

@@ -42,8 +41,9 @@ class ImageService(ImageServiceABC):
        session_id: Optional[str] = None,
        board_id: Optional[str] = None,
        is_intermediate: Optional[bool] = False,
-        metadata: Optional[MetadataField] = None,
-        workflow: Optional[WorkflowWithoutID] = None,
+        metadata: Optional[str] = None,
+        workflow: Optional[str] = None,
+        graph: Optional[str] = None,
    ) -> ImageDTO:
        if image_origin not in ResourceOrigin:
            raise InvalidOriginException

@@ -64,7 +64,7 @@ class ImageService(ImageServiceABC):
                image_category=image_category,
                width=width,
                height=height,
-                has_workflow=workflow is not None,
+                has_workflow=workflow is not None or graph is not None,
                # Meta fields
                is_intermediate=is_intermediate,
                # Nullable fields

@@ -75,7 +75,7 @@ class ImageService(ImageServiceABC):
            if board_id is not None:
                self.__invoker.services.board_image_records.add_image_to_board(board_id=board_id, image_name=image_name)
            self.__invoker.services.image_files.save(
-                image_name=image_name, image=image, metadata=metadata, workflow=workflow
+                image_name=image_name, image=image, metadata=metadata, workflow=workflow, graph=graph
            )
            image_dto = self.get_dto(image_name)

@@ -157,7 +157,7 @@ class ImageService(ImageServiceABC):
            self.__invoker.services.logger.error("Problem getting image metadata")
            raise e

-    def get_workflow(self, image_name: str) -> Optional[WorkflowWithoutID]:
+    def get_workflow(self, image_name: str) -> Optional[str]:
        try:
            return self.__invoker.services.image_files.get_workflow(image_name)
        except ImageFileNotFoundException:

@@ -167,6 +167,16 @@ class ImageService(ImageServiceABC):
            self.__invoker.services.logger.error("Problem getting image workflow")
            raise

+    def get_graph(self, image_name: str) -> Optional[str]:
+        try:
+            return self.__invoker.services.image_files.get_graph(image_name)
+        except ImageFileNotFoundException:
+            self.__invoker.services.logger.error("Image file not found")
+            raise
+        except Exception:
+            self.__invoker.services.logger.error("Problem getting image graph")
+            raise

    def get_path(self, image_name: str, thumbnail: bool = False) -> str:
        try:
            return str(self.__invoker.services.image_files.get_path(image_name, thumbnail))
@@ -237,6 +237,8 @@ class DefaultSessionProcessor(SessionProcessorBase):
                    source_node_id=source_invocation_id,
                    error_type=e.__class__.__name__,
                    error=error,
+                    user_id=None,
+                    project_id=None,
                )
                pass
@@ -180,9 +180,9 @@ class ImagesInterface(InvocationContextInterface):
        # If `metadata` is provided directly, use that. Else, use the metadata provided by `WithMetadata`, falling back to None.
        metadata_ = None
        if metadata:
-            metadata_ = metadata
-        elif isinstance(self._data.invocation, WithMetadata):
-            metadata_ = self._data.invocation.metadata
+            metadata_ = metadata.model_dump_json()
+        elif isinstance(self._data.invocation, WithMetadata) and self._data.invocation.metadata:
+            metadata_ = self._data.invocation.metadata.model_dump_json()

        # If `board_id` is provided directly, use that. Else, use the board provided by `WithBoard`, falling back to None.
        board_id_ = None

@@ -191,6 +191,14 @@ class ImagesInterface(InvocationContextInterface):
        elif isinstance(self._data.invocation, WithBoard) and self._data.invocation.board:
            board_id_ = self._data.invocation.board.board_id

+        workflow_ = None
+        if self._data.queue_item.workflow:
+            workflow_ = self._data.queue_item.workflow.model_dump_json()

+        graph_ = None
+        if self._data.queue_item.session.graph:
+            graph_ = self._data.queue_item.session.graph.model_dump_json()

        return self._services.images.create(
            image=image,
            is_intermediate=self._data.invocation.is_intermediate,

@@ -198,7 +206,8 @@ class ImagesInterface(InvocationContextInterface):
            board_id=board_id_,
            metadata=metadata_,
            image_origin=ResourceOrigin.INTERNAL,
-            workflow=self._data.queue_item.workflow,
+            workflow=workflow_,
+            graph=graph_,
            session_id=self._data.queue_item.session_id,
            node_id=self._data.invocation.id,
        )
@@ -4,5 +4,4 @@ Initialization file for invokeai.backend.image_util methods.

from .infill_methods.patchmatch import PatchMatch # noqa: F401
from .pngwriter import PngWriter, PromptFormatter, retrieve_metadata, write_metadata # noqa: F401
-from .seamless import configure_model_padding # noqa: F401
from .util import InitImageResizer, make_grid # noqa: F401
@@ -8,7 +8,7 @@ from pathlib import Path

import numpy as np
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
-from PIL import Image
+from PIL import Image, ImageFilter
from transformers import AutoFeatureExtractor

import invokeai.backend.util.logging as logger

@@ -16,6 +16,7 @@ from invokeai.app.services.config.config_default import get_config
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.silence_warnings import SilenceWarnings

+repo_id = "CompVis/stable-diffusion-safety-checker"
CHECKER_PATH = "core/convert/stable-diffusion-safety-checker"


@@ -24,30 +25,30 @@ class SafetyChecker:
    Wrapper around SafetyChecker model.
    """

-    safety_checker = None
    feature_extractor = None
-    tried_load: bool = False
+    safety_checker = None

    @classmethod
    def _load_safety_checker(cls):
-        if cls.tried_load:
+        if cls.safety_checker is not None and cls.feature_extractor is not None:
            return

        try:
-            cls.safety_checker = StableDiffusionSafetyChecker.from_pretrained(get_config().models_path / CHECKER_PATH)
-            cls.feature_extractor = AutoFeatureExtractor.from_pretrained(get_config().models_path / CHECKER_PATH)
+            model_path = get_config().models_path / CHECKER_PATH
+            if model_path.exists():
+                cls.feature_extractor = AutoFeatureExtractor.from_pretrained(model_path)
+                cls.safety_checker = StableDiffusionSafetyChecker.from_pretrained(model_path)
+            else:
+                model_path.mkdir(parents=True, exist_ok=True)
+                cls.feature_extractor = AutoFeatureExtractor.from_pretrained(repo_id)
+                cls.feature_extractor.save_pretrained(model_path, safe_serialization=True)
+                cls.safety_checker = StableDiffusionSafetyChecker.from_pretrained(repo_id)
+                cls.safety_checker.save_pretrained(model_path, safe_serialization=True)
        except Exception as e:
            logger.warning(f"Could not load NSFW checker: {str(e)}")
-        cls.tried_load = True

    @classmethod
    def safety_checker_available(cls) -> bool:
        return Path(get_config().models_path, CHECKER_PATH).exists()

    @classmethod
    def has_nsfw_concept(cls, image: Image.Image) -> bool:
-        if not cls.safety_checker_available() and cls.tried_load:
-            return False
        cls._load_safety_checker()
        if cls.safety_checker is None or cls.feature_extractor is None:
            return False

@@ -60,3 +61,24 @@
        with SilenceWarnings():
            checked_image, has_nsfw_concept = cls.safety_checker(images=x_image, clip_input=features.pixel_values)
        return has_nsfw_concept[0]

+    @classmethod
+    def blur_if_nsfw(cls, image: Image.Image) -> Image.Image:
+        if cls.has_nsfw_concept(image):
+            logger.warning("A potentially NSFW image has been detected. Image will be blurred.")
+            blurry_image = image.filter(filter=ImageFilter.GaussianBlur(radius=32))
+            caution = cls._get_caution_img()
+            # Center the caution image on the blurred image
+            x = (blurry_image.width - caution.width) // 2
+            y = (blurry_image.height - caution.height) // 2
+            blurry_image.paste(caution, (x, y), caution)
+            image = blurry_image
+
+        return image
+
+    @classmethod
+    def _get_caution_img(cls) -> Image.Image:
+        import invokeai.app.assets.images as image_assets
+
+        caution = Image.open(Path(image_assets.__path__[0]) / "caution.png")
+        return caution.resize((caution.width // 2, caution.height // 2))
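A rough usage sketch of the new classmethod from a script (assumes the checker model can be loaded or downloaded as described above; the file names are hypothetical):

```python
from PIL import Image

from invokeai.backend.image_util.safety_checker import SafetyChecker

# Hypothetical input image; inside an invocation this would come from the invocation context instead
image = Image.open("candidate.png")

# Returns the original image, or a blurred copy with the caution overlay if NSFW content is detected
checked = SafetyChecker.blur_if_nsfw(image)
checked.save("checked.png")
```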
@@ -1,52 +0,0 @@
import torch.nn as nn


def _conv_forward_asymmetric(self, input, weight, bias):
    """
    Patch for Conv2d._conv_forward that supports asymmetric padding
    """
    working = nn.functional.pad(input, self.asymmetric_padding["x"], mode=self.asymmetric_padding_mode["x"])
    working = nn.functional.pad(working, self.asymmetric_padding["y"], mode=self.asymmetric_padding_mode["y"])
    return nn.functional.conv2d(
        working,
        weight,
        bias,
        self.stride,
        nn.modules.utils._pair(0),
        self.dilation,
        self.groups,
    )


def configure_model_padding(model, seamless, seamless_axes):
    """
    Modifies the 2D convolution layers to use a circular padding mode based on
    the `seamless` and `seamless_axes` options.
    """
    # TODO: get an explicit interface for this in diffusers: https://github.com/huggingface/diffusers/issues/556
    for m in model.modules():
        if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
            if seamless:
                m.asymmetric_padding_mode = {}
                m.asymmetric_padding = {}
                m.asymmetric_padding_mode["x"] = "circular" if ("x" in seamless_axes) else "constant"
                m.asymmetric_padding["x"] = (
                    m._reversed_padding_repeated_twice[0],
                    m._reversed_padding_repeated_twice[1],
                    0,
                    0,
                )
                m.asymmetric_padding_mode["y"] = "circular" if ("y" in seamless_axes) else "constant"
                m.asymmetric_padding["y"] = (
                    0,
                    0,
                    m._reversed_padding_repeated_twice[2],
                    m._reversed_padding_repeated_twice[3],
                )
                m._conv_forward = _conv_forward_asymmetric.__get__(m, nn.Conv2d)
            else:
                m._conv_forward = nn.Conv2d._conv_forward.__get__(m, nn.Conv2d)
                if hasattr(m, "asymmetric_padding_mode"):
                    del m.asymmetric_padding_mode
                if hasattr(m, "asymmetric_padding"):
                    del m.asymmetric_padding
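Both the helper removed above and the rewrite that follows rely on the same idea: switching a convolution's padding mode to "circular" makes it wrap around the image border, which is what produces seamlessly tileable outputs. A tiny standalone illustration of the two padding modes (not from the repository):

import torch
import torch.nn.functional as F

x = torch.arange(1.0, 5.0).reshape(1, 1, 1, 4)  # a single row: [1, 2, 3, 4]
# "circular" wraps values around the edge instead of padding with zeros.
print(F.pad(x, (1, 1, 0, 0), mode="circular"))  # last dim becomes [4, 1, 2, 3, 4, 1]
print(F.pad(x, (1, 1, 0, 0), mode="constant"))  # last dim becomes [0, 1, 2, 3, 4, 0]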
@@ -1,89 +1,51 @@
from __future__ import annotations

from contextlib import contextmanager
from typing import Callable, List, Union
from typing import Callable, List, Optional, Tuple, Union

import torch
import torch.nn as nn
from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL
from diffusers.models.autoencoders.autoencoder_tiny import AutoencoderTiny
from diffusers.models.lora import LoRACompatibleConv
from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel


def _conv_forward_asymmetric(self, input, weight, bias):
    """
    Patch for Conv2d._conv_forward that supports asymmetric padding
    """
    working = nn.functional.pad(input, self.asymmetric_padding["x"], mode=self.asymmetric_padding_mode["x"])
    working = nn.functional.pad(working, self.asymmetric_padding["y"], mode=self.asymmetric_padding_mode["y"])
    return nn.functional.conv2d(
        working,
        weight,
        bias,
        self.stride,
        nn.modules.utils._pair(0),
        self.dilation,
        self.groups,
    )


@contextmanager
def set_seamless(model: Union[UNet2DConditionModel, AutoencoderKL, AutoencoderTiny], seamless_axes: List[str]):
    if not seamless_axes:
        yield
        return

    # Callable: (input: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor
    to_restore: list[tuple[nn.Conv2d | nn.ConvTranspose2d, Callable]] = []
    # override conv_forward
    # https://github.com/huggingface/diffusers/issues/556#issuecomment-1993287019
    def _conv_forward_asymmetric(self, input: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor] = None):
        self.paddingX = (self._reversed_padding_repeated_twice[0], self._reversed_padding_repeated_twice[1], 0, 0)
        self.paddingY = (0, 0, self._reversed_padding_repeated_twice[2], self._reversed_padding_repeated_twice[3])
        working = torch.nn.functional.pad(input, self.paddingX, mode=x_mode)
        working = torch.nn.functional.pad(working, self.paddingY, mode=y_mode)
        return torch.nn.functional.conv2d(
            working, weight, bias, self.stride, torch.nn.modules.utils._pair(0), self.dilation, self.groups
        )

    original_layers: List[Tuple[nn.Conv2d, Callable]] = []

    try:
        # Hard coded to skip down block layers, allowing for seamless tiling at the expense of prompt adherence
        skipped_layers = 1
        for m_name, m in model.named_modules():
            if not isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                continue
        x_mode = "circular" if "x" in seamless_axes else "constant"
        y_mode = "circular" if "y" in seamless_axes else "constant"

            if isinstance(model, UNet2DConditionModel) and m_name.startswith("down_blocks.") and ".resnets." in m_name:
                # down_blocks.1.resnets.1.conv1
                _, block_num, _, resnet_num, submodule_name = m_name.split(".")
                block_num = int(block_num)
                resnet_num = int(resnet_num)
        conv_layers: List[torch.nn.Conv2d] = []

                if block_num >= len(model.down_blocks) - skipped_layers:
                    continue
        for module in model.modules():
            if isinstance(module, torch.nn.Conv2d):
                conv_layers.append(module)

                # Skip the second resnet (could be configurable)
                if resnet_num > 0:
                    continue

                # Skip Conv2d layers (could be configurable)
                if submodule_name == "conv2":
                    continue

            m.asymmetric_padding_mode = {}
            m.asymmetric_padding = {}
            m.asymmetric_padding_mode["x"] = "circular" if ("x" in seamless_axes) else "constant"
            m.asymmetric_padding["x"] = (
                m._reversed_padding_repeated_twice[0],
                m._reversed_padding_repeated_twice[1],
                0,
                0,
            )
            m.asymmetric_padding_mode["y"] = "circular" if ("y" in seamless_axes) else "constant"
            m.asymmetric_padding["y"] = (
                0,
                0,
                m._reversed_padding_repeated_twice[2],
                m._reversed_padding_repeated_twice[3],
            )

            to_restore.append((m, m._conv_forward))
            m._conv_forward = _conv_forward_asymmetric.__get__(m, nn.Conv2d)
        for layer in conv_layers:
            if isinstance(layer, LoRACompatibleConv) and layer.lora_layer is None:
                layer.lora_layer = lambda *x: 0
            original_layers.append((layer, layer._conv_forward))
            layer._conv_forward = _conv_forward_asymmetric.__get__(layer, torch.nn.Conv2d)

        yield

    finally:
        for module, orig_conv_forward in to_restore:
            module._conv_forward = orig_conv_forward
            if hasattr(module, "asymmetric_padding_mode"):
                del module.asymmetric_padding_mode
            if hasattr(module, "asymmetric_padding"):
                del module.asymmetric_padding
        for layer, orig_conv_forward in original_layers:
            layer._conv_forward = orig_conv_forward
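Since `set_seamless` is a context manager rather than a permanent model patch, a caller wraps the generation step in it, roughly like this (a minimal sketch; the module path and the denoising call are assumptions, not from the diff):

from invokeai.backend.stable_diffusion.seamless import set_seamless  # assumed module path

# `unet` is a loaded diffusers UNet2DConditionModel; tile along the x axis only.
with set_seamless(unet, seamless_axes=["x"]):
    latents = run_denoising(latents)  # hypothetical denoising call
# On exit, every patched Conv2d gets its original _conv_forward restored in the finally block.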
@@ -10,6 +10,8 @@ module.exports = {
    'path/no-relative-imports': ['error', { maxDepth: 0 }],
    // https://github.com/edvardchen/eslint-plugin-i18next/blob/HEAD/docs/rules/no-literal-string.md
    'i18next/no-literal-string': 'error',
    // https://eslint.org/docs/latest/rules/no-console
    'no-console': 'error',
  },
  overrides: [
    /**
invokeai/frontend/web/.gitignore
@@ -43,4 +43,5 @@ stats.html
yalc.lock

# vitest
tsconfig.vitest-temp.json
tsconfig.vitest-temp.json
coverage/
@@ -35,6 +35,7 @@
    "storybook": "storybook dev -p 6006",
    "build-storybook": "storybook build",
    "test": "vitest",
    "test:ui": "vitest --coverage --ui",
    "test:no-watch": "vitest --no-watch"
  },
  "madge": {
@@ -132,6 +133,8 @@
    "@types/react-dom": "^18.3.0",
    "@types/uuid": "^9.0.8",
    "@vitejs/plugin-react-swc": "^3.6.0",
    "@vitest/coverage-v8": "^1.5.0",
    "@vitest/ui": "^1.5.0",
    "concurrently": "^8.2.2",
    "dpdm": "^3.14.0",
    "eslint": "^8.57.0",
invokeai/frontend/web/pnpm-lock.yaml
@@ -229,6 +229,12 @@ devDependencies:
|
||||
'@vitejs/plugin-react-swc':
|
||||
specifier: ^3.6.0
|
||||
version: 3.6.0(vite@5.2.11)
|
||||
'@vitest/coverage-v8':
|
||||
specifier: ^1.5.0
|
||||
version: 1.6.0(vitest@1.6.0)
|
||||
'@vitest/ui':
|
||||
specifier: ^1.5.0
|
||||
version: 1.6.0(vitest@1.6.0)
|
||||
concurrently:
|
||||
specifier: ^8.2.2
|
||||
version: 8.2.2
|
||||
@@ -288,7 +294,7 @@ devDependencies:
|
||||
version: 4.3.2(typescript@5.4.5)(vite@5.2.11)
|
||||
vitest:
|
||||
specifier: ^1.6.0
|
||||
version: 1.6.0(@types/node@20.12.10)
|
||||
version: 1.6.0(@types/node@20.12.10)(@vitest/ui@1.6.0)
|
||||
|
||||
packages:
|
||||
|
||||
@@ -1679,6 +1685,10 @@ packages:
|
||||
resolution: {integrity: sha512-4iri8i1AqYHJE2DstZYkyEprg6Pq6sKx3xn5FpySk9sNhH7qN2LLlHJCfDTZRILNwQNPD7mATWM0TBui7uC1pA==}
|
||||
dev: true
|
||||
|
||||
/@bcoe/v8-coverage@0.2.3:
|
||||
resolution: {integrity: sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==}
|
||||
dev: true
|
||||
|
||||
/@chakra-ui/accordion@2.3.1(@chakra-ui/system@2.6.2)(framer-motion@10.18.0)(react@18.3.1):
|
||||
resolution: {integrity: sha512-FSXRm8iClFyU+gVaXisOSEw0/4Q+qZbFRiuhIAkVU6Boj0FxAMrlo9a8AV5TuF77rgaHytCdHk0Ng+cyUijrag==}
|
||||
peerDependencies:
|
||||
@@ -3635,6 +3645,11 @@ packages:
|
||||
wrap-ansi-cjs: /wrap-ansi@7.0.0
|
||||
dev: true
|
||||
|
||||
/@istanbuljs/schema@0.1.3:
|
||||
resolution: {integrity: sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==}
|
||||
engines: {node: '>=8'}
|
||||
dev: true
|
||||
|
||||
/@jest/schemas@29.6.3:
|
||||
resolution: {integrity: sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==}
|
||||
engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0}
|
||||
@@ -3822,6 +3837,10 @@ packages:
|
||||
dev: true
|
||||
optional: true
|
||||
|
||||
/@polka/url@1.0.0-next.25:
|
||||
resolution: {integrity: sha512-j7P6Rgr3mmtdkeDGTe0E/aYyWEWVtc5yFXtHCRHs28/jptDEWfaVOc5T7cblqy1XKPPfCxJc/8DwQ5YgLOZOVQ==}
|
||||
dev: true
|
||||
|
||||
/@popperjs/core@2.11.8:
|
||||
resolution: {integrity: sha512-P1st0aksCrn9sGZhp8GMYwBnQsbvAWsZAX44oXNNvLHGqAOcoVxmjZiohstwQ7SqKnbR47akdNi+uleWD8+g6A==}
|
||||
dev: false
|
||||
@@ -5146,7 +5165,7 @@ packages:
|
||||
dom-accessibility-api: 0.6.3
|
||||
lodash: 4.17.21
|
||||
redent: 3.0.0
|
||||
vitest: 1.6.0(@types/node@20.12.10)
|
||||
vitest: 1.6.0(@types/node@20.12.10)(@vitest/ui@1.6.0)
|
||||
dev: true
|
||||
|
||||
/@testing-library/user-event@14.5.2(@testing-library/dom@9.3.4):
|
||||
@@ -5825,6 +5844,29 @@ packages:
|
||||
- '@swc/helpers'
|
||||
dev: true
|
||||
|
||||
/@vitest/coverage-v8@1.6.0(vitest@1.6.0):
|
||||
resolution: {integrity: sha512-KvapcbMY/8GYIG0rlwwOKCVNRc0OL20rrhFkg/CHNzncV03TE2XWvO5w9uZYoxNiMEBacAJt3unSOiZ7svePew==}
|
||||
peerDependencies:
|
||||
vitest: 1.6.0
|
||||
dependencies:
|
||||
'@ampproject/remapping': 2.3.0
|
||||
'@bcoe/v8-coverage': 0.2.3
|
||||
debug: 4.3.4
|
||||
istanbul-lib-coverage: 3.2.2
|
||||
istanbul-lib-report: 3.0.1
|
||||
istanbul-lib-source-maps: 5.0.4
|
||||
istanbul-reports: 3.1.7
|
||||
magic-string: 0.30.10
|
||||
magicast: 0.3.4
|
||||
picocolors: 1.0.0
|
||||
std-env: 3.7.0
|
||||
strip-literal: 2.1.0
|
||||
test-exclude: 6.0.0
|
||||
vitest: 1.6.0(@types/node@20.12.10)(@vitest/ui@1.6.0)
|
||||
transitivePeerDependencies:
|
||||
- supports-color
|
||||
dev: true
|
||||
|
||||
/@vitest/expect@1.3.1:
|
||||
resolution: {integrity: sha512-xofQFwIzfdmLLlHa6ag0dPV8YsnKOCP1KdAeVVh34vSjN2dcUiXYCD9htu/9eM7t8Xln4v03U9HLxLpPlsXdZw==}
|
||||
dependencies:
|
||||
@@ -5869,6 +5911,21 @@ packages:
|
||||
tinyspy: 2.2.1
|
||||
dev: true
|
||||
|
||||
/@vitest/ui@1.6.0(vitest@1.6.0):
|
||||
resolution: {integrity: sha512-k3Lyo+ONLOgylctiGovRKy7V4+dIN2yxstX3eY5cWFXH6WP+ooVX79YSyi0GagdTQzLmT43BF27T0s6dOIPBXA==}
|
||||
peerDependencies:
|
||||
vitest: 1.6.0
|
||||
dependencies:
|
||||
'@vitest/utils': 1.6.0
|
||||
fast-glob: 3.3.2
|
||||
fflate: 0.8.2
|
||||
flatted: 3.3.1
|
||||
pathe: 1.1.2
|
||||
picocolors: 1.0.0
|
||||
sirv: 2.0.4
|
||||
vitest: 1.6.0(@types/node@20.12.10)(@vitest/ui@1.6.0)
|
||||
dev: true
|
||||
|
||||
/@vitest/utils@1.3.1:
|
||||
resolution: {integrity: sha512-d3Waie/299qqRyHTm2DjADeTaNdNSVsnwHPWrs20JMpjh6eiVq7ggggweO8rc4arhf6rRkWuHKwvxGvejUXZZQ==}
|
||||
dependencies:
|
||||
@@ -8521,6 +8578,10 @@ packages:
|
||||
resolution: {integrity: sha512-3yurQZ2hD9VISAhJJP9bpYFNQrHHBXE2JxxjY5aLEcDi46RmAzJE2OC9FAde0yis5ElW0jTTzs0zfg/Cca4XqQ==}
|
||||
dev: true
|
||||
|
||||
/fflate@0.8.2:
|
||||
resolution: {integrity: sha512-cPJU47OaAoCbg0pBvzsgpTPhmhqI5eJjh/JIu8tPj5q+T7iLvW/JAYUqmE7KOB4R1ZyEhzBaIQpQpardBF5z8A==}
|
||||
dev: true
|
||||
|
||||
/file-entry-cache@6.0.1:
|
||||
resolution: {integrity: sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==}
|
||||
engines: {node: ^10.12.0 || >=12.0.0}
|
||||
@@ -9084,6 +9145,10 @@ packages:
|
||||
resolution: {integrity: sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==}
|
||||
dev: true
|
||||
|
||||
/html-escaper@2.0.2:
|
||||
resolution: {integrity: sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==}
|
||||
dev: true
|
||||
|
||||
/html-parse-stringify@3.0.1:
|
||||
resolution: {integrity: sha512-KknJ50kTInJ7qIScF3jeaFRpMpE8/lfiTdzf/twXyPBLAGrLRTmkz3AdTnKeh40X8k9L2fdYwEp/42WGXIRGcg==}
|
||||
dependencies:
|
||||
@@ -9513,6 +9578,39 @@ packages:
|
||||
engines: {node: '>=0.10.0'}
|
||||
dev: true
|
||||
|
||||
/istanbul-lib-coverage@3.2.2:
|
||||
resolution: {integrity: sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==}
|
||||
engines: {node: '>=8'}
|
||||
dev: true
|
||||
|
||||
/istanbul-lib-report@3.0.1:
|
||||
resolution: {integrity: sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==}
|
||||
engines: {node: '>=10'}
|
||||
dependencies:
|
||||
istanbul-lib-coverage: 3.2.2
|
||||
make-dir: 4.0.0
|
||||
supports-color: 7.2.0
|
||||
dev: true
|
||||
|
||||
/istanbul-lib-source-maps@5.0.4:
|
||||
resolution: {integrity: sha512-wHOoEsNJTVltaJp8eVkm8w+GVkVNHT2YDYo53YdzQEL2gWm1hBX5cGFR9hQJtuGLebidVX7et3+dmDZrmclduw==}
|
||||
engines: {node: '>=10'}
|
||||
dependencies:
|
||||
'@jridgewell/trace-mapping': 0.3.25
|
||||
debug: 4.3.4
|
||||
istanbul-lib-coverage: 3.2.2
|
||||
transitivePeerDependencies:
|
||||
- supports-color
|
||||
dev: true
|
||||
|
||||
/istanbul-reports@3.1.7:
|
||||
resolution: {integrity: sha512-BewmUXImeuRk2YY0PVbxgKAysvhRPUQE0h5QRM++nVWyubKGV0l8qQ5op8+B2DOmwSe63Jivj0BjkPQVf8fP5g==}
|
||||
engines: {node: '>=8'}
|
||||
dependencies:
|
||||
html-escaper: 2.0.2
|
||||
istanbul-lib-report: 3.0.1
|
||||
dev: true
|
||||
|
||||
/iterable-lookahead@1.0.0:
|
||||
resolution: {integrity: sha512-hJnEP2Xk4+44DDwJqUQGdXal5VbyeWLaPyDl2AQc242Zr7iqz4DgpQOrEzglWVMGHMDCkguLHEKxd1+rOsmgSQ==}
|
||||
engines: {node: '>=4'}
|
||||
@@ -9912,6 +10010,14 @@ packages:
|
||||
'@jridgewell/sourcemap-codec': 1.4.15
|
||||
dev: true
|
||||
|
||||
/magicast@0.3.4:
|
||||
resolution: {integrity: sha512-TyDF/Pn36bBji9rWKHlZe+PZb6Mx5V8IHCSxk7X4aljM4e/vyDvZZYwHewdVaqiA0nb3ghfHU/6AUpDxWoER2Q==}
|
||||
dependencies:
|
||||
'@babel/parser': 7.24.5
|
||||
'@babel/types': 7.24.5
|
||||
source-map-js: 1.2.0
|
||||
dev: true
|
||||
|
||||
/make-dir@2.1.0:
|
||||
resolution: {integrity: sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==}
|
||||
engines: {node: '>=6'}
|
||||
@@ -9927,6 +10033,13 @@ packages:
|
||||
semver: 6.3.1
|
||||
dev: true
|
||||
|
||||
/make-dir@4.0.0:
|
||||
resolution: {integrity: sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==}
|
||||
engines: {node: '>=10'}
|
||||
dependencies:
|
||||
semver: 7.6.0
|
||||
dev: true
|
||||
|
||||
/map-obj@2.0.0:
|
||||
resolution: {integrity: sha512-TzQSV2DiMYgoF5RycneKVUzIa9bQsj/B3tTgsE3dOGqlzHnGIDaC7XBE7grnA+8kZPnfqSGFe95VHc2oc0VFUQ==}
|
||||
engines: {node: '>=4'}
|
||||
@@ -10101,6 +10214,11 @@ packages:
|
||||
resolution: {integrity: sha512-iSAJLHYKnX41mKcJKjqvnAN9sf0LMDTXDEvFv+ffuRR9a1MIuXLjMNL6EsnDHSkKLTWNqQQ5uo61P4EbU4NU+Q==}
|
||||
dev: false
|
||||
|
||||
/mrmime@2.0.0:
|
||||
resolution: {integrity: sha512-eu38+hdgojoyq63s+yTpN4XMBdt5l8HhMhc4VKLO9KM5caLIBvUm4thi7fFaxyTmCKeNnXZ5pAlBwCUnhA09uw==}
|
||||
engines: {node: '>=10'}
|
||||
dev: true
|
||||
|
||||
/ms@2.0.0:
|
||||
resolution: {integrity: sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==}
|
||||
dev: true
|
||||
@@ -11766,6 +11884,15 @@ packages:
|
||||
engines: {node: '>=14'}
|
||||
dev: true
|
||||
|
||||
/sirv@2.0.4:
|
||||
resolution: {integrity: sha512-94Bdh3cC2PKrbgSOUqTiGPWVZeSiXfKOVZNJniWoqrWrRkB1CJzBU3NEbiTsPcYy1lDsANA/THzS+9WBiy5nfQ==}
|
||||
engines: {node: '>= 10'}
|
||||
dependencies:
|
||||
'@polka/url': 1.0.0-next.25
|
||||
mrmime: 2.0.0
|
||||
totalist: 3.0.1
|
||||
dev: true
|
||||
|
||||
/sisteransi@1.0.5:
|
||||
resolution: {integrity: sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==}
|
||||
dev: true
|
||||
@@ -12191,6 +12318,15 @@ packages:
|
||||
unique-string: 2.0.0
|
||||
dev: true
|
||||
|
||||
/test-exclude@6.0.0:
|
||||
resolution: {integrity: sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==}
|
||||
engines: {node: '>=8'}
|
||||
dependencies:
|
||||
'@istanbuljs/schema': 0.1.3
|
||||
glob: 7.2.3
|
||||
minimatch: 3.1.2
|
||||
dev: true
|
||||
|
||||
/text-table@0.2.0:
|
||||
resolution: {integrity: sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==}
|
||||
dev: true
|
||||
@@ -12264,6 +12400,11 @@ packages:
|
||||
engines: {node: '>=0.6'}
|
||||
dev: true
|
||||
|
||||
/totalist@3.0.1:
|
||||
resolution: {integrity: sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ==}
|
||||
engines: {node: '>=6'}
|
||||
dev: true
|
||||
|
||||
/tr46@0.0.3:
|
||||
resolution: {integrity: sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==}
|
||||
|
||||
@@ -12837,7 +12978,7 @@ packages:
|
||||
fsevents: 2.3.3
|
||||
dev: true
|
||||
|
||||
/vitest@1.6.0(@types/node@20.12.10):
|
||||
/vitest@1.6.0(@types/node@20.12.10)(@vitest/ui@1.6.0):
|
||||
resolution: {integrity: sha512-H5r/dN06swuFnzNFhq/dnz37bPXnq8xB2xB5JOVk8K09rUtoeNN+LHWkoQ0A/i3hvbUKKcCei9KpbxqHMLhLLA==}
|
||||
engines: {node: ^18.0.0 || >=20.0.0}
|
||||
hasBin: true
|
||||
@@ -12867,6 +13008,7 @@ packages:
|
||||
'@vitest/runner': 1.6.0
|
||||
'@vitest/snapshot': 1.6.0
|
||||
'@vitest/spy': 1.6.0
|
||||
'@vitest/ui': 1.6.0(vitest@1.6.0)
|
||||
'@vitest/utils': 1.6.0
|
||||
acorn-walk: 8.3.2
|
||||
chai: 4.4.1
|
||||
|
||||
@@ -76,7 +76,9 @@
|
||||
"aboutHeading": "Nutzen Sie Ihre kreative Energie",
|
||||
"toResolve": "Lösen",
|
||||
"add": "Hinzufügen",
|
||||
"loglevel": "Protokoll Stufe"
|
||||
"loglevel": "Protokoll Stufe",
|
||||
"selected": "Ausgewählt",
|
||||
"beta": "Beta"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Bildgröße",
|
||||
@@ -86,7 +88,7 @@
|
||||
"noImagesInGallery": "Keine Bilder in der Galerie",
|
||||
"loading": "Lade",
|
||||
"deleteImage_one": "Lösche Bild",
|
||||
"deleteImage_other": "",
|
||||
"deleteImage_other": "Lösche {{count}} Bilder",
|
||||
"copy": "Kopieren",
|
||||
"download": "Runterladen",
|
||||
"setCurrentImage": "Setze aktuelle Bild",
|
||||
@@ -397,7 +399,14 @@
|
||||
"cancel": "Stornieren",
|
||||
"defaultSettingsSaved": "Standardeinstellungen gespeichert",
|
||||
"addModels": "Model hinzufügen",
|
||||
"deleteModelImage": "Lösche Model Bild"
|
||||
"deleteModelImage": "Lösche Model Bild",
|
||||
"hfTokenInvalidErrorMessage": "Falscher oder fehlender HuggingFace Schlüssel.",
|
||||
"huggingFaceRepoID": "HuggingFace Repo ID",
|
||||
"hfToken": "HuggingFace Schlüssel",
|
||||
"hfTokenInvalid": "Falscher oder fehlender HF Schlüssel",
|
||||
"huggingFacePlaceholder": "besitzer/model-name",
|
||||
"hfTokenSaved": "HF Schlüssel gespeichert",
|
||||
"hfTokenUnableToVerify": "Konnte den HF Schlüssel nicht validieren"
|
||||
},
|
||||
"parameters": {
|
||||
"images": "Bilder",
|
||||
@@ -686,7 +695,11 @@
|
||||
"hands": "Hände",
|
||||
"dwOpenpose": "DW Openpose",
|
||||
"dwOpenposeDescription": "Posenschätzung mit DW Openpose",
|
||||
"selectCLIPVisionModel": "Wähle ein CLIP Vision Model aus"
|
||||
"selectCLIPVisionModel": "Wähle ein CLIP Vision Model aus",
|
||||
"ipAdapterMethod": "Methode",
|
||||
"composition": "Nur Komposition",
|
||||
"full": "Voll",
|
||||
"style": "Nur Style"
|
||||
},
|
||||
"queue": {
|
||||
"status": "Status",
|
||||
@@ -717,7 +730,6 @@
|
||||
"resume": "Wieder aufnehmen",
|
||||
"item": "Auftrag",
|
||||
"notReady": "Warteschlange noch nicht bereit",
|
||||
"queueCountPrediction": "{{promptsCount}} Prompts × {{iterations}} Iterationen -> {{count}} Generationen",
|
||||
"clearQueueAlertDialog": "\"Die Warteschlange leeren\" stoppt den aktuellen Prozess und leert die Warteschlange komplett.",
|
||||
"completedIn": "Fertig in",
|
||||
"cancelBatchSucceeded": "Stapel abgebrochen",
|
||||
|
||||
@@ -261,7 +261,6 @@
|
||||
"queue": "Queue",
|
||||
"queueFront": "Add to Front of Queue",
|
||||
"queueBack": "Add to Queue",
|
||||
"queueCountPrediction": "{{promptsCount}} prompts \u00d7 {{iterations}} iterations -> {{count}} generations",
|
||||
"queueEmpty": "Queue Empty",
|
||||
"enqueueing": "Queueing Batch",
|
||||
"resume": "Resume",
|
||||
@@ -314,7 +313,13 @@
|
||||
"batchFailedToQueue": "Failed to Queue Batch",
|
||||
"graphQueued": "Graph queued",
|
||||
"graphFailedToQueue": "Failed to queue graph",
|
||||
"openQueue": "Open Queue"
|
||||
"openQueue": "Open Queue",
|
||||
"prompts_one": "Prompt",
|
||||
"prompts_other": "Prompts",
|
||||
"iterations_one": "Iteration",
|
||||
"iterations_other": "Iterations",
|
||||
"generations_one": "Generation",
|
||||
"generations_other": "Generations"
|
||||
},
|
||||
"invocationCache": {
|
||||
"invocationCache": "Invocation Cache",
|
||||
@@ -769,10 +774,15 @@
|
||||
"cannotConnectOutputToOutput": "Cannot connect output to output",
|
||||
"cannotConnectToSelf": "Cannot connect to self",
|
||||
"cannotDuplicateConnection": "Cannot create duplicate connections",
|
||||
"cannotMixAndMatchCollectionItemTypes": "Cannot mix and match collection item types",
|
||||
"missingNode": "Missing invocation node",
|
||||
"missingInvocationTemplate": "Missing invocation template",
|
||||
"missingFieldTemplate": "Missing field template",
|
||||
"nodePack": "Node pack",
|
||||
"collection": "Collection",
|
||||
"collectionFieldType": "{{name}} Collection",
|
||||
"collectionOrScalarFieldType": "{{name}} Collection|Scalar",
|
||||
"singleFieldType": "{{name}} (Single)",
|
||||
"collectionFieldType": "{{name}} (Collection)",
|
||||
"collectionOrScalarFieldType": "{{name}} (Single or Collection)",
|
||||
"colorCodeEdges": "Color-Code Edges",
|
||||
"colorCodeEdgesHelp": "Color-code edges according to their connected fields",
|
||||
"connectionWouldCreateCycle": "Connection would create a cycle",
|
||||
@@ -874,6 +884,7 @@
|
||||
"versionUnknown": " Version Unknown",
|
||||
"workflow": "Workflow",
|
||||
"graph": "Graph",
|
||||
"noGraph": "No Graph",
|
||||
"workflowAuthor": "Author",
|
||||
"workflowContact": "Contact",
|
||||
"workflowDescription": "Short Description",
|
||||
@@ -934,7 +945,20 @@
|
||||
"noModelSelected": "No model selected",
|
||||
"noPrompts": "No prompts generated",
|
||||
"noNodesInGraph": "No nodes in graph",
|
||||
"systemDisconnected": "System disconnected"
|
||||
"systemDisconnected": "System disconnected",
|
||||
"layer": {
|
||||
"initialImageNoImageSelected": "no initial image selected",
|
||||
"controlAdapterNoModelSelected": "no Control Adapter model selected",
|
||||
"controlAdapterIncompatibleBaseModel": "incompatible Control Adapter base model",
|
||||
"controlAdapterNoImageSelected": "no Control Adapter image selected",
|
||||
"controlAdapterImageNotProcessed": "Control Adapter image not processed",
|
||||
"t2iAdapterIncompatibleDimensions": "T2I Adapter requires image dimension to be multiples of {{multiple}}",
|
||||
"ipAdapterNoModelSelected": "no IP adapter selected",
|
||||
"ipAdapterIncompatibleBaseModel": "incompatible IP Adapter base model",
|
||||
"ipAdapterNoImageSelected": "no IP Adapter image selected",
|
||||
"rgNoPromptsOrIPAdapters": "no text prompts or IP Adapters",
|
||||
"rgNoRegion": "no region selected"
|
||||
}
|
||||
},
|
||||
"maskBlur": "Mask Blur",
|
||||
"negativePromptPlaceholder": "Negative Prompt",
|
||||
@@ -945,8 +969,6 @@
|
||||
"positivePromptPlaceholder": "Positive Prompt",
|
||||
"globalPositivePromptPlaceholder": "Global Positive Prompt",
|
||||
"iterations": "Iterations",
|
||||
"iterationsWithCount_one": "{{count}} Iteration",
|
||||
"iterationsWithCount_other": "{{count}} Iterations",
|
||||
"scale": "Scale",
|
||||
"scaleBeforeProcessing": "Scale Before Processing",
|
||||
"scaledHeight": "Scaled H",
|
||||
|
||||
@@ -25,7 +25,24 @@
|
||||
"areYouSure": "¿Estas seguro?",
|
||||
"batch": "Administrador de lotes",
|
||||
"modelManager": "Administrador de modelos",
|
||||
"communityLabel": "Comunidad"
|
||||
"communityLabel": "Comunidad",
|
||||
"direction": "Dirección",
|
||||
"ai": "Ia",
|
||||
"add": "Añadir",
|
||||
"auto": "Automático",
|
||||
"copyError": "Error $t(gallery.copy)",
|
||||
"details": "Detalles",
|
||||
"or": "o",
|
||||
"checkpoint": "Punto de control",
|
||||
"controlNet": "ControlNet",
|
||||
"aboutHeading": "Sea dueño de su poder creativo",
|
||||
"advanced": "Avanzado",
|
||||
"data": "Fecha",
|
||||
"delete": "Borrar",
|
||||
"copy": "Copiar",
|
||||
"beta": "Beta",
|
||||
"on": "En",
|
||||
"aboutDesc": "¿Utilizas Invoke para trabajar? Mira aquí:"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Tamaño de la imagen",
|
||||
@@ -443,7 +460,13 @@
|
||||
"previousImage": "Imagen anterior",
|
||||
"nextImage": "Siguiente imagen",
|
||||
"showOptionsPanel": "Mostrar el panel lateral",
|
||||
"menu": "Menú"
|
||||
"menu": "Menú",
|
||||
"showGalleryPanel": "Mostrar panel de galería",
|
||||
"loadMore": "Cargar más",
|
||||
"about": "Acerca de",
|
||||
"createIssue": "Crear un problema",
|
||||
"resetUI": "Interfaz de usuario $t(accessibility.reset)",
|
||||
"mode": "Modo"
|
||||
},
|
||||
"nodes": {
|
||||
"zoomInNodes": "Acercar",
|
||||
@@ -456,5 +479,68 @@
|
||||
"reloadNodeTemplates": "Recargar las plantillas de nodos",
|
||||
"loadWorkflow": "Cargar el flujo de trabajo",
|
||||
"downloadWorkflow": "Descargar el flujo de trabajo en un archivo JSON"
|
||||
},
|
||||
"boards": {
|
||||
"autoAddBoard": "Agregar panel automáticamente",
|
||||
"changeBoard": "Cambiar el panel",
|
||||
"clearSearch": "Borrar la búsqueda",
|
||||
"deleteBoard": "Borrar el panel",
|
||||
"selectBoard": "Seleccionar un panel",
|
||||
"uncategorized": "Sin categoría",
|
||||
"cancel": "Cancelar",
|
||||
"addBoard": "Agregar un panel",
|
||||
"movingImagesToBoard_one": "Moviendo {{count}} imagen al panel:",
|
||||
"movingImagesToBoard_many": "Moviendo {{count}} imágenes al panel:",
|
||||
"movingImagesToBoard_other": "Moviendo {{count}} imágenes al panel:",
|
||||
"bottomMessage": "Al eliminar este panel y las imágenes que contiene, se restablecerán las funciones que los estén utilizando actualmente.",
|
||||
"deleteBoardAndImages": "Borrar el panel y las imágenes",
|
||||
"loading": "Cargando...",
|
||||
"deletedBoardsCannotbeRestored": "Los paneles eliminados no se pueden restaurar",
|
||||
"move": "Mover",
|
||||
"menuItemAutoAdd": "Agregar automáticamente a este panel",
|
||||
"searchBoard": "Buscando paneles…",
|
||||
"topMessage": "Este panel contiene imágenes utilizadas en las siguientes funciones:",
|
||||
"downloadBoard": "Descargar panel",
|
||||
"deleteBoardOnly": "Borrar solo el panel",
|
||||
"myBoard": "Mi panel",
|
||||
"noMatching": "No hay paneles que coincidan"
|
||||
},
|
||||
"accordions": {
|
||||
"compositing": {
|
||||
"title": "Composición",
|
||||
"infillTab": "Relleno"
|
||||
},
|
||||
"generation": {
|
||||
"title": "Generación"
|
||||
},
|
||||
"image": {
|
||||
"title": "Imagen"
|
||||
},
|
||||
"control": {
|
||||
"title": "Control"
|
||||
},
|
||||
"advanced": {
|
||||
"options": "$t(accordions.advanced.title) opciones",
|
||||
"title": "Avanzado"
|
||||
}
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
"generationTab": "$t(ui.tabs.generation) $t(common.tab)",
|
||||
"canvas": "Lienzo",
|
||||
"generation": "Generación",
|
||||
"queue": "Cola",
|
||||
"queueTab": "$t(ui.tabs.queue) $t(common.tab)",
|
||||
"workflows": "Flujos de trabajo",
|
||||
"models": "Modelos",
|
||||
"modelsTab": "$t(ui.tabs.models) $t(common.tab)",
|
||||
"canvasTab": "$t(ui.tabs.canvas) $t(common.tab)",
|
||||
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)"
|
||||
}
|
||||
},
|
||||
"controlLayers": {
|
||||
"layers_one": "Capa",
|
||||
"layers_many": "Capas",
|
||||
"layers_other": "Capas"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"reportBugLabel": "Segnala un errore",
|
||||
"settingsLabel": "Impostazioni",
|
||||
"img2img": "Immagine a Immagine",
|
||||
"unifiedCanvas": "Tela unificata",
|
||||
"unifiedCanvas": "Tela",
|
||||
"nodes": "Flussi di lavoro",
|
||||
"upload": "Caricamento",
|
||||
"load": "Carica",
|
||||
@@ -74,7 +74,18 @@
|
||||
"file": "File",
|
||||
"toResolve": "Da risolvere",
|
||||
"add": "Aggiungi",
|
||||
"loglevel": "Livello di log"
|
||||
"loglevel": "Livello di log",
|
||||
"beta": "Beta",
|
||||
"positivePrompt": "Prompt positivo",
|
||||
"negativePrompt": "Prompt negativo",
|
||||
"selected": "Selezionato",
|
||||
"goTo": "Vai a",
|
||||
"editor": "Editor",
|
||||
"tab": "Scheda",
|
||||
"viewing": "Visualizza",
|
||||
"viewingDesc": "Rivedi le immagini in un'ampia vista della galleria",
|
||||
"editing": "Modifica",
|
||||
"editingDesc": "Modifica nell'area Livelli di controllo"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Dimensione dell'immagine",
|
||||
@@ -180,8 +191,8 @@
|
||||
"desc": "Mostra le informazioni sui metadati dell'immagine corrente"
|
||||
},
|
||||
"sendToImageToImage": {
|
||||
"title": "Invia a Immagine a Immagine",
|
||||
"desc": "Invia l'immagine corrente a da Immagine a Immagine"
|
||||
"title": "Invia a Generazione da immagine",
|
||||
"desc": "Invia l'immagine corrente a Generazione da immagine"
|
||||
},
|
||||
"deleteImage": {
|
||||
"title": "Elimina immagine",
|
||||
@@ -334,6 +345,10 @@
|
||||
"remixImage": {
|
||||
"desc": "Utilizza tutti i parametri tranne il seme dell'immagine corrente",
|
||||
"title": "Remixa l'immagine"
|
||||
},
|
||||
"toggleViewer": {
|
||||
"title": "Attiva/disattiva il visualizzatore di immagini",
|
||||
"desc": "Passa dal Visualizzatore immagini all'area di lavoro per la scheda corrente."
|
||||
}
|
||||
},
|
||||
"modelManager": {
|
||||
@@ -471,8 +486,8 @@
|
||||
"scaledHeight": "Altezza ridimensionata",
|
||||
"infillMethod": "Metodo di riempimento",
|
||||
"tileSize": "Dimensione piastrella",
|
||||
"sendToImg2Img": "Invia a Immagine a Immagine",
|
||||
"sendToUnifiedCanvas": "Invia a Tela Unificata",
|
||||
"sendToImg2Img": "Invia a Generazione da immagine",
|
||||
"sendToUnifiedCanvas": "Invia alla Tela",
|
||||
"downloadImage": "Scarica l'immagine",
|
||||
"usePrompt": "Usa Prompt",
|
||||
"useSeed": "Usa Seme",
|
||||
@@ -508,13 +523,11 @@
|
||||
"incompatibleBaseModelForControlAdapter": "Il modello dell'adattatore di controllo #{{number}} non è compatibile con il modello principale.",
|
||||
"missingNodeTemplate": "Modello di nodo mancante",
|
||||
"missingInputForField": "{{nodeLabel}} -> {{fieldLabel}} ingresso mancante",
|
||||
"missingFieldTemplate": "Modello di campo mancante"
|
||||
"missingFieldTemplate": "Modello di campo mancante",
|
||||
"imageNotProcessedForControlAdapter": "L'immagine dell'adattatore di controllo #{{number}} non è stata elaborata"
|
||||
},
|
||||
"useCpuNoise": "Usa la CPU per generare rumore",
|
||||
"iterations": "Iterazioni",
|
||||
"iterationsWithCount_one": "{{count}} Iterazione",
|
||||
"iterationsWithCount_many": "{{count}} Iterazioni",
|
||||
"iterationsWithCount_other": "{{count}} Iterazioni",
|
||||
"isAllowedToUpscale": {
|
||||
"useX2Model": "L'immagine è troppo grande per l'ampliamento con il modello x4, utilizza il modello x2",
|
||||
"tooLarge": "L'immagine è troppo grande per l'ampliamento, seleziona un'immagine più piccola"
|
||||
@@ -534,7 +547,10 @@
|
||||
"infillMosaicMinColor": "Colore minimo",
|
||||
"infillMosaicMaxColor": "Colore massimo",
|
||||
"infillMosaicTileHeight": "Altezza piastrella",
|
||||
"infillColorValue": "Colore di riempimento"
|
||||
"infillColorValue": "Colore di riempimento",
|
||||
"globalSettings": "Impostazioni globali",
|
||||
"globalPositivePromptPlaceholder": "Prompt positivo globale",
|
||||
"globalNegativePromptPlaceholder": "Prompt negativo globale"
|
||||
},
|
||||
"settings": {
|
||||
"models": "Modelli",
|
||||
@@ -559,7 +575,7 @@
|
||||
"intermediatesCleared_one": "Cancellata {{count}} immagine intermedia",
|
||||
"intermediatesCleared_many": "Cancellate {{count}} immagini intermedie",
|
||||
"intermediatesCleared_other": "Cancellate {{count}} immagini intermedie",
|
||||
"clearIntermediatesDesc1": "La cancellazione delle immagini intermedie ripristinerà lo stato di Tela Unificata e ControlNet.",
|
||||
"clearIntermediatesDesc1": "La cancellazione delle immagini intermedie ripristinerà lo stato della Tela e degli Adattatori di Controllo.",
|
||||
"intermediatesClearedFailed": "Problema con la cancellazione delle immagini intermedie",
|
||||
"clearIntermediatesWithCount_one": "Cancella {{count}} immagine intermedia",
|
||||
"clearIntermediatesWithCount_many": "Cancella {{count}} immagini intermedie",
|
||||
@@ -575,8 +591,8 @@
|
||||
"imageCopied": "Immagine copiata",
|
||||
"imageNotLoadedDesc": "Impossibile trovare l'immagine",
|
||||
"canvasMerged": "Tela unita",
|
||||
"sentToImageToImage": "Inviato a Immagine a Immagine",
|
||||
"sentToUnifiedCanvas": "Inviato a Tela Unificata",
|
||||
"sentToImageToImage": "Inviato a Generazione da immagine",
|
||||
"sentToUnifiedCanvas": "Inviato alla Tela",
|
||||
"parametersNotSet": "Parametri non impostati",
|
||||
"metadataLoadFailed": "Impossibile caricare i metadati",
|
||||
"serverError": "Errore del Server",
|
||||
@@ -795,7 +811,7 @@
|
||||
"float": "In virgola mobile",
|
||||
"currentImageDescription": "Visualizza l'immagine corrente nell'editor dei nodi",
|
||||
"fieldTypesMustMatch": "I tipi di campo devono corrispondere",
|
||||
"edge": "Bordo",
|
||||
"edge": "Collegamento",
|
||||
"currentImage": "Immagine corrente",
|
||||
"integer": "Numero Intero",
|
||||
"inputMayOnlyHaveOneConnection": "L'ingresso può avere solo una connessione",
|
||||
@@ -845,7 +861,9 @@
|
||||
"resetToDefaultValue": "Ripristina il valore predefinito",
|
||||
"noFieldsViewMode": "Questo flusso di lavoro non ha campi selezionati da visualizzare. Visualizza il flusso di lavoro completo per configurare i valori.",
|
||||
"edit": "Modifica",
|
||||
"graph": "Grafico"
|
||||
"graph": "Grafico",
|
||||
"showEdgeLabelsHelp": "Mostra etichette sui collegamenti, che indicano i nodi collegati",
|
||||
"showEdgeLabels": "Mostra le etichette del collegamento"
|
||||
},
|
||||
"boards": {
|
||||
"autoAddBoard": "Aggiungi automaticamente bacheca",
|
||||
@@ -922,7 +940,7 @@
|
||||
"colorMapTileSize": "Dimensione piastrella",
|
||||
"mediapipeFaceDescription": "Rilevamento dei volti tramite Mediapipe",
|
||||
"hedDescription": "Rilevamento dei bordi nidificati olisticamente",
|
||||
"setControlImageDimensions": "Imposta le dimensioni dell'immagine di controllo su L/A",
|
||||
"setControlImageDimensions": "Copia le dimensioni in L/A (ottimizza per il modello)",
|
||||
"maxFaces": "Numero massimo di volti",
|
||||
"addT2IAdapter": "Aggiungi $t(common.t2iAdapter)",
|
||||
"addControlNet": "Aggiungi $t(common.controlNet)",
|
||||
@@ -951,12 +969,17 @@
|
||||
"mediapipeFace": "Mediapipe Volto",
|
||||
"ip_adapter": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.ipAdapter))",
|
||||
"t2i_adapter": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.t2iAdapter))",
|
||||
"selectCLIPVisionModel": "Seleziona un modello CLIP Vision"
|
||||
"selectCLIPVisionModel": "Seleziona un modello CLIP Vision",
|
||||
"ipAdapterMethod": "Metodo",
|
||||
"full": "Completo",
|
||||
"composition": "Solo la composizione",
|
||||
"style": "Solo lo stile",
|
||||
"beginEndStepPercentShort": "Inizio/Fine %",
|
||||
"setControlImageDimensionsForce": "Copia le dimensioni in L/A (ignora il modello)"
|
||||
},
|
||||
"queue": {
|
||||
"queueFront": "Aggiungi all'inizio della coda",
|
||||
"queueBack": "Aggiungi alla coda",
|
||||
"queueCountPrediction": "{{promptsCount}} prompt × {{iterations}} iterazioni -> {{count}} generazioni",
|
||||
"queue": "Coda",
|
||||
"status": "Stato",
|
||||
"pruneSucceeded": "Rimossi {{item_count}} elementi completati dalla coda",
|
||||
@@ -993,7 +1016,7 @@
|
||||
"cancelBatchSucceeded": "Lotto annullato",
|
||||
"clearTooltip": "Annulla e cancella tutti gli elementi",
|
||||
"current": "Attuale",
|
||||
"pauseTooltip": "Sospende l'elaborazione",
|
||||
"pauseTooltip": "Sospendi l'elaborazione",
|
||||
"failed": "Falliti",
|
||||
"cancelItem": "Annulla l'elemento",
|
||||
"next": "Prossimo",
|
||||
@@ -1394,6 +1417,12 @@
|
||||
"paragraphs": [
|
||||
"La dimensione del bordo del passaggio di coerenza."
|
||||
]
|
||||
},
|
||||
"ipAdapterMethod": {
|
||||
"heading": "Metodo",
|
||||
"paragraphs": [
|
||||
"Metodo con cui applicare l'adattatore IP corrente."
|
||||
]
|
||||
}
|
||||
},
|
||||
"sdxl": {
|
||||
@@ -1522,5 +1551,56 @@
|
||||
"compatibleEmbeddings": "Incorporamenti compatibili",
|
||||
"addPromptTrigger": "Aggiungi Trigger nel prompt",
|
||||
"noMatchingTriggers": "Nessun Trigger corrispondente"
|
||||
},
|
||||
"controlLayers": {
|
||||
"opacityFilter": "Filtro opacità",
|
||||
"deleteAll": "Cancella tutto",
|
||||
"addLayer": "Aggiungi Livello",
|
||||
"moveToFront": "Sposta in primo piano",
|
||||
"moveToBack": "Sposta in fondo",
|
||||
"moveForward": "Sposta avanti",
|
||||
"moveBackward": "Sposta indietro",
|
||||
"brushSize": "Dimensioni del pennello",
|
||||
"globalMaskOpacity": "Opacità globale della maschera",
|
||||
"autoNegative": "Auto Negativo",
|
||||
"toggleVisibility": "Attiva/disattiva la visibilità dei livelli",
|
||||
"deletePrompt": "Cancella il prompt",
|
||||
"debugLayers": "Debug dei Livelli",
|
||||
"rectangle": "Rettangolo",
|
||||
"maskPreviewColor": "Colore anteprima maschera",
|
||||
"addPositivePrompt": "Aggiungi $t(common.positivePrompt)",
|
||||
"addNegativePrompt": "Aggiungi $t(common.negativePrompt)",
|
||||
"addIPAdapter": "Aggiungi $t(common.ipAdapter)",
|
||||
"regionalGuidance": "Guida regionale",
|
||||
"regionalGuidanceLayer": "$t(unifiedCanvas.layer) $t(controlLayers.regionalGuidance)",
|
||||
"opacity": "Opacità",
|
||||
"globalControlAdapter": "$t(controlnet.controlAdapter_one) Globale",
|
||||
"globalControlAdapterLayer": "$t(controlnet.controlAdapter_one) - $t(unifiedCanvas.layer) Globale",
|
||||
"globalIPAdapter": "$t(common.ipAdapter) Globale",
|
||||
"globalIPAdapterLayer": "$t(common.ipAdapter) - $t(unifiedCanvas.layer) Globale",
|
||||
"globalInitialImage": "Immagine iniziale",
|
||||
"globalInitialImageLayer": "$t(controlLayers.globalInitialImage) - $t(unifiedCanvas.layer) Globale",
|
||||
"clearProcessor": "Cancella processore",
|
||||
"resetProcessor": "Ripristina il processore alle impostazioni predefinite",
|
||||
"noLayersAdded": "Nessun livello aggiunto",
|
||||
"resetRegion": "Reimposta la regione",
|
||||
"controlLayers": "Livelli di controllo",
|
||||
"layers_one": "Livello",
|
||||
"layers_many": "Livelli",
|
||||
"layers_other": "Livelli"
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
"generation": "Generazione",
|
||||
"generationTab": "$t(ui.tabs.generation) $t(common.tab)",
|
||||
"canvas": "Tela",
|
||||
"canvasTab": "$t(ui.tabs.canvas) $t(common.tab)",
|
||||
"workflows": "Flussi di lavoro",
|
||||
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
|
||||
"models": "Modelli",
|
||||
"modelsTab": "$t(ui.tabs.models) $t(common.tab)",
|
||||
"queue": "Coda",
|
||||
"queueTab": "$t(ui.tabs.queue) $t(common.tab)"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -570,7 +570,6 @@
|
||||
"pauseSucceeded": "処理が一時停止されました",
|
||||
"queueFront": "キューの先頭へ追加",
|
||||
"queueBack": "キューに追加",
|
||||
"queueCountPrediction": "{{promptsCount}} プロンプト × {{iterations}} イテレーション -> {{count}} 枚生成",
|
||||
"pause": "一時停止",
|
||||
"queue": "キュー",
|
||||
"pauseTooltip": "処理を一時停止",
|
||||
|
||||
@@ -505,7 +505,6 @@
|
||||
"completed": "완성된",
|
||||
"queueBack": "Queue에 추가",
|
||||
"cancelFailed": "항목 취소 중 발생한 문제",
|
||||
"queueCountPrediction": "Queue에 {{predicted}} 추가",
|
||||
"batchQueued": "Batch Queued",
|
||||
"pauseFailed": "프로세서 중지 중 발생한 문제",
|
||||
"clearFailed": "Queue 제거 중 발생한 문제",
|
||||
|
||||
@@ -383,8 +383,6 @@
|
||||
"useCpuNoise": "Gebruik CPU-ruis",
|
||||
"imageActions": "Afbeeldingshandeling",
|
||||
"iterations": "Iteraties",
|
||||
"iterationsWithCount_one": "{{count}} iteratie",
|
||||
"iterationsWithCount_other": "{{count}} iteraties",
|
||||
"coherenceMode": "Modus"
|
||||
},
|
||||
"settings": {
|
||||
@@ -940,7 +938,6 @@
|
||||
"completed": "Voltooid",
|
||||
"queueBack": "Voeg toe aan wachtrij",
|
||||
"cancelFailed": "Fout bij annuleren onderdeel",
|
||||
"queueCountPrediction": "Voeg {{predicted}} toe aan wachtrij",
|
||||
"batchQueued": "Reeks in wachtrij geplaatst",
|
||||
"pauseFailed": "Fout bij onderbreken verwerker",
|
||||
"clearFailed": "Fout bij wissen van wachtrij",
|
||||
|
||||
@@ -76,7 +76,18 @@
|
||||
"localSystem": "Локальная система",
|
||||
"aboutDesc": "Используя Invoke для работы? Проверьте это:",
|
||||
"add": "Добавить",
|
||||
"loglevel": "Уровень логов"
|
||||
"loglevel": "Уровень логов",
|
||||
"beta": "Бета",
|
||||
"selected": "Выбрано",
|
||||
"positivePrompt": "Позитивный запрос",
|
||||
"negativePrompt": "Негативный запрос",
|
||||
"editor": "Редактор",
|
||||
"goTo": "Перейти к",
|
||||
"tab": "Вкладка",
|
||||
"viewing": "Просмотр",
|
||||
"editing": "Редактирование",
|
||||
"viewingDesc": "Просмотр изображений в режиме большой галереи",
|
||||
"editingDesc": "Редактировать на холсте слоёв управления"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Размер изображений",
|
||||
@@ -87,8 +98,8 @@
|
||||
"deleteImagePermanent": "Удаленные изображения невозможно восстановить.",
|
||||
"deleteImageBin": "Удаленные изображения будут отправлены в корзину вашей операционной системы.",
|
||||
"deleteImage_one": "Удалить изображение",
|
||||
"deleteImage_few": "",
|
||||
"deleteImage_many": "",
|
||||
"deleteImage_few": "Удалить {{count}} изображения",
|
||||
"deleteImage_many": "Удалить {{count}} изображений",
|
||||
"assets": "Ресурсы",
|
||||
"autoAssignBoardOnClick": "Авто-назначение доски по клику",
|
||||
"deleteSelection": "Удалить выделенное",
|
||||
@@ -336,6 +347,10 @@
|
||||
"remixImage": {
|
||||
"desc": "Используйте все параметры, кроме сида из текущего изображения",
|
||||
"title": "Ремикс изображения"
|
||||
},
|
||||
"toggleViewer": {
|
||||
"title": "Переключить просмотр изображений",
|
||||
"desc": "Переключение между средством просмотра изображений и рабочей областью для текущей вкладки."
|
||||
}
|
||||
},
|
||||
"modelManager": {
|
||||
@@ -512,7 +527,8 @@
|
||||
"missingNodeTemplate": "Отсутствует шаблон узла",
|
||||
"missingFieldTemplate": "Отсутствует шаблон поля",
|
||||
"addingImagesTo": "Добавление изображений в",
|
||||
"invoke": "Создать"
|
||||
"invoke": "Создать",
|
||||
"imageNotProcessedForControlAdapter": "Изображение адаптера контроля №{{number}} не обрабатывается"
|
||||
},
|
||||
"isAllowedToUpscale": {
|
||||
"useX2Model": "Изображение слишком велико для увеличения с помощью модели x4. Используйте модель x2",
|
||||
@@ -523,9 +539,6 @@
|
||||
"useCpuNoise": "Использовать шум CPU",
|
||||
"imageActions": "Действия с изображениями",
|
||||
"iterations": "Кол-во",
|
||||
"iterationsWithCount_one": "{{count}} Интеграция",
|
||||
"iterationsWithCount_few": "{{count}} Итерации",
|
||||
"iterationsWithCount_many": "{{count}} Итераций",
|
||||
"useSize": "Использовать размер",
|
||||
"coherenceMode": "Режим",
|
||||
"aspect": "Соотношение",
|
||||
@@ -541,7 +554,10 @@
|
||||
"infillMosaicTileHeight": "Высота плиток",
|
||||
"infillMosaicMinColor": "Мин цвет",
|
||||
"infillMosaicMaxColor": "Макс цвет",
|
||||
"infillColorValue": "Цвет заливки"
|
||||
"infillColorValue": "Цвет заливки",
|
||||
"globalSettings": "Глобальные настройки",
|
||||
"globalNegativePromptPlaceholder": "Глобальный негативный запрос",
|
||||
"globalPositivePromptPlaceholder": "Глобальный запрос"
|
||||
},
|
||||
"settings": {
|
||||
"models": "Модели",
|
||||
@@ -706,7 +722,9 @@
|
||||
"coherenceModeBoxBlur": "коробчатое размытие",
|
||||
"discardCurrent": "Отбросить текущее",
|
||||
"invertBrushSizeScrollDirection": "Инвертировать прокрутку для размера кисти",
|
||||
"initialFitImageSize": "Подогнать размер изображения при перебросе"
|
||||
"initialFitImageSize": "Подогнать размер изображения при перебросе",
|
||||
"hideBoundingBox": "Скрыть ограничительную рамку",
|
||||
"showBoundingBox": "Показать ограничительную рамку"
|
||||
},
|
||||
"accessibility": {
|
||||
"uploadImage": "Загрузить изображение",
|
||||
@@ -849,7 +867,10 @@
|
||||
"editMode": "Открыть в редакторе узлов",
|
||||
"resetToDefaultValue": "Сбросить к стандартному значкнию",
|
||||
"edit": "Редактировать",
|
||||
"noFieldsViewMode": "В этом рабочем процессе нет выбранных полей для отображения. Просмотрите полный рабочий процесс для настройки значений."
|
||||
"noFieldsViewMode": "В этом рабочем процессе нет выбранных полей для отображения. Просмотрите полный рабочий процесс для настройки значений.",
|
||||
"graph": "График",
|
||||
"showEdgeLabels": "Показать метки на ребрах",
|
||||
"showEdgeLabelsHelp": "Показать метки на ребрах, указывающие на соединенные узлы"
|
||||
},
|
||||
"controlnet": {
|
||||
"amult": "a_mult",
|
||||
@@ -917,8 +938,8 @@
|
||||
"lineartAnime": "Контурный рисунок в стиле аниме",
|
||||
"mediapipeFaceDescription": "Обнаружение лиц с помощью Mediapipe",
|
||||
"hedDescription": "Целостное обнаружение границ",
|
||||
"setControlImageDimensions": "Установите размеры контрольного изображения на Ш/В",
|
||||
"scribble": "каракули",
|
||||
"setControlImageDimensions": "Скопируйте размер в Ш/В (оптимизируйте для модели)",
|
||||
"scribble": "Штрихи",
|
||||
"maxFaces": "Макс Лица",
|
||||
"mlsdDescription": "Минималистичный детектор отрезков линии",
|
||||
"resizeSimple": "Изменить размер (простой)",
|
||||
@@ -933,7 +954,18 @@
|
||||
"small": "Маленький",
|
||||
"body": "Тело",
|
||||
"hands": "Руки",
|
||||
"selectCLIPVisionModel": "Выбрать модель CLIP Vision"
|
||||
"selectCLIPVisionModel": "Выбрать модель CLIP Vision",
|
||||
"ipAdapterMethod": "Метод",
|
||||
"full": "Всё",
|
||||
"mlsd": "M-LSD",
|
||||
"h": "H",
|
||||
"style": "Только стиль",
|
||||
"dwOpenpose": "DW Openpose",
|
||||
"pidi": "PIDI",
|
||||
"composition": "Только композиция",
|
||||
"hed": "HED",
|
||||
"beginEndStepPercentShort": "Начало/конец %",
|
||||
"setControlImageDimensionsForce": "Скопируйте размер в Ш/В (игнорируйте модель)"
|
||||
},
|
||||
"boards": {
|
||||
"autoAddBoard": "Авто добавление Доски",
|
||||
@@ -1312,6 +1344,12 @@
|
||||
"paragraphs": [
|
||||
"Плавно укладывайте изображение вдоль вертикальной оси."
|
||||
]
|
||||
},
|
||||
"ipAdapterMethod": {
|
||||
"heading": "Метод",
|
||||
"paragraphs": [
|
||||
"Метод, с помощью которого применяется текущий IP-адаптер."
|
||||
]
|
||||
}
|
||||
},
|
||||
"metadata": {
|
||||
@@ -1359,7 +1397,6 @@
|
||||
"completed": "Выполнено",
|
||||
"queueBack": "Добавить в очередь",
|
||||
"cancelFailed": "Проблема с отменой элемента",
|
||||
"queueCountPrediction": "{{promptsCount}} запросов × {{iterations}} изображений -> {{count}} генераций",
|
||||
"batchQueued": "Пакетная очередь",
|
||||
"pauseFailed": "Проблема с приостановкой рендеринга",
|
||||
"clearFailed": "Проблема с очисткой очереди",
|
||||
@@ -1475,7 +1512,11 @@
|
||||
"projectWorkflows": "Рабочие процессы проекта",
|
||||
"defaultWorkflows": "Стандартные рабочие процессы",
|
||||
"name": "Имя",
|
||||
"noRecentWorkflows": "Нет последних рабочих процессов"
|
||||
"noRecentWorkflows": "Нет последних рабочих процессов",
|
||||
"loadWorkflow": "Рабочий процесс $t(common.load)",
|
||||
"convertGraph": "Конвертировать график",
|
||||
"loadFromGraph": "Загрузка рабочего процесса из графика",
|
||||
"autoLayout": "Автоматическое расположение"
|
||||
},
|
||||
"hrf": {
|
||||
"enableHrf": "Включить исправление высокого разрешения",
|
||||
@@ -1528,5 +1569,56 @@
|
||||
"addPromptTrigger": "Добавить триггер запроса",
|
||||
"compatibleEmbeddings": "Совместимые встраивания",
|
||||
"noMatchingTriggers": "Нет соответствующих триггеров"
|
||||
},
|
||||
"controlLayers": {
|
||||
"moveToBack": "На задний план",
|
||||
"moveForward": "Переместить вперёд",
|
||||
"moveBackward": "Переместить назад",
|
||||
"brushSize": "Размер кисти",
|
||||
"controlLayers": "Слои управления",
|
||||
"globalMaskOpacity": "Глобальная непрозрачность маски",
|
||||
"autoNegative": "Авто негатив",
|
||||
"deletePrompt": "Удалить запрос",
|
||||
"resetRegion": "Сбросить регион",
|
||||
"debugLayers": "Слои отладки",
|
||||
"rectangle": "Прямоугольник",
|
||||
"maskPreviewColor": "Цвет предпросмотра маски",
|
||||
"addNegativePrompt": "Добавить $t(common.negativePrompt)",
|
||||
"regionalGuidance": "Региональная точность",
|
||||
"opacity": "Непрозрачность",
|
||||
"globalControlAdapter": "Глобальный $t(controlnet.controlAdapter_one)",
|
||||
"globalControlAdapterLayer": "Глобальный $t(controlnet.controlAdapter_one) $t(unifiedCanvas.layer)",
|
||||
"globalIPAdapter": "Глобальный $t(common.ipAdapter)",
|
||||
"globalIPAdapterLayer": "Глобальный $t(common.ipAdapter) $t(unifiedCanvas.layer)",
|
||||
"opacityFilter": "Фильтр непрозрачности",
|
||||
"deleteAll": "Удалить всё",
|
||||
"addLayer": "Добавить слой",
|
||||
"moveToFront": "На передний план",
|
||||
"toggleVisibility": "Переключить видимость слоя",
|
||||
"addPositivePrompt": "Добавить $t(common.positivePrompt)",
|
||||
"addIPAdapter": "Добавить $t(common.ipAdapter)",
|
||||
"regionalGuidanceLayer": "$t(controlLayers.regionalGuidance) $t(unifiedCanvas.layer)",
|
||||
"resetProcessor": "Сброс процессора по умолчанию",
|
||||
"clearProcessor": "Чистый процессор",
|
||||
"globalInitialImage": "Глобальное исходное изображение",
|
||||
"globalInitialImageLayer": "$t(controlLayers.globalInitialImage) $t(unifiedCanvas.layer)",
|
||||
"noLayersAdded": "Без слоев",
|
||||
"layers_one": "Слой",
|
||||
"layers_few": "Слоя",
|
||||
"layers_many": "Слоев"
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
"generation": "Генерация",
|
||||
"canvas": "Холст",
|
||||
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
|
||||
"models": "Модели",
|
||||
"generationTab": "$t(ui.tabs.generation) $t(common.tab)",
|
||||
"workflows": "Рабочие процессы",
|
||||
"canvasTab": "$t(ui.tabs.canvas) $t(common.tab)",
|
||||
"queueTab": "$t(ui.tabs.queue) $t(common.tab)",
|
||||
"modelsTab": "$t(ui.tabs.models) $t(common.tab)",
|
||||
"queue": "Очередь"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -66,7 +66,7 @@
|
||||
"saveAs": "保存为",
|
||||
"ai": "ai",
|
||||
"or": "或",
|
||||
"aboutDesc": "使用 Invoke 工作?查看:",
|
||||
"aboutDesc": "使用 Invoke 工作?来看看:",
|
||||
"add": "添加",
|
||||
"loglevel": "日志级别",
|
||||
"copy": "复制",
|
||||
@@ -445,7 +445,6 @@
|
||||
"useX2Model": "图像太大,无法使用 x4 模型,使用 x2 模型作为替代",
|
||||
"tooLarge": "图像太大无法进行放大,请选择更小的图像"
|
||||
},
|
||||
"iterationsWithCount_other": "{{count}} 次迭代生成",
|
||||
"cfgRescaleMultiplier": "CFG 重缩放倍数",
|
||||
"useSize": "使用尺寸",
|
||||
"setToOptimalSize": "优化模型大小",
|
||||
@@ -853,7 +852,6 @@
|
||||
"pruneSucceeded": "从队列修剪 {{item_count}} 个已完成的项目",
|
||||
"notReady": "无法排队",
|
||||
"batchFailedToQueue": "批次加入队列失败",
|
||||
"queueCountPrediction": "{{promptsCount}} 提示词 × {{iterations}} 迭代次数 -> {{count}} 次生成",
|
||||
"batchQueued": "加入队列的批次",
|
||||
"front": "前",
|
||||
"pruneTooltip": "修剪 {{item_count}} 个已完成的项目",
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
/* eslint-disable no-console */
|
||||
import fs from 'node:fs';
|
||||
|
||||
import openapiTS from 'openapi-typescript';
|
||||
|
||||
@@ -21,6 +21,7 @@ import i18n from 'i18n';
|
||||
import { size } from 'lodash-es';
|
||||
import { memo, useCallback, useEffect } from 'react';
|
||||
import { ErrorBoundary } from 'react-error-boundary';
|
||||
import { useGetOpenAPISchemaQuery } from 'services/api/endpoints/appInfo';
|
||||
|
||||
import AppErrorBoundaryFallback from './AppErrorBoundaryFallback';
|
||||
import PreselectedImage from './PreselectedImage';
|
||||
@@ -46,6 +47,7 @@ const App = ({ config = DEFAULT_CONFIG, selectedImage }: Props) => {
|
||||
useSocketIO();
|
||||
useGlobalModifiersInit();
|
||||
useGlobalHotkeys();
|
||||
useGetOpenAPISchemaQuery();
|
||||
|
||||
const { dropzone, isHandlingUpload, setIsHandlingUpload } = useFullscreenDropzone();
|
||||
|
||||
|
||||
@@ -67,6 +67,8 @@ export const useSocketIO = () => {
|
||||
|
||||
if ($isDebugging.get() || import.meta.env.MODE === 'development') {
|
||||
window.$socketOptions = $socketOptions;
|
||||
// This is only enabled manually for debugging, console is allowed.
|
||||
/* eslint-disable-next-line no-console */
|
||||
console.log('Socket initialized', socket);
|
||||
}
|
||||
|
||||
@@ -75,6 +77,8 @@ export const useSocketIO = () => {
|
||||
return () => {
|
||||
if ($isDebugging.get() || import.meta.env.MODE === 'development') {
|
||||
window.$socketOptions = undefined;
|
||||
// This is only enabled manually for debugging, console is allowed.
|
||||
/* eslint-disable-next-line no-console */
|
||||
console.log('Socket teardown', socket);
|
||||
}
|
||||
socket.disconnect();
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
/* eslint-disable no-console */
|
||||
// This is only enabled manually for debugging, console is allowed.
|
||||
|
||||
import type { Middleware, MiddlewareAPI } from '@reduxjs/toolkit';
|
||||
import { diff } from 'jsondiffpatch';
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
import type { UnknownAction } from '@reduxjs/toolkit';
|
||||
import { deepClone } from 'common/util/deepClone';
|
||||
import { isAnyGraphBuilt } from 'features/nodes/store/actions';
|
||||
import { nodeTemplatesBuilt } from 'features/nodes/store/nodesSlice';
|
||||
import { appInfoApi } from 'services/api/endpoints/appInfo';
|
||||
import type { Graph } from 'services/api/types';
|
||||
import { socketGeneratorProgress } from 'services/events/actions';
|
||||
@@ -25,13 +24,6 @@ export const actionSanitizer = <A extends UnknownAction>(action: A): A => {
|
||||
};
|
||||
}
|
||||
|
||||
if (nodeTemplatesBuilt.match(action)) {
|
||||
return {
|
||||
...action,
|
||||
payload: '<Node templates omitted>',
|
||||
};
|
||||
}
|
||||
|
||||
if (socketGeneratorProgress.match(action)) {
|
||||
const sanitized = deepClone(action);
|
||||
if (sanitized.payload.data.progress_image) {
|
||||
|
||||
@@ -21,7 +21,7 @@ export const addDeleteBoardAndImagesFulfilledListener = (startAppListening: AppS
|
||||
|
||||
const { canvas, nodes, controlAdapters, controlLayers } = getState();
|
||||
deleted_images.forEach((image_name) => {
|
||||
const imageUsage = getImageUsage(canvas, nodes, controlAdapters, controlLayers.present, image_name);
|
||||
const imageUsage = getImageUsage(canvas, nodes.present, controlAdapters, controlLayers.present, image_name);
|
||||
|
||||
if (imageUsage.isCanvasImage && !wasCanvasReset) {
|
||||
dispatch(resetCanvas());
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { parseify } from 'common/util/serialize';
|
||||
import { canvasSavedToGallery } from 'features/canvas/store/actions';
|
||||
import { getBaseLayerBlob } from 'features/canvas/util/getBaseLayerBlob';
|
||||
import { addToast } from 'features/system/store/systemSlice';
|
||||
@@ -43,6 +44,9 @@ export const addCanvasSavedToGalleryListener = (startAppListening: AppStartListe
|
||||
type: 'TOAST',
|
||||
toastOptions: { title: t('toast.canvasSavedGallery') },
|
||||
},
|
||||
metadata: {
|
||||
_canvas_objects: parseify(state.canvas.layerState.objects),
|
||||
},
|
||||
})
|
||||
);
|
||||
},
|
||||
|
||||
@@ -1,13 +1,14 @@
|
||||
import { isAnyOf } from '@reduxjs/toolkit';
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import type { AppDispatch } from 'app/store/store';
|
||||
import { parseify } from 'common/util/serialize';
|
||||
import {
|
||||
caLayerImageChanged,
|
||||
caLayerIsProcessingImageChanged,
|
||||
caLayerModelChanged,
|
||||
caLayerProcessedImageChanged,
|
||||
caLayerProcessorConfigChanged,
|
||||
caLayerProcessorPendingBatchIdChanged,
|
||||
caLayerRecalled,
|
||||
isControlAdapterLayer,
|
||||
} from 'features/controlLayers/store/controlLayersSlice';
|
||||
@@ -16,46 +17,41 @@ import { isImageOutput } from 'features/nodes/types/common';
|
||||
import { addToast } from 'features/system/store/systemSlice';
|
||||
import { t } from 'i18next';
|
||||
import { isEqual } from 'lodash-es';
|
||||
import { imagesApi } from 'services/api/endpoints/images';
|
||||
import { getImageDTO } from 'services/api/endpoints/images';
|
||||
import { queueApi } from 'services/api/endpoints/queue';
|
||||
import type { BatchConfig, ImageDTO } from 'services/api/types';
|
||||
import type { BatchConfig } from 'services/api/types';
|
||||
import { socketInvocationComplete } from 'services/events/actions';
|
||||
import { assert } from 'tsafe';
|
||||
|
||||
const matcher = isAnyOf(caLayerImageChanged, caLayerProcessorConfigChanged, caLayerModelChanged, caLayerRecalled);
|
||||
|
||||
const DEBOUNCE_MS = 300;
|
||||
const log = logger('session');
|
||||
|
||||
/**
|
||||
* Simple helper to cancel a batch and reset the pending batch ID
|
||||
*/
|
||||
const cancelProcessorBatch = async (dispatch: AppDispatch, layerId: string, batchId: string) => {
|
||||
const req = dispatch(queueApi.endpoints.cancelByBatchIds.initiate({ batch_ids: [batchId] }));
|
||||
log.trace({ batchId }, 'Cancelling existing preprocessor batch');
|
||||
try {
|
||||
await req.unwrap();
|
||||
} catch {
|
||||
// no-op
|
||||
} finally {
|
||||
req.reset();
|
||||
// Always reset the pending batch ID - the cancel req could fail if the batch doesn't exist
|
||||
dispatch(caLayerProcessorPendingBatchIdChanged({ layerId, batchId: null }));
|
||||
}
|
||||
};
|
||||
|
||||
export const addControlAdapterPreprocessor = (startAppListening: AppStartListening) => {
|
||||
startAppListening({
|
||||
matcher,
|
||||
effect: async (action, { dispatch, getState, getOriginalState, cancelActiveListeners, delay, take }) => {
|
||||
effect: async (action, { dispatch, getState, getOriginalState, cancelActiveListeners, delay, take, signal }) => {
|
||||
const layerId = caLayerRecalled.match(action) ? action.payload.id : action.payload.layerId;
|
||||
const precheckLayerOriginal = getOriginalState()
|
||||
.controlLayers.present.layers.filter(isControlAdapterLayer)
|
||||
.find((l) => l.id === layerId);
|
||||
const precheckLayer = getState()
|
||||
.controlLayers.present.layers.filter(isControlAdapterLayer)
|
||||
.find((l) => l.id === layerId);
|
||||
|
||||
// Conditions to bail
|
||||
const layerDoesNotExist = !precheckLayer;
|
||||
const layerHasNoImage = !precheckLayer?.controlAdapter.image;
|
||||
const layerHasNoProcessorConfig = !precheckLayer?.controlAdapter.processorConfig;
|
||||
const layerIsAlreadyProcessingImage = precheckLayer?.controlAdapter.isProcessingImage;
|
||||
const areImageAndProcessorUnchanged =
|
||||
isEqual(precheckLayer?.controlAdapter.image, precheckLayerOriginal?.controlAdapter.image) &&
|
||||
isEqual(precheckLayer?.controlAdapter.processorConfig, precheckLayerOriginal?.controlAdapter.processorConfig);
|
||||
|
||||
if (
|
||||
layerDoesNotExist ||
|
||||
layerHasNoImage ||
|
||||
layerHasNoProcessorConfig ||
|
||||
areImageAndProcessorUnchanged ||
|
||||
layerIsAlreadyProcessingImage
|
||||
) {
|
||||
return;
|
||||
}
|
||||
const state = getState();
|
||||
const originalState = getOriginalState();
|
||||
|
||||
// Cancel any in-progress instances of this listener
|
||||
cancelActiveListeners();
|
||||
@@ -63,27 +59,55 @@ export const addControlAdapterPreprocessor = (startAppListening: AppStartListeni
|
||||
|
||||
// Delay before starting actual work
|
||||
await delay(DEBOUNCE_MS);
|
||||
dispatch(caLayerIsProcessingImageChanged({ layerId, isProcessingImage: true }));
|
||||
|
||||
// Double-check that we are still eligible for processing
|
||||
const state = getState();
|
||||
const layer = state.controlLayers.present.layers.filter(isControlAdapterLayer).find((l) => l.id === layerId);
|
||||
const image = layer?.controlAdapter.image;
|
||||
const config = layer?.controlAdapter.processorConfig;
|
||||
|
||||
// If we have no image or there is no processor config, bail
|
||||
if (!layer || !image || !config) {
|
||||
if (!layer) {
|
||||
return;
|
||||
}
|
||||
|
||||
// @ts-expect-error: TS isn't able to narrow the typing of buildNode and `config` will error...
|
||||
const processorNode = CA_PROCESSOR_DATA[config.type].buildNode(image, config);
|
||||
// We should only process if the processor settings or image have changed
|
||||
const originalLayer = originalState.controlLayers.present.layers
|
||||
.filter(isControlAdapterLayer)
|
||||
.find((l) => l.id === layerId);
|
||||
const originalImage = originalLayer?.controlAdapter.image;
|
||||
const originalConfig = originalLayer?.controlAdapter.processorConfig;
|
||||
|
||||
const image = layer.controlAdapter.image;
|
||||
const config = layer.controlAdapter.processorConfig;
|
||||
|
||||
if (isEqual(config, originalConfig) && isEqual(image, originalImage)) {
|
||||
// Neither config nor image have changed, we can bail
|
||||
return;
|
||||
}
|
||||
|
||||
if (!image || !config) {
|
||||
// - If we have no image, we have nothing to process
|
||||
// - If we have no processor config, we have nothing to process
|
||||
// Clear the processed image and bail
|
||||
dispatch(caLayerProcessedImageChanged({ layerId, imageDTO: null }));
|
||||
return;
|
||||
}
|
||||
|
||||
// At this point, the user has stopped fiddling with the processor settings and there is a processor selected.
|
||||
|
||||
// If there is a pending processor batch, cancel it.
|
||||
if (layer.controlAdapter.processorPendingBatchId) {
|
||||
cancelProcessorBatch(dispatch, layerId, layer.controlAdapter.processorPendingBatchId);
|
||||
}
|
||||
|
||||
// TODO(psyche): I can't get TS to be happy, it thinks `config` is `never` but it should be inferred from the generic... I'll just cast it for now
|
||||
const processorNode = CA_PROCESSOR_DATA[config.type].buildNode(image, config as never);
|
||||
const enqueueBatchArg: BatchConfig = {
|
||||
prepend: true,
|
||||
batch: {
|
||||
graph: {
|
||||
nodes: {
|
||||
[processorNode.id]: { ...processorNode, is_intermediate: true },
|
||||
[processorNode.id]: {
|
||||
...processorNode,
|
||||
// Control images are always intermediate - do not save to gallery
|
||||
is_intermediate: true,
|
||||
},
|
||||
},
|
||||
edges: [],
|
||||
},
|
||||
@@ -91,16 +115,21 @@ export const addControlAdapterPreprocessor = (startAppListening: AppStartListeni
|
||||
},
|
||||
};
|
||||
|
||||
// Kick off the processor batch
|
||||
const req = dispatch(
|
||||
queueApi.endpoints.enqueueBatch.initiate(enqueueBatchArg, {
|
||||
fixedCacheKey: 'enqueueBatch',
|
||||
})
|
||||
);
|
||||
|
||||
try {
|
||||
const req = dispatch(
|
||||
queueApi.endpoints.enqueueBatch.initiate(enqueueBatchArg, {
|
||||
fixedCacheKey: 'enqueueBatch',
|
||||
})
|
||||
);
|
||||
const enqueueResult = await req.unwrap();
|
||||
req.reset();
|
||||
// TODO(psyche): Update the pydantic models, pretty sure we will _always_ have a batch_id here, but the model says it's optional
|
||||
assert(enqueueResult.batch.batch_id, 'Batch ID not returned from queue');
|
||||
dispatch(caLayerProcessorPendingBatchIdChanged({ layerId, batchId: enqueueResult.batch.batch_id }));
|
||||
log.debug({ enqueueResult: parseify(enqueueResult) }, t('queue.graphQueued'));
|
||||
|
||||
// Wait for the processor node to complete
|
||||
const [invocationCompleteAction] = await take(
|
||||
(action): action is ReturnType<typeof socketInvocationComplete> =>
|
||||
socketInvocationComplete.match(action) &&
|
||||
@@ -109,47 +138,51 @@ export const addControlAdapterPreprocessor = (startAppListening: AppStartListeni
|
||||
);
|
||||
|
||||
// We still have to check the output type
|
||||
if (isImageOutput(invocationCompleteAction.payload.data.result)) {
|
||||
const { image_name } = invocationCompleteAction.payload.data.result.image;
|
||||
assert(
|
||||
isImageOutput(invocationCompleteAction.payload.data.result),
|
||||
`Processor did not return an image output, got: ${invocationCompleteAction.payload.data.result}`
|
||||
);
|
||||
const { image_name } = invocationCompleteAction.payload.data.result.image;
|
||||
|
||||
// Wait for the ImageDTO to be received
|
||||
const [{ payload }] = await take(
|
||||
(action) =>
|
||||
imagesApi.endpoints.getImageDTO.matchFulfilled(action) && action.payload.image_name === image_name
|
||||
);
|
||||
const imageDTO = await getImageDTO(image_name);
|
||||
assert(imageDTO, "Failed to fetch processor output's image DTO");
|
||||
|
||||
const imageDTO = payload as ImageDTO;
|
||||
|
||||
log.debug({ layerId, imageDTO }, 'ControlNet image processed');
|
||||
|
||||
// Update the processed image in the store
|
||||
dispatch(
|
||||
caLayerProcessedImageChanged({
|
||||
layerId,
|
||||
imageDTO,
|
||||
})
|
||||
);
|
||||
dispatch(caLayerIsProcessingImageChanged({ layerId, isProcessingImage: false }));
|
||||
}
|
||||
// Whew! We made it. Update the layer with the processed image
|
||||
log.debug({ layerId, imageDTO }, 'ControlNet image processed');
|
||||
dispatch(caLayerProcessedImageChanged({ layerId, imageDTO }));
|
||||
dispatch(caLayerProcessorPendingBatchIdChanged({ layerId, batchId: null }));
|
||||
} catch (error) {
|
||||
log.error({ enqueueBatchArg: parseify(enqueueBatchArg) }, t('queue.graphFailedToQueue'));
|
||||
dispatch(caLayerIsProcessingImageChanged({ layerId, isProcessingImage: false }));
|
||||
if (signal.aborted) {
|
||||
// The listener was canceled - we need to cancel the pending processor batch, if there is one (could have changed by now).
|
||||
const pendingBatchId = getState()
|
||||
.controlLayers.present.layers.filter(isControlAdapterLayer)
|
||||
.find((l) => l.id === layerId)?.controlAdapter.processorPendingBatchId;
|
||||
if (pendingBatchId) {
|
||||
cancelProcessorBatch(dispatch, layerId, pendingBatchId);
|
||||
}
|
||||
log.trace('Control Adapter preprocessor cancelled');
|
||||
} else {
|
||||
// Some other error condition...
|
||||
log.error({ enqueueBatchArg: parseify(enqueueBatchArg) }, t('queue.graphFailedToQueue'));
|
||||
|
||||
if (error instanceof Object) {
|
||||
if ('data' in error && 'status' in error) {
|
||||
if (error.status === 403) {
|
||||
dispatch(caLayerImageChanged({ layerId, imageDTO: null }));
|
||||
return;
|
||||
if (error instanceof Object) {
|
||||
if ('data' in error && 'status' in error) {
|
||||
if (error.status === 403) {
|
||||
dispatch(caLayerImageChanged({ layerId, imageDTO: null }));
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dispatch(
|
||||
addToast({
|
||||
title: t('queue.graphFailedToQueue'),
|
||||
status: 'error',
|
||||
})
|
||||
);
|
||||
dispatch(
|
||||
addToast({
|
||||
title: t('queue.graphFailedToQueue'),
|
||||
status: 'error',
|
||||
})
|
||||
);
|
||||
}
|
||||
} finally {
|
||||
req.reset();
|
||||
}
|
||||
},
|
||||
});
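Note: the rewritten listener above leans on the RTK listener middleware's built-in debouncing (cancelActiveListeners + delay), detects cancellation through signal.aborted, and awaits getImageDTO directly instead of waiting for a matching fulfilled action. A stripped-down sketch of that debounce-and-cancel shape, with placeholder action names and a stubbed work function standing in for the enqueue/cancel logic:

import { createAction, createListenerMiddleware } from '@reduxjs/toolkit';

const valueChanged = createAction<{ id: string; value: string }>('example/valueChanged');

const listenerMiddleware = createListenerMiddleware();

const DEBOUNCE_MS = 300;

// Placeholder for the real work (e.g. enqueueing a processor batch); only here so the sketch type-checks.
const doExpensiveWork = async (_id: string, _value: string): Promise<void> => {};

listenerMiddleware.startListening({
  actionCreator: valueChanged,
  effect: async (action, { cancelActiveListeners, delay, signal }) => {
    // Cancel any in-flight run of this listener, then debounce the new one.
    cancelActiveListeners();
    await delay(DEBOUNCE_MS);

    try {
      await doExpensiveWork(action.payload.id, action.payload.value);
    } catch (error) {
      if (signal.aborted) {
        // A newer action superseded this run - clean up anything that was started, then bail.
        return;
      }
      throw error;
    }
  },
});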
@@ -8,8 +8,8 @@ import { blobToDataURL } from 'features/canvas/util/blobToDataURL';
|
||||
import { getCanvasData } from 'features/canvas/util/getCanvasData';
|
||||
import { getCanvasGenerationMode } from 'features/canvas/util/getCanvasGenerationMode';
|
||||
import { canvasGraphBuilt } from 'features/nodes/store/actions';
|
||||
import { buildCanvasGraph } from 'features/nodes/util/graph/buildCanvasGraph';
|
||||
import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
|
||||
import { buildCanvasGraph } from 'features/nodes/util/graph/canvas/buildCanvasGraph';
|
||||
import { imagesApi } from 'services/api/endpoints/images';
|
||||
import { queueApi } from 'services/api/endpoints/queue';
|
||||
import type { ImageDTO } from 'services/api/types';
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
import { enqueueRequested } from 'app/store/actions';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { isImageViewerOpenChanged } from 'features/gallery/store/gallerySlice';
|
||||
import { buildGenerationTabGraph } from 'features/nodes/util/graph/buildGenerationTabGraph';
|
||||
import { buildGenerationTabSDXLGraph } from 'features/nodes/util/graph/buildGenerationTabSDXLGraph';
|
||||
import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
|
||||
import { buildGenerationTabGraph } from 'features/nodes/util/graph/generation/buildGenerationTabGraph';
|
||||
import { buildGenerationTabSDXLGraph } from 'features/nodes/util/graph/generation/buildGenerationTabSDXLGraph';
|
||||
import { queueApi } from 'services/api/endpoints/queue';
|
||||
|
||||
export const addEnqueueRequestedLinear = (startAppListening: AppStartListening) => {
|
||||
@@ -18,7 +18,7 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)
|
||||
|
||||
let graph;
|
||||
|
||||
if (model && model.base === 'sdxl') {
|
||||
if (model?.base === 'sdxl') {
|
||||
graph = await buildGenerationTabSDXLGraph(state);
|
||||
} else {
|
||||
graph = await buildGenerationTabGraph(state);
|
||||
@@ -32,7 +32,7 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)
|
||||
})
|
||||
);
|
||||
try {
|
||||
req.unwrap();
|
||||
await req.unwrap();
|
||||
if (shouldShowProgressInViewer) {
|
||||
dispatch(isImageViewerOpenChanged(true));
|
||||
}
|
||||
|
||||
@@ -11,9 +11,9 @@ export const addEnqueueRequestedNodes = (startAppListening: AppStartListening) =
|
||||
enqueueRequested.match(action) && action.payload.tabName === 'workflows',
|
||||
effect: async (action, { getState, dispatch }) => {
|
||||
const state = getState();
|
||||
const { nodes, edges } = state.nodes;
|
||||
const { nodes, edges } = state.nodes.present;
|
||||
const workflow = state.workflow;
|
||||
const graph = buildNodesGraph(state.nodes);
|
||||
const graph = buildNodesGraph(state.nodes.present);
|
||||
const builtWorkflow = buildWorkflowWithValidation({
|
||||
nodes,
|
||||
edges,
|
||||
@@ -39,7 +39,11 @@ export const addEnqueueRequestedNodes = (startAppListening: AppStartListening) =
|
||||
fixedCacheKey: 'enqueueBatch',
|
||||
})
|
||||
);
|
||||
req.reset();
|
||||
try {
|
||||
await req.unwrap();
|
||||
} finally {
|
||||
req.reset();
|
||||
}
|
||||
},
|
||||
});
|
||||
};
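Note: both enqueue listeners above now await req.unwrap() and release the RTK Query subscription with req.reset() in a finally block. A small sketch of the intended shape; enqueueOnce is a hypothetical helper, while the endpoint, dispatch type, and BatchConfig are the ones already imported in the hunks above:

import type { AppDispatch } from 'app/store/store';
import { queueApi } from 'services/api/endpoints/queue';
import type { BatchConfig } from 'services/api/types';

export const enqueueOnce = async (dispatch: AppDispatch, batchConfig: BatchConfig) => {
  // `initiate` starts the mutation and returns a promise-like object holding a cache subscription.
  const req = dispatch(queueApi.endpoints.enqueueBatch.initiate(batchConfig, { fixedCacheKey: 'enqueueBatch' }));
  try {
    // Without `await`, a rejected mutation becomes an unhandled rejection instead of reaching the caller's catch.
    return await req.unwrap();
  } finally {
    // Release the subscription whether the mutation succeeded or failed.
    req.reset();
  }
};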
@@ -1,7 +1,7 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { parseify } from 'common/util/serialize';
|
||||
import { nodeTemplatesBuilt } from 'features/nodes/store/nodesSlice';
|
||||
import { $templates } from 'features/nodes/store/nodesSlice';
|
||||
import { parseSchema } from 'features/nodes/util/schema/parseSchema';
|
||||
import { size } from 'lodash-es';
|
||||
import { appInfoApi } from 'services/api/endpoints/appInfo';
|
||||
@@ -9,7 +9,7 @@ import { appInfoApi } from 'services/api/endpoints/appInfo';
|
||||
export const addGetOpenAPISchemaListener = (startAppListening: AppStartListening) => {
|
||||
startAppListening({
|
||||
matcher: appInfoApi.endpoints.getOpenAPISchema.matchFulfilled,
|
||||
effect: (action, { dispatch, getState }) => {
|
||||
effect: (action, { getState }) => {
|
||||
const log = logger('system');
|
||||
const schemaJSON = action.payload;
|
||||
|
||||
@@ -20,7 +20,7 @@ export const addGetOpenAPISchemaListener = (startAppListening: AppStartListening
|
||||
|
||||
log.debug({ nodeTemplates: parseify(nodeTemplates) }, `Built ${size(nodeTemplates)} node templates`);
|
||||
|
||||
dispatch(nodeTemplatesBuilt(nodeTemplates));
|
||||
$templates.set(nodeTemplates);
|
||||
},
|
||||
});
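Note: node templates now live in a nanostores atom ($templates) rather than being dispatched into the redux nodes slice. A minimal sketch of that pattern, assuming a simplified Templates shape; the helper and hook names are illustrative:

import { useStore } from '@nanostores/react';
import { atom } from 'nanostores';

// Record of node templates keyed by node type (shape simplified for the sketch).
type Templates = Record<string, { title: string }>;

// Module-level atom - readable from listeners and plain functions without a store reference.
export const $templates = atom<Templates>({});

// Outside React (e.g. in a listener effect):
export const setTemplates = (templates: Templates) => {
  $templates.set(templates);
};

// Inside React - the component re-renders when the atom changes:
export const useNodeTemplate = (type: string) => {
  const templates = useStore($templates);
  return templates[type] ?? null;
};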
@@ -29,7 +29,7 @@ import type { ImageDTO } from 'services/api/types';
|
||||
import { imagesSelectors } from 'services/api/util';
|
||||
|
||||
const deleteNodesImages = (state: RootState, dispatch: AppDispatch, imageDTO: ImageDTO) => {
|
||||
state.nodes.nodes.forEach((node) => {
|
||||
state.nodes.present.nodes.forEach((node) => {
|
||||
if (!isInvocationNode(node)) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -1,5 +1,8 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { deepClone } from 'common/util/deepClone';
|
||||
import { $nodeExecutionStates, upsertExecutionState } from 'features/nodes/hooks/useExecutionState';
|
||||
import { zNodeStatus } from 'features/nodes/types/invocation';
|
||||
import { socketGeneratorProgress } from 'services/events/actions';
|
||||
|
||||
const log = logger('socketio');
|
||||
@@ -9,6 +12,14 @@ export const addGeneratorProgressEventListener = (startAppListening: AppStartLis
|
||||
actionCreator: socketGeneratorProgress,
|
||||
effect: (action) => {
|
||||
log.trace(action.payload, `Generator progress`);
|
||||
const { source_node_id, step, total_steps, progress_image } = action.payload.data;
|
||||
const nes = deepClone($nodeExecutionStates.get()[source_node_id]);
|
||||
if (nes) {
|
||||
nes.status = zNodeStatus.enum.IN_PROGRESS;
|
||||
nes.progress = (step + 1) / total_steps;
|
||||
nes.progressImage = progress_image ?? null;
|
||||
upsertExecutionState(nes.nodeId, nes);
|
||||
}
|
||||
},
|
||||
});
|
||||
};
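Note: the socket listeners above and below all follow the same shape: read the node's execution state out of a nanostores map, clone it, mutate the clone, and write it back. A compact sketch of such a store with an upsert helper, using a simplified state shape; the real store is the one imported from features/nodes/hooks/useExecutionState:

import { map } from 'nanostores';

type NodeExecutionState = {
  nodeId: string;
  status: 'PENDING' | 'IN_PROGRESS' | 'COMPLETED' | 'FAILED';
  progress: number | null;
  progressImage: unknown;
  outputs: unknown[];
  error: string | null;
};

// Map store: one entry per node id; setKey only notifies subscribers of that key.
export const $nodeExecutionStates = map<Record<string, NodeExecutionState>>({});

export const upsertExecutionState = (nodeId: string, state: NodeExecutionState) => {
  $nodeExecutionStates.setKey(nodeId, state);
};

// Example: mark a node in progress when a generator-progress event arrives.
export const onGeneratorProgress = (nodeId: string, step: number, totalSteps: number) => {
  const current = $nodeExecutionStates.get()[nodeId];
  if (!current) {
    return;
  }
  upsertExecutionState(nodeId, {
    ...current,
    status: 'IN_PROGRESS',
    progress: (step + 1) / totalSteps,
  });
};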
@@ -1,5 +1,6 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { deepClone } from 'common/util/deepClone';
|
||||
import { parseify } from 'common/util/serialize';
|
||||
import { addImageToStagingArea } from 'features/canvas/store/canvasSlice';
|
||||
import {
|
||||
@@ -9,7 +10,9 @@ import {
|
||||
isImageViewerOpenChanged,
|
||||
} from 'features/gallery/store/gallerySlice';
|
||||
import { IMAGE_CATEGORIES } from 'features/gallery/store/types';
|
||||
import { $nodeExecutionStates, upsertExecutionState } from 'features/nodes/hooks/useExecutionState';
|
||||
import { isImageOutput } from 'features/nodes/types/common';
|
||||
import { zNodeStatus } from 'features/nodes/types/invocation';
|
||||
import { CANVAS_OUTPUT } from 'features/nodes/util/graph/constants';
|
||||
import { boardsApi } from 'services/api/endpoints/boards';
|
||||
import { imagesApi } from 'services/api/endpoints/images';
|
||||
@@ -28,7 +31,7 @@ export const addInvocationCompleteEventListener = (startAppListening: AppStartLi
|
||||
const { data } = action.payload;
|
||||
log.debug({ data: parseify(data) }, `Invocation complete (${action.payload.data.node.type})`);
|
||||
|
||||
const { result, node, queue_batch_id } = data;
|
||||
const { result, node, queue_batch_id, source_node_id } = data;
|
||||
// This complete event has an associated image output
|
||||
if (isImageOutput(result) && !nodeTypeDenylist.includes(node.type)) {
|
||||
const { image_name } = result.image;
|
||||
@@ -110,6 +113,16 @@ export const addInvocationCompleteEventListener = (startAppListening: AppStartLi
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const nes = deepClone($nodeExecutionStates.get()[source_node_id]);
|
||||
if (nes) {
|
||||
nes.status = zNodeStatus.enum.COMPLETED;
|
||||
if (nes.progress !== null) {
|
||||
nes.progress = 1;
|
||||
}
|
||||
nes.outputs.push(result);
|
||||
upsertExecutionState(nes.nodeId, nes);
|
||||
}
|
||||
},
|
||||
});
|
||||
};
|
||||
|
||||
@@ -1,5 +1,8 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { deepClone } from 'common/util/deepClone';
|
||||
import { $nodeExecutionStates, upsertExecutionState } from 'features/nodes/hooks/useExecutionState';
|
||||
import { zNodeStatus } from 'features/nodes/types/invocation';
|
||||
import { socketInvocationError } from 'services/events/actions';
|
||||
|
||||
const log = logger('socketio');
|
||||
@@ -9,6 +12,15 @@ export const addInvocationErrorEventListener = (startAppListening: AppStartListe
|
||||
actionCreator: socketInvocationError,
|
||||
effect: (action) => {
|
||||
log.error(action.payload, `Invocation error (${action.payload.data.node.type})`);
|
||||
const { source_node_id } = action.payload.data;
|
||||
const nes = deepClone($nodeExecutionStates.get()[source_node_id]);
|
||||
if (nes) {
|
||||
nes.status = zNodeStatus.enum.FAILED;
|
||||
nes.error = action.payload.data.error;
|
||||
nes.progress = null;
|
||||
nes.progressImage = null;
|
||||
upsertExecutionState(nes.nodeId, nes);
|
||||
}
|
||||
},
|
||||
});
|
||||
};
|
||||
|
||||
@@ -1,5 +1,8 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { deepClone } from 'common/util/deepClone';
|
||||
import { $nodeExecutionStates, upsertExecutionState } from 'features/nodes/hooks/useExecutionState';
|
||||
import { zNodeStatus } from 'features/nodes/types/invocation';
|
||||
import { socketInvocationStarted } from 'services/events/actions';
|
||||
|
||||
const log = logger('socketio');
|
||||
@@ -9,6 +12,12 @@ export const addInvocationStartedEventListener = (startAppListening: AppStartLis
|
||||
actionCreator: socketInvocationStarted,
|
||||
effect: (action) => {
|
||||
log.debug(action.payload, `Invocation started (${action.payload.data.node.type})`);
|
||||
const { source_node_id } = action.payload.data;
|
||||
const nes = deepClone($nodeExecutionStates.get()[source_node_id]);
|
||||
if (nes) {
|
||||
nes.status = zNodeStatus.enum.IN_PROGRESS;
|
||||
upsertExecutionState(nes.nodeId, nes);
|
||||
}
|
||||
},
|
||||
});
|
||||
};
|
||||
|
||||
@@ -1,5 +1,9 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { deepClone } from 'common/util/deepClone';
|
||||
import { $nodeExecutionStates } from 'features/nodes/hooks/useExecutionState';
|
||||
import { zNodeStatus } from 'features/nodes/types/invocation';
|
||||
import { forEach } from 'lodash-es';
|
||||
import { queueApi, queueItemsAdapter } from 'services/api/endpoints/queue';
|
||||
import { socketQueueItemStatusChanged } from 'services/events/actions';
|
||||
|
||||
@@ -54,6 +58,21 @@ export const addSocketQueueItemStatusChangedEventListener = (startAppListening:
|
||||
dispatch(
|
||||
queueApi.util.invalidateTags(['CurrentSessionQueueItem', 'NextSessionQueueItem', 'InvocationCacheStatus'])
|
||||
);
|
||||
|
||||
if (['in_progress'].includes(action.payload.data.queue_item.status)) {
|
||||
forEach($nodeExecutionStates.get(), (nes) => {
|
||||
if (!nes) {
|
||||
return;
|
||||
}
|
||||
const clone = deepClone(nes);
|
||||
clone.status = zNodeStatus.enum.PENDING;
|
||||
clone.error = null;
|
||||
clone.progress = null;
|
||||
clone.progressImage = null;
|
||||
clone.outputs = [];
|
||||
$nodeExecutionStates.setKey(clone.nodeId, clone);
|
||||
});
|
||||
}
|
||||
},
|
||||
});
|
||||
};
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { updateAllNodesRequested } from 'features/nodes/store/actions';
|
||||
import { nodeReplaced } from 'features/nodes/store/nodesSlice';
|
||||
import { $templates, nodesChanged } from 'features/nodes/store/nodesSlice';
|
||||
import { NodeUpdateError } from 'features/nodes/types/error';
|
||||
import { isInvocationNode } from 'features/nodes/types/invocation';
|
||||
import { getNeedsUpdate, updateNode } from 'features/nodes/util/node/nodeUpdate';
|
||||
@@ -14,7 +14,8 @@ export const addUpdateAllNodesRequestedListener = (startAppListening: AppStartLi
|
||||
actionCreator: updateAllNodesRequested,
|
||||
effect: (action, { dispatch, getState }) => {
|
||||
const log = logger('nodes');
|
||||
const { nodes, templates } = getState().nodes;
|
||||
const { nodes } = getState().nodes.present;
|
||||
const templates = $templates.get();
|
||||
|
||||
let unableToUpdateCount = 0;
|
||||
|
||||
@@ -24,13 +25,18 @@ export const addUpdateAllNodesRequestedListener = (startAppListening: AppStartLi
|
||||
unableToUpdateCount++;
|
||||
return;
|
||||
}
|
||||
if (!getNeedsUpdate(node, template)) {
|
||||
if (!getNeedsUpdate(node.data, template)) {
|
||||
// No need to increment the count here, since we're not actually updating
|
||||
return;
|
||||
}
|
||||
try {
|
||||
const updatedNode = updateNode(node, template);
|
||||
dispatch(nodeReplaced({ nodeId: updatedNode.id, node: updatedNode }));
|
||||
dispatch(
|
||||
nodesChanged([
|
||||
{ type: 'remove', id: updatedNode.id },
|
||||
{ type: 'add', item: updatedNode },
|
||||
])
|
||||
);
|
||||
} catch (e) {
|
||||
if (e instanceof NodeUpdateError) {
|
||||
unableToUpdateCount++;
|
||||
|
||||
@@ -2,32 +2,51 @@ import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { parseify } from 'common/util/serialize';
|
||||
import { workflowLoaded, workflowLoadRequested } from 'features/nodes/store/actions';
|
||||
import { $templates } from 'features/nodes/store/nodesSlice';
|
||||
import { $flow } from 'features/nodes/store/reactFlowInstance';
|
||||
import type { Templates } from 'features/nodes/store/types';
|
||||
import { WorkflowMigrationError, WorkflowVersionError } from 'features/nodes/types/error';
|
||||
import { graphToWorkflow } from 'features/nodes/util/workflow/graphToWorkflow';
|
||||
import { validateWorkflow } from 'features/nodes/util/workflow/validateWorkflow';
|
||||
import { addToast } from 'features/system/store/systemSlice';
|
||||
import { makeToast } from 'features/system/util/makeToast';
|
||||
import { t } from 'i18next';
|
||||
import type { GraphAndWorkflowResponse, NonNullableGraph } from 'services/api/types';
|
||||
import { z } from 'zod';
|
||||
import { fromZodError } from 'zod-validation-error';
|
||||
|
||||
const getWorkflow = (data: GraphAndWorkflowResponse, templates: Templates) => {
|
||||
if (data.workflow) {
|
||||
// Prefer to load the workflow if it's available - it has more information
|
||||
const parsed = JSON.parse(data.workflow);
|
||||
return validateWorkflow(parsed, templates);
|
||||
} else if (data.graph) {
|
||||
// Else we fall back on the graph, using the graphToWorkflow function to convert and do layout
|
||||
const parsed = JSON.parse(data.graph);
|
||||
const workflow = graphToWorkflow(parsed as NonNullableGraph, true);
|
||||
return validateWorkflow(workflow, templates);
|
||||
} else {
|
||||
throw new Error('No workflow or graph provided');
|
||||
}
|
||||
};
|
||||
|
||||
export const addWorkflowLoadRequestedListener = (startAppListening: AppStartListening) => {
|
||||
startAppListening({
|
||||
actionCreator: workflowLoadRequested,
|
||||
effect: (action, { dispatch, getState }) => {
|
||||
effect: (action, { dispatch }) => {
|
||||
const log = logger('nodes');
|
||||
const { workflow, asCopy } = action.payload;
|
||||
const nodeTemplates = getState().nodes.templates;
|
||||
const { data, asCopy } = action.payload;
|
||||
const nodeTemplates = $templates.get();
|
||||
|
||||
try {
|
||||
const { workflow: validatedWorkflow, warnings } = validateWorkflow(workflow, nodeTemplates);
|
||||
const { workflow, warnings } = getWorkflow(data, nodeTemplates);
|
||||
|
||||
if (asCopy) {
|
||||
// If we're loading a copy, we need to remove the ID so that the backend will create a new workflow
|
||||
delete validatedWorkflow.id;
|
||||
delete workflow.id;
|
||||
}
|
||||
|
||||
dispatch(workflowLoaded(validatedWorkflow));
|
||||
dispatch(workflowLoaded(workflow));
|
||||
if (!warnings.length) {
|
||||
dispatch(
|
||||
addToast(
|
||||
|
||||
@@ -21,7 +21,8 @@ import { galleryPersistConfig, gallerySlice } from 'features/gallery/store/galle
|
||||
import { hrfPersistConfig, hrfSlice } from 'features/hrf/store/hrfSlice';
|
||||
import { loraPersistConfig, loraSlice } from 'features/lora/store/loraSlice';
|
||||
import { modelManagerV2PersistConfig, modelManagerV2Slice } from 'features/modelManagerV2/store/modelManagerV2Slice';
|
||||
import { nodesPersistConfig, nodesSlice } from 'features/nodes/store/nodesSlice';
|
||||
import { nodesPersistConfig, nodesSlice, nodesUndoableConfig } from 'features/nodes/store/nodesSlice';
|
||||
import { workflowSettingsPersistConfig, workflowSettingsSlice } from 'features/nodes/store/workflowSettingsSlice';
|
||||
import { workflowPersistConfig, workflowSlice } from 'features/nodes/store/workflowSlice';
|
||||
import { generationPersistConfig, generationSlice } from 'features/parameters/store/generationSlice';
|
||||
import { postprocessingPersistConfig, postprocessingSlice } from 'features/parameters/store/postprocessingSlice';
|
||||
@@ -50,7 +51,7 @@ const allReducers = {
|
||||
[canvasSlice.name]: canvasSlice.reducer,
|
||||
[gallerySlice.name]: gallerySlice.reducer,
|
||||
[generationSlice.name]: generationSlice.reducer,
|
||||
[nodesSlice.name]: nodesSlice.reducer,
|
||||
[nodesSlice.name]: undoable(nodesSlice.reducer, nodesUndoableConfig),
|
||||
[postprocessingSlice.name]: postprocessingSlice.reducer,
|
||||
[systemSlice.name]: systemSlice.reducer,
|
||||
[configSlice.name]: configSlice.reducer,
|
||||
@@ -66,6 +67,7 @@ const allReducers = {
|
||||
[workflowSlice.name]: workflowSlice.reducer,
|
||||
[hrfSlice.name]: hrfSlice.reducer,
|
||||
[controlLayersSlice.name]: undoable(controlLayersSlice.reducer, controlLayersUndoableConfig),
|
||||
[workflowSettingsSlice.name]: workflowSettingsSlice.reducer,
|
||||
[api.reducerPath]: api.reducer,
|
||||
};
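Note: the nodes reducer is now wrapped with redux-undo (like the control layers reducer already was), which is why listeners read state.nodes.present. A minimal sketch of wrapping a slice with undoable; the slice and the options object are illustrative, not the app's actual nodesUndoableConfig:

import { combineReducers, createSlice, type PayloadAction } from '@reduxjs/toolkit';
import undoable, { excludeAction } from 'redux-undo';

const exampleSlice = createSlice({
  name: 'example',
  initialState: { items: [] as string[] },
  reducers: {
    itemAdded: (state, action: PayloadAction<string>) => {
      state.items.push(action.payload);
    },
  },
});

export const rootReducer = combineReducers({
  // redux-undo nests the slice state under { past, present, future }, hence `state.example.present`.
  [exampleSlice.name]: undoable(exampleSlice.reducer, {
    // Illustrative options: cap the history and (optionally) keep selected action types out of it.
    limit: 64,
    filter: excludeAction([]),
  }),
});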
@@ -111,6 +113,7 @@ const persistConfigs: { [key in keyof typeof allReducers]?: PersistConfig } = {
|
||||
[modelManagerV2PersistConfig.name]: modelManagerV2PersistConfig,
|
||||
[hrfPersistConfig.name]: hrfPersistConfig,
|
||||
[controlLayersPersistConfig.name]: controlLayersPersistConfig,
|
||||
[workflowSettingsPersistConfig.name]: workflowSettingsPersistConfig,
|
||||
};
|
||||
|
||||
const unserialize: UnserializeFunction = (data, key) => {
|
||||
|
||||
@@ -13,6 +13,7 @@ type UseGroupedModelComboboxArg<T extends AnyModelConfig> = {
|
||||
onChange: (value: T | null) => void;
|
||||
getIsDisabled?: (model: T) => boolean;
|
||||
isLoading?: boolean;
|
||||
groupByType?: boolean;
|
||||
};
|
||||
|
||||
type UseGroupedModelComboboxReturn = {
|
||||
@@ -23,17 +24,21 @@ type UseGroupedModelComboboxReturn = {
|
||||
noOptionsMessage: () => string;
|
||||
};
|
||||
|
||||
const groupByBaseFunc = <T extends AnyModelConfig>(model: T) => model.base.toUpperCase();
|
||||
const groupByBaseAndTypeFunc = <T extends AnyModelConfig>(model: T) =>
|
||||
`${model.base.toUpperCase()} / ${model.type.replaceAll('_', ' ').toUpperCase()}`;
|
||||
|
||||
export const useGroupedModelCombobox = <T extends AnyModelConfig>(
|
||||
arg: UseGroupedModelComboboxArg<T>
|
||||
): UseGroupedModelComboboxReturn => {
|
||||
const { t } = useTranslation();
|
||||
const base_model = useAppSelector((s) => s.generation.model?.base ?? 'sdxl');
|
||||
const { modelConfigs, selectedModel, getIsDisabled, onChange, isLoading } = arg;
|
||||
const { modelConfigs, selectedModel, getIsDisabled, onChange, isLoading, groupByType = false } = arg;
|
||||
const options = useMemo<GroupBase<ComboboxOption>[]>(() => {
|
||||
if (!modelConfigs) {
|
||||
return [];
|
||||
}
|
||||
const groupedModels = groupBy(modelConfigs, 'base');
|
||||
const groupedModels = groupBy(modelConfigs, groupByType ? groupByBaseAndTypeFunc : groupByBaseFunc);
|
||||
const _options = reduce(
|
||||
groupedModels,
|
||||
(acc, val, label) => {
|
||||
@@ -49,9 +54,9 @@ export const useGroupedModelCombobox = <T extends AnyModelConfig>(
|
||||
},
|
||||
[] as GroupBase<ComboboxOption>[]
|
||||
);
|
||||
_options.sort((a) => (a.label === base_model ? -1 : 1));
|
||||
_options.sort((a) => (a.label?.split('/')[0]?.toLowerCase().includes(base_model) ? -1 : 1));
|
||||
return _options;
|
||||
}, [getIsDisabled, modelConfigs, base_model]);
|
||||
}, [modelConfigs, groupByType, getIsDisabled, base_model]);
|
||||
|
||||
const value = useMemo(
|
||||
() =>
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import { useStore } from '@nanostores/react';
|
||||
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import {
|
||||
@@ -6,187 +7,230 @@ import {
|
||||
} from 'features/controlAdapters/store/controlAdaptersSlice';
|
||||
import { isControlNetOrT2IAdapter } from 'features/controlAdapters/store/types';
|
||||
import { selectControlLayersSlice } from 'features/controlLayers/store/controlLayersSlice';
|
||||
import type { Layer } from 'features/controlLayers/store/types';
|
||||
import { selectDynamicPromptsSlice } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
|
||||
import { getShouldProcessPrompt } from 'features/dynamicPrompts/util/getShouldProcessPrompt';
|
||||
import { selectNodesSlice } from 'features/nodes/store/nodesSlice';
|
||||
import { $templates, selectNodesSlice } from 'features/nodes/store/nodesSlice';
|
||||
import type { Templates } from 'features/nodes/store/types';
|
||||
import { selectWorkflowSettingsSlice } from 'features/nodes/store/workflowSettingsSlice';
|
||||
import { isInvocationNode } from 'features/nodes/types/invocation';
|
||||
import { selectGenerationSlice } from 'features/parameters/store/generationSlice';
|
||||
import { selectSystemSlice } from 'features/system/store/systemSlice';
|
||||
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
|
||||
import i18n from 'i18next';
|
||||
import { forEach } from 'lodash-es';
|
||||
import { forEach, upperFirst } from 'lodash-es';
|
||||
import { useMemo } from 'react';
|
||||
import { getConnectedEdges } from 'reactflow';
|
||||
|
||||
const selector = createMemoizedSelector(
|
||||
[
|
||||
selectControlAdaptersSlice,
|
||||
selectGenerationSlice,
|
||||
selectSystemSlice,
|
||||
selectNodesSlice,
|
||||
selectDynamicPromptsSlice,
|
||||
selectControlLayersSlice,
|
||||
activeTabNameSelector,
|
||||
],
|
||||
(controlAdapters, generation, system, nodes, dynamicPrompts, controlLayers, activeTabName) => {
|
||||
const { model } = generation;
|
||||
const { positivePrompt } = controlLayers.present;
|
||||
const LAYER_TYPE_TO_TKEY: Record<Layer['type'], string> = {
|
||||
initial_image_layer: 'controlLayers.globalInitialImage',
|
||||
control_adapter_layer: 'controlLayers.globalControlAdapter',
|
||||
ip_adapter_layer: 'controlLayers.globalIPAdapter',
|
||||
regional_guidance_layer: 'controlLayers.regionalGuidance',
|
||||
};
|
||||
|
||||
const { isConnected } = system;
|
||||
const createSelector = (templates: Templates) =>
|
||||
createMemoizedSelector(
|
||||
[
|
||||
selectControlAdaptersSlice,
|
||||
selectGenerationSlice,
|
||||
selectSystemSlice,
|
||||
selectNodesSlice,
|
||||
selectWorkflowSettingsSlice,
|
||||
selectDynamicPromptsSlice,
|
||||
selectControlLayersSlice,
|
||||
activeTabNameSelector,
|
||||
],
|
||||
(controlAdapters, generation, system, nodes, workflowSettings, dynamicPrompts, controlLayers, activeTabName) => {
|
||||
const { model } = generation;
|
||||
const { size } = controlLayers.present;
|
||||
const { positivePrompt } = controlLayers.present;
|
||||
|
||||
const reasons: string[] = [];
|
||||
const { isConnected } = system;
|
||||
|
||||
// Cannot generate if not connected
|
||||
if (!isConnected) {
|
||||
reasons.push(i18n.t('parameters.invoke.systemDisconnected'));
|
||||
}
|
||||
const reasons: { prefix?: string; content: string }[] = [];
|
||||
|
||||
if (activeTabName === 'workflows') {
|
||||
if (nodes.shouldValidateGraph) {
|
||||
if (!nodes.nodes.length) {
|
||||
reasons.push(i18n.t('parameters.invoke.noNodesInGraph'));
|
||||
// Cannot generate if not connected
|
||||
if (!isConnected) {
|
||||
reasons.push({ content: i18n.t('parameters.invoke.systemDisconnected') });
|
||||
}
|
||||
|
||||
if (activeTabName === 'workflows') {
|
||||
if (workflowSettings.shouldValidateGraph) {
|
||||
if (!nodes.nodes.length) {
|
||||
reasons.push({ content: i18n.t('parameters.invoke.noNodesInGraph') });
|
||||
}
|
||||
|
||||
nodes.nodes.forEach((node) => {
|
||||
if (!isInvocationNode(node)) {
|
||||
return;
|
||||
}
|
||||
|
||||
const nodeTemplate = templates[node.data.type];
|
||||
|
||||
if (!nodeTemplate) {
|
||||
// Node type not found
|
||||
reasons.push({ content: i18n.t('parameters.invoke.missingNodeTemplate') });
|
||||
return;
|
||||
}
|
||||
|
||||
const connectedEdges = getConnectedEdges([node], nodes.edges);
|
||||
|
||||
forEach(node.data.inputs, (field) => {
|
||||
const fieldTemplate = nodeTemplate.inputs[field.name];
|
||||
const hasConnection = connectedEdges.some(
|
||||
(edge) => edge.target === node.id && edge.targetHandle === field.name
|
||||
);
|
||||
|
||||
if (!fieldTemplate) {
|
||||
reasons.push({ content: i18n.t('parameters.invoke.missingFieldTemplate') });
|
||||
return;
|
||||
}
|
||||
|
||||
if (fieldTemplate.required && field.value === undefined && !hasConnection) {
|
||||
reasons.push({
|
||||
content: i18n.t('parameters.invoke.missingInputForField', {
|
||||
nodeLabel: node.data.label || nodeTemplate.title,
|
||||
fieldLabel: field.label || fieldTemplate.title,
|
||||
}),
|
||||
});
|
||||
return;
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
} else {
|
||||
if (dynamicPrompts.prompts.length === 0 && getShouldProcessPrompt(positivePrompt)) {
|
||||
reasons.push({ content: i18n.t('parameters.invoke.noPrompts') });
|
||||
}
|
||||
|
||||
nodes.nodes.forEach((node) => {
|
||||
if (!isInvocationNode(node)) {
|
||||
return;
|
||||
}
|
||||
if (!model) {
|
||||
reasons.push({ content: i18n.t('parameters.invoke.noModelSelected') });
|
||||
}
|
||||
|
||||
const nodeTemplate = nodes.templates[node.data.type];
|
||||
if (activeTabName === 'generation') {
|
||||
// Handling for generation tab
|
||||
controlLayers.present.layers
|
||||
.filter((l) => l.isEnabled)
|
||||
.forEach((l, i) => {
|
||||
const layerLiteral = i18n.t('controlLayers.layers_one');
|
||||
const layerNumber = i + 1;
|
||||
const layerType = i18n.t(LAYER_TYPE_TO_TKEY[l.type]);
|
||||
const prefix = `${layerLiteral} #${layerNumber} (${layerType})`;
|
||||
const problems: string[] = [];
|
||||
if (l.type === 'control_adapter_layer') {
|
||||
// Must have model
|
||||
if (!l.controlAdapter.model) {
|
||||
problems.push(i18n.t('parameters.invoke.layer.controlAdapterNoModelSelected'));
|
||||
}
|
||||
// Model base must match
|
||||
if (l.controlAdapter.model?.base !== model?.base) {
|
||||
problems.push(i18n.t('parameters.invoke.layer.controlAdapterIncompatibleBaseModel'));
|
||||
}
|
||||
// Must have a control image OR, if it has a processor, it must have a processed image
|
||||
if (!l.controlAdapter.image) {
|
||||
problems.push(i18n.t('parameters.invoke.layer.controlAdapterNoImageSelected'));
|
||||
} else if (l.controlAdapter.processorConfig && !l.controlAdapter.processedImage) {
|
||||
problems.push(i18n.t('parameters.invoke.layer.controlAdapterImageNotProcessed'));
|
||||
}
|
||||
// T2I Adapters require images have dimensions that are multiples of 64 (SD1.5) or 32 (SDXL)
|
||||
if (l.controlAdapter.type === 't2i_adapter') {
|
||||
const multiple = model?.base === 'sdxl' ? 32 : 64;
|
||||
if (size.width % multiple !== 0 || size.height % multiple !== 0) {
|
||||
problems.push(i18n.t('parameters.invoke.layer.t2iAdapterIncompatibleDimensions', { multiple }));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!nodeTemplate) {
|
||||
// Node type not found
|
||||
reasons.push(i18n.t('parameters.invoke.missingNodeTemplate'));
|
||||
return;
|
||||
}
|
||||
if (l.type === 'ip_adapter_layer') {
|
||||
// Must have model
|
||||
if (!l.ipAdapter.model) {
|
||||
problems.push(i18n.t('parameters.invoke.layer.ipAdapterNoModelSelected'));
|
||||
}
|
||||
// Model base must match
|
||||
if (l.ipAdapter.model?.base !== model?.base) {
|
||||
problems.push(i18n.t('parameters.invoke.layer.ipAdapterIncompatibleBaseModel'));
|
||||
}
|
||||
// Must have an image
|
||||
if (!l.ipAdapter.image) {
|
||||
problems.push(i18n.t('parameters.invoke.layer.ipAdapterNoImageSelected'));
|
||||
}
|
||||
}
|
||||
|
||||
const connectedEdges = getConnectedEdges([node], nodes.edges);
|
||||
if (l.type === 'initial_image_layer') {
|
||||
// Must have an image
|
||||
if (!l.image) {
|
||||
problems.push(i18n.t('parameters.invoke.layer.initialImageNoImageSelected'));
|
||||
}
|
||||
}
|
||||
|
||||
forEach(node.data.inputs, (field) => {
|
||||
const fieldTemplate = nodeTemplate.inputs[field.name];
|
||||
const hasConnection = connectedEdges.some(
|
||||
(edge) => edge.target === node.id && edge.targetHandle === field.name
|
||||
);
|
||||
if (l.type === 'regional_guidance_layer') {
|
||||
// Must have a region
|
||||
if (l.maskObjects.length === 0) {
|
||||
problems.push(i18n.t('parameters.invoke.layer.rgNoRegion'));
|
||||
}
|
||||
// Must have at least 1 prompt or IP Adapter
|
||||
if (l.positivePrompt === null && l.negativePrompt === null && l.ipAdapters.length === 0) {
|
||||
problems.push(i18n.t('parameters.invoke.layer.rgNoPromptsOrIPAdapters'));
|
||||
}
|
||||
l.ipAdapters.forEach((ipAdapter) => {
|
||||
// Must have model
|
||||
if (!ipAdapter.model) {
|
||||
problems.push(i18n.t('parameters.invoke.layer.ipAdapterNoModelSelected'));
|
||||
}
|
||||
// Model base must match
|
||||
if (ipAdapter.model?.base !== model?.base) {
|
||||
problems.push(i18n.t('parameters.invoke.layer.ipAdapterIncompatibleBaseModel'));
|
||||
}
|
||||
// Must have an image
|
||||
if (!ipAdapter.image) {
|
||||
problems.push(i18n.t('parameters.invoke.layer.ipAdapterNoImageSelected'));
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
if (!fieldTemplate) {
|
||||
reasons.push(i18n.t('parameters.invoke.missingFieldTemplate'));
|
||||
return;
|
||||
}
|
||||
if (problems.length) {
|
||||
const content = upperFirst(problems.join(', '));
|
||||
reasons.push({ prefix, content });
|
||||
}
|
||||
});
|
||||
} else {
|
||||
// Handling for all other tabs
|
||||
selectControlAdapterAll(controlAdapters)
|
||||
.filter((ca) => ca.isEnabled)
|
||||
.forEach((ca, i) => {
|
||||
if (!ca.isEnabled) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (fieldTemplate.required && field.value === undefined && !hasConnection) {
|
||||
reasons.push(
|
||||
i18n.t('parameters.invoke.missingInputForField', {
|
||||
nodeLabel: node.data.label || nodeTemplate.title,
|
||||
fieldLabel: field.label || fieldTemplate.title,
|
||||
})
|
||||
);
|
||||
return;
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
} else {
|
||||
if (dynamicPrompts.prompts.length === 0 && getShouldProcessPrompt(positivePrompt)) {
|
||||
reasons.push(i18n.t('parameters.invoke.noPrompts'));
|
||||
if (!ca.model) {
|
||||
reasons.push({ content: i18n.t('parameters.invoke.noModelForControlAdapter', { number: i + 1 }) });
|
||||
} else if (ca.model.base !== model?.base) {
|
||||
// This should never happen, just a sanity check
|
||||
reasons.push({
|
||||
content: i18n.t('parameters.invoke.incompatibleBaseModelForControlAdapter', { number: i + 1 }),
|
||||
});
|
||||
}
|
||||
|
||||
if (
|
||||
!ca.controlImage ||
|
||||
(isControlNetOrT2IAdapter(ca) && !ca.processedControlImage && ca.processorType !== 'none')
|
||||
) {
|
||||
reasons.push({
|
||||
content: i18n.t('parameters.invoke.noControlImageForControlAdapter', { number: i + 1 }),
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
if (!model) {
|
||||
reasons.push(i18n.t('parameters.invoke.noModelSelected'));
|
||||
}
|
||||
|
||||
if (activeTabName === 'generation') {
|
||||
// Handling for generation tab
|
||||
controlLayers.present.layers
|
||||
.filter((l) => l.isEnabled)
|
||||
.flatMap((l) => {
|
||||
if (l.type === 'control_adapter_layer') {
|
||||
return l.controlAdapter;
|
||||
} else if (l.type === 'ip_adapter_layer') {
|
||||
return l.ipAdapter;
|
||||
} else if (l.type === 'regional_guidance_layer') {
|
||||
return l.ipAdapters;
|
||||
}
|
||||
return [];
|
||||
})
|
||||
.forEach((ca, i) => {
|
||||
const hasNoModel = !ca.model;
|
||||
const mismatchedModelBase = ca.model?.base !== model?.base;
|
||||
const hasNoImage = !ca.image;
|
||||
const imageNotProcessed =
|
||||
(ca.type === 'controlnet' || ca.type === 't2i_adapter') && !ca.processedImage && ca.processorConfig;
|
||||
|
||||
if (hasNoModel) {
|
||||
reasons.push(
|
||||
i18n.t('parameters.invoke.noModelForControlAdapter', {
|
||||
number: i + 1,
|
||||
})
|
||||
);
|
||||
}
|
||||
if (mismatchedModelBase) {
|
||||
// This should never happen, just a sanity check
|
||||
reasons.push(
|
||||
i18n.t('parameters.invoke.incompatibleBaseModelForControlAdapter', {
|
||||
number: i + 1,
|
||||
})
|
||||
);
|
||||
}
|
||||
if (hasNoImage) {
|
||||
reasons.push(
|
||||
i18n.t('parameters.invoke.noControlImageForControlAdapter', {
|
||||
number: i + 1,
|
||||
})
|
||||
);
|
||||
}
|
||||
if (imageNotProcessed) {
|
||||
reasons.push(
|
||||
i18n.t('parameters.invoke.imageNotProcessedForControlAdapter', {
|
||||
number: i + 1,
|
||||
})
|
||||
);
|
||||
}
|
||||
});
|
||||
} else {
|
||||
// Handling for all other tabs
|
||||
selectControlAdapterAll(controlAdapters)
|
||||
.filter((ca) => ca.isEnabled)
|
||||
.forEach((ca, i) => {
|
||||
if (!ca.isEnabled) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (!ca.model) {
|
||||
reasons.push(
|
||||
i18n.t('parameters.invoke.noModelForControlAdapter', {
|
||||
number: i + 1,
|
||||
})
|
||||
);
|
||||
} else if (ca.model.base !== model?.base) {
|
||||
// This should never happen, just a sanity check
|
||||
reasons.push(
|
||||
i18n.t('parameters.invoke.incompatibleBaseModelForControlAdapter', {
|
||||
number: i + 1,
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
if (
|
||||
!ca.controlImage ||
|
||||
(isControlNetOrT2IAdapter(ca) && !ca.processedControlImage && ca.processorType !== 'none')
|
||||
) {
|
||||
reasons.push(
|
||||
i18n.t('parameters.invoke.noControlImageForControlAdapter', {
|
||||
number: i + 1,
|
||||
})
|
||||
);
|
||||
}
|
||||
});
|
||||
}
|
||||
return { isReady: !reasons.length, reasons };
|
||||
}
|
||||
|
||||
return { isReady: !reasons.length, reasons };
|
||||
}
|
||||
);
|
||||
);
|
||||
|
||||
export const useIsReadyToEnqueue = () => {
|
||||
const { isReady, reasons } = useAppSelector(selector);
|
||||
return { isReady, reasons };
|
||||
const templates = useStore($templates);
|
||||
const selector = useMemo(() => createSelector(templates), [templates]);
|
||||
const value = useAppSelector(selector);
|
||||
return value;
|
||||
};
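Note: useIsReadyToEnqueue now builds its memoized selector from the $templates atom and recreates it only when the templates change. A reduced sketch of that selector-factory pattern, with simplified state shapes, a local templates atom standing in for the real one, and a single stand-in readiness check:

import { useStore } from '@nanostores/react';
import { createSelector } from '@reduxjs/toolkit';
import { atom } from 'nanostores';
import { useMemo } from 'react';
import { useSelector } from 'react-redux';

type Templates = Record<string, { title: string }>;
type SketchRootState = { nodes: { present: { nodes: { id: string; type: string }[] } } };

export const $templates = atom<Templates>({});

// Factory: close over the non-redux templates so the memoized selector only depends on redux inputs.
const createReadinessSelector = (templates: Templates) =>
  createSelector(
    (state: SketchRootState) => state.nodes.present.nodes,
    (nodes) => {
      const reasons: { content: string }[] = [];
      for (const node of nodes) {
        // Stand-in check: every node type must have a matching template.
        if (!(node.type in templates)) {
          reasons.push({ content: `Missing template for ${node.type}` });
        }
      }
      return { isReady: !reasons.length, reasons };
    }
  );

export const useIsReadyToEnqueueSketch = () => {
  const templates = useStore($templates);
  // Rebuild the selector only when the templates atom changes; redux updates flow through useSelector.
  const selector = useMemo(() => createReadinessSelector(templates), [templates]);
  return useSelector(selector);
};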
@@ -21,7 +21,6 @@ import {
|
||||
setShouldShowBoundingBox,
|
||||
} from 'features/canvas/store/canvasSlice';
|
||||
import type { CanvasLayer } from 'features/canvas/store/canvasTypes';
|
||||
import { LAYER_NAMES_DICT } from 'features/canvas/store/canvasTypes';
|
||||
import { memo, useCallback, useMemo } from 'react';
|
||||
import { useHotkeys } from 'react-hotkeys-hook';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
@@ -216,13 +215,20 @@ const IAICanvasToolbar = () => {
|
||||
[dispatch, isMaskEnabled]
|
||||
);
|
||||
|
||||
const value = useMemo(() => LAYER_NAMES_DICT.filter((o) => o.value === layer)[0], [layer]);
|
||||
const layerOptions = useMemo<{ label: string; value: CanvasLayer }[]>(
|
||||
() => [
|
||||
{ label: t('unifiedCanvas.base'), value: 'base' },
|
||||
{ label: t('unifiedCanvas.mask'), value: 'mask' },
|
||||
],
|
||||
[t]
|
||||
);
|
||||
const layerValue = useMemo(() => layerOptions.filter((o) => o.value === layer)[0] ?? null, [layer, layerOptions]);
|
||||
|
||||
return (
|
||||
<Flex alignItems="center" gap={2} flexWrap="wrap">
|
||||
<Tooltip label={`${t('unifiedCanvas.layer')} (Q)`}>
|
||||
<FormControl isDisabled={isStaging} w="5rem">
|
||||
<Combobox value={value} options={LAYER_NAMES_DICT} onChange={handleChangeLayer} />
|
||||
<Combobox value={layerValue} options={layerOptions} onChange={handleChangeLayer} />
|
||||
</FormControl>
|
||||
</Tooltip>
|
||||
|
||||
|
||||
@@ -5,11 +5,6 @@ import { z } from 'zod';
|
||||
|
||||
export type CanvasLayer = 'base' | 'mask';
|
||||
|
||||
export const LAYER_NAMES_DICT: { label: string; value: CanvasLayer }[] = [
|
||||
{ label: 'Base', value: 'base' },
|
||||
{ label: 'Mask', value: 'mask' },
|
||||
];
|
||||
|
||||
const zBoundingBoxScaleMethod = z.enum(['none', 'auto', 'manual']);
|
||||
export type BoundingBoxScaleMethod = z.infer<typeof zBoundingBoxScaleMethod>;
|
||||
export const isBoundingBoxScaleMethod = (v: unknown): v is BoundingBoxScaleMethod =>
|
||||
|
||||
@@ -5,22 +5,7 @@ import type {
|
||||
ParameterT2IAdapterModel,
|
||||
} from 'features/parameters/types/parameterSchemas';
|
||||
import type { components } from 'services/api/schema';
|
||||
import type {
|
||||
CannyImageProcessorInvocation,
|
||||
ColorMapImageProcessorInvocation,
|
||||
ContentShuffleImageProcessorInvocation,
|
||||
DepthAnythingImageProcessorInvocation,
|
||||
DWOpenposeImageProcessorInvocation,
|
||||
HedImageProcessorInvocation,
|
||||
LineartAnimeImageProcessorInvocation,
|
||||
LineartImageProcessorInvocation,
|
||||
MediapipeFaceProcessorInvocation,
|
||||
MidasDepthImageProcessorInvocation,
|
||||
MlsdImageProcessorInvocation,
|
||||
NormalbaeImageProcessorInvocation,
|
||||
PidiImageProcessorInvocation,
|
||||
ZoeDepthImageProcessorInvocation,
|
||||
} from 'services/api/types';
|
||||
import type { Invocation } from 'services/api/types';
|
||||
import type { O } from 'ts-toolbelt';
|
||||
import { z } from 'zod';
|
||||
|
||||
@@ -28,20 +13,20 @@ import { z } from 'zod';
|
||||
* Any ControlNet processor node
|
||||
*/
|
||||
export type ControlAdapterProcessorNode =
|
||||
| CannyImageProcessorInvocation
|
||||
| ColorMapImageProcessorInvocation
|
||||
| ContentShuffleImageProcessorInvocation
|
||||
| DepthAnythingImageProcessorInvocation
|
||||
| HedImageProcessorInvocation
|
||||
| LineartAnimeImageProcessorInvocation
|
||||
| LineartImageProcessorInvocation
|
||||
| MediapipeFaceProcessorInvocation
|
||||
| MidasDepthImageProcessorInvocation
|
||||
| MlsdImageProcessorInvocation
|
||||
| NormalbaeImageProcessorInvocation
|
||||
| DWOpenposeImageProcessorInvocation
|
||||
| PidiImageProcessorInvocation
|
||||
| ZoeDepthImageProcessorInvocation;
|
||||
| Invocation<'canny_image_processor'>
|
||||
| Invocation<'color_map_image_processor'>
|
||||
| Invocation<'content_shuffle_image_processor'>
|
||||
| Invocation<'depth_anything_image_processor'>
|
||||
| Invocation<'hed_image_processor'>
|
||||
| Invocation<'lineart_anime_image_processor'>
|
||||
| Invocation<'lineart_image_processor'>
|
||||
| Invocation<'mediapipe_face_processor'>
|
||||
| Invocation<'midas_depth_image_processor'>
|
||||
| Invocation<'mlsd_image_processor'>
|
||||
| Invocation<'normalbae_image_processor'>
|
||||
| Invocation<'dw_openpose_image_processor'>
|
||||
| Invocation<'pidi_image_processor'>
|
||||
| Invocation<'zoe_depth_image_processor'>;
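Note: the concrete processor invocation types are replaced by a name-indexed Invocation<'…'> lookup. A sketch of how such a lookup can be built with Extract over a discriminated union - this shows the general TypeScript technique only, not necessarily how Invocation is defined in services/api/types:

// Two toy node shapes standing in for the OpenAPI-generated invocation types.
type CannyNode = { type: 'canny_image_processor'; low_threshold?: number; high_threshold?: number };
type HedNode = { type: 'hed_image_processor'; scribble?: boolean };

type AnyNode = CannyNode | HedNode;

// Pick the member of the union whose `type` matches the given name.
type Invocation<T extends AnyNode['type']> = Extract<AnyNode, { type: T }>;

// Usage: equivalent to CannyNode.
export type Canny = Invocation<'canny_image_processor'>;

// Compile-time check that the lookup resolves to the right member.
export const example: Canny = { type: 'canny_image_processor', low_threshold: 100 };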
/**
|
||||
* Any ControlNet processor type
|
||||
@@ -71,7 +56,7 @@ export const isControlAdapterProcessorType = (v: unknown): v is ControlAdapterPr
|
||||
* The Canny processor node, with parameters flagged as required
|
||||
*/
|
||||
export type RequiredCannyImageProcessorInvocation = O.Required<
|
||||
CannyImageProcessorInvocation,
|
||||
Invocation<'canny_image_processor'>,
|
||||
'type' | 'low_threshold' | 'high_threshold' | 'image_resolution' | 'detect_resolution'
|
||||
>;
|
||||
|
||||
@@ -79,7 +64,7 @@ export type RequiredCannyImageProcessorInvocation = O.Required<
|
||||
* The Color Map processor node, with parameters flagged as required
|
||||
*/
|
||||
export type RequiredColorMapImageProcessorInvocation = O.Required<
|
||||
ColorMapImageProcessorInvocation,
|
||||
Invocation<'color_map_image_processor'>,
|
||||
'type' | 'color_map_tile_size'
|
||||
>;
|
||||
|
||||
@@ -87,7 +72,7 @@ export type RequiredColorMapImageProcessorInvocation = O.Required<
|
||||
* The ContentShuffle processor node, with parameters flagged as required
|
||||
*/
|
||||
export type RequiredContentShuffleImageProcessorInvocation = O.Required<
|
||||
ContentShuffleImageProcessorInvocation,
|
||||
Invocation<'content_shuffle_image_processor'>,
|
||||
'type' | 'detect_resolution' | 'image_resolution' | 'w' | 'h' | 'f'
|
||||
>;
|
||||
|
||||
@@ -95,7 +80,7 @@ export type RequiredContentShuffleImageProcessorInvocation = O.Required<
|
||||
* The DepthAnything processor node, with parameters flagged as required
|
||||
*/
|
||||
export type RequiredDepthAnythingImageProcessorInvocation = O.Required<
|
||||
DepthAnythingImageProcessorInvocation,
|
||||
Invocation<'depth_anything_image_processor'>,
|
||||
'type' | 'model_size' | 'resolution' | 'offload'
|
||||
>;
|
||||
|
||||
@@ -108,7 +93,7 @@ export const isDepthAnythingModelSize = (v: unknown): v is DepthAnythingModelSiz
  * The HED processor node, with parameters flagged as required
  */
 export type RequiredHedImageProcessorInvocation = O.Required<
-  HedImageProcessorInvocation,
+  Invocation<'hed_image_processor'>,
   'type' | 'detect_resolution' | 'image_resolution' | 'scribble'
 >;

@@ -116,7 +101,7 @@ export type RequiredHedImageProcessorInvocation = O.Required<
  * The Lineart Anime processor node, with parameters flagged as required
  */
 export type RequiredLineartAnimeImageProcessorInvocation = O.Required<
-  LineartAnimeImageProcessorInvocation,
+  Invocation<'lineart_anime_image_processor'>,
   'type' | 'detect_resolution' | 'image_resolution'
 >;

@@ -124,7 +109,7 @@ export type RequiredLineartAnimeImageProcessorInvocation = O.Required<
  * The Lineart processor node, with parameters flagged as required
  */
 export type RequiredLineartImageProcessorInvocation = O.Required<
-  LineartImageProcessorInvocation,
+  Invocation<'lineart_image_processor'>,
   'type' | 'detect_resolution' | 'image_resolution' | 'coarse'
 >;

@@ -132,7 +117,7 @@ export type RequiredLineartImageProcessorInvocation = O.Required<
  * The MediapipeFace processor node, with parameters flagged as required
  */
 export type RequiredMediapipeFaceProcessorInvocation = O.Required<
-  MediapipeFaceProcessorInvocation,
+  Invocation<'mediapipe_face_processor'>,
   'type' | 'max_faces' | 'min_confidence' | 'image_resolution' | 'detect_resolution'
 >;

@@ -140,7 +125,7 @@ export type RequiredMediapipeFaceProcessorInvocation = O.Required<
  * The MidasDepth processor node, with parameters flagged as required
  */
 export type RequiredMidasDepthImageProcessorInvocation = O.Required<
-  MidasDepthImageProcessorInvocation,
+  Invocation<'midas_depth_image_processor'>,
   'type' | 'a_mult' | 'bg_th' | 'image_resolution' | 'detect_resolution'
 >;

@@ -148,7 +133,7 @@ export type RequiredMidasDepthImageProcessorInvocation = O.Required<
  * The MLSD processor node, with parameters flagged as required
  */
 export type RequiredMlsdImageProcessorInvocation = O.Required<
-  MlsdImageProcessorInvocation,
+  Invocation<'mlsd_image_processor'>,
   'type' | 'detect_resolution' | 'image_resolution' | 'thr_v' | 'thr_d'
 >;

@@ -156,7 +141,7 @@ export type RequiredMlsdImageProcessorInvocation = O.Required<
  * The NormalBae processor node, with parameters flagged as required
  */
 export type RequiredNormalbaeImageProcessorInvocation = O.Required<
-  NormalbaeImageProcessorInvocation,
+  Invocation<'normalbae_image_processor'>,
   'type' | 'detect_resolution' | 'image_resolution'
 >;

@@ -164,7 +149,7 @@ export type RequiredNormalbaeImageProcessorInvocation = O.Required<
  * The DW Openpose processor node, with parameters flagged as required
  */
 export type RequiredDWOpenposeImageProcessorInvocation = O.Required<
-  DWOpenposeImageProcessorInvocation,
+  Invocation<'dw_openpose_image_processor'>,
   'type' | 'image_resolution' | 'draw_body' | 'draw_face' | 'draw_hands'
 >;

@@ -172,14 +157,14 @@ export type RequiredDWOpenposeImageProcessorInvocation = O.Required<
  * The Pidi processor node, with parameters flagged as required
  */
 export type RequiredPidiImageProcessorInvocation = O.Required<
-  PidiImageProcessorInvocation,
+  Invocation<'pidi_image_processor'>,
   'type' | 'detect_resolution' | 'image_resolution' | 'safe' | 'scribble'
 >;

 /**
  * The ZoeDepth processor node, with parameters flagged as required
  */
-export type RequiredZoeDepthImageProcessorInvocation = O.Required<ZoeDepthImageProcessorInvocation, 'type'>;
+export type RequiredZoeDepthImageProcessorInvocation = O.Required<Invocation<'zoe_depth_image_processor'>, 'type'>;

 /**
  * Any ControlNet Processor node, with its parameters flagged as required
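The `O.Required<T, K>` helper used above (from ts-toolbelt) flags the listed keys of an otherwise-optional OpenAPI-derived type as required. A minimal self-contained sketch of the same idea, using a hypothetical `HedInvocation` shape as a stand-in for the generated `Invocation<'hed_image_processor'>` type:

// Sketch only - HedInvocation is a hypothetical stand-in for the OpenAPI-derived type
type RequiredKeys<T, K extends keyof T> = Omit<T, K> & Required<Pick<T, K>>;

type HedInvocation = {
  id?: string;
  type?: 'hed_image_processor';
  detect_resolution?: number;
  image_resolution?: number;
  scribble?: boolean;
};

// Same shape, in spirit, as O.Required<Invocation<'hed_image_processor'>, 'type' | 'detect_resolution' | ...>
type RequiredHedSketch = RequiredKeys<HedInvocation, 'type' | 'detect_resolution' | 'image_resolution' | 'scribble'>;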
@@ -124,7 +124,7 @@ export const ControlAdapterImagePreview = memo(
       controlImage &&
       processedControlImage &&
       !isMouseOverImage &&
-      !controlAdapter.isProcessingImage &&
+      !controlAdapter.processorPendingBatchId &&
       controlAdapter.processorConfig !== null;

     useEffect(() => {
@@ -190,7 +190,7 @@ export const ControlAdapterImagePreview = memo(
         />
       </>

-      {controlAdapter.isProcessingImage && (
+      {controlAdapter.processorPendingBatchId !== null && (
         <Flex
           position="absolute"
           top={0}
@@ -42,6 +42,7 @@ export const ControlAdapterModelCombobox = memo(({ modelKey, onChange: onChangeM
     selectedModel,
     getIsDisabled,
     isLoading,
+    groupByType: true,
   });

   return (
@@ -2,14 +2,13 @@ import type { ComboboxOnChange } from '@invoke-ai/ui-library';
|
||||
import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library';
|
||||
import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types';
|
||||
import type { DepthAnythingModelSize, DepthAnythingProcessorConfig } from 'features/controlLayers/util/controlAdapters';
|
||||
import { CA_PROCESSOR_DATA, isDepthAnythingModelSize } from 'features/controlLayers/util/controlAdapters';
|
||||
import { isDepthAnythingModelSize } from 'features/controlLayers/util/controlAdapters';
|
||||
import { memo, useCallback, useMemo } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
|
||||
import ProcessorWrapper from './ProcessorWrapper';
|
||||
|
||||
type Props = ProcessorComponentProps<DepthAnythingProcessorConfig>;
|
||||
const DEFAULTS = CA_PROCESSOR_DATA['depth_anything_image_processor'].buildDefaults();
|
||||
|
||||
export const DepthAnythingProcessor = memo(({ onChange, config }: Props) => {
|
||||
const { t } = useTranslation();
|
||||
@@ -38,12 +37,7 @@ export const DepthAnythingProcessor = memo(({ onChange, config }: Props) => {
|
||||
<ProcessorWrapper>
|
||||
<FormControl>
|
||||
<FormLabel m={0}>{t('controlnet.modelSize')}</FormLabel>
|
||||
<Combobox
|
||||
value={value}
|
||||
defaultInputValue={DEFAULTS.model_size}
|
||||
options={options}
|
||||
onChange={handleModelSizeChange}
|
||||
/>
|
||||
<Combobox value={value} options={options} onChange={handleModelSizeChange} isSearchable={false} />
|
||||
</FormControl>
|
||||
</ProcessorWrapper>
|
||||
);
|
||||
|
||||
@@ -27,7 +27,7 @@ import { modelChanged } from 'features/parameters/store/generationSlice';
 import type { ParameterAutoNegative } from 'features/parameters/types/parameterSchemas';
 import { getIsSizeOptimal, getOptimalDimension } from 'features/parameters/util/optimalDimension';
 import type { IRect, Vector2d } from 'konva/lib/types';
-import { isEqual, partition } from 'lodash-es';
+import { isEqual, partition, unset } from 'lodash-es';
 import { atom } from 'nanostores';
 import type { RgbColor } from 'react-colorful';
 import type { UndoableOptions } from 'redux-undo';
@@ -49,7 +49,7 @@ import type {
 } from './types';

 export const initialControlLayersState: ControlLayersState = {
-  _version: 2,
+  _version: 3,
   selectedLayerId: null,
   brushSize: 100,
   layers: [],
@@ -334,13 +334,13 @@ export const controlLayersSlice = createSlice({
       const layer = selectCALayerOrThrow(state, layerId);
       layer.opacity = opacity;
     },
-    caLayerIsProcessingImageChanged: (
+    caLayerProcessorPendingBatchIdChanged: (
       state,
-      action: PayloadAction<{ layerId: string; isProcessingImage: boolean }>
+      action: PayloadAction<{ layerId: string; batchId: string | null }>
     ) => {
-      const { layerId, isProcessingImage } = action.payload;
+      const { layerId, batchId } = action.payload;
       const layer = selectCALayerOrThrow(state, layerId);
-      layer.controlAdapter.isProcessingImage = isProcessingImage;
+      layer.controlAdapter.processorPendingBatchId = batchId;
     },
     //#endregion
@@ -616,12 +616,24 @@ export const controlLayersSlice = createSlice({
|
||||
iiLayerAdded: {
|
||||
reducer: (state, action: PayloadAction<{ layerId: string; imageDTO: ImageDTO | null }>) => {
|
||||
const { layerId, imageDTO } = action.payload;
|
||||
|
||||
// Retain opacity and denoising strength of existing initial image layer if exists
|
||||
let opacity = 1;
|
||||
let denoisingStrength = 0.75;
|
||||
const iiLayer = state.layers.find((l) => l.id === layerId);
|
||||
if (iiLayer) {
|
||||
assert(isInitialImageLayer(iiLayer));
|
||||
opacity = iiLayer.opacity;
|
||||
denoisingStrength = iiLayer.denoisingStrength;
|
||||
}
|
||||
|
||||
// Highlander! There can be only one!
|
||||
state.layers = state.layers.filter((l) => (isInitialImageLayer(l) ? false : true));
|
||||
|
||||
const layer: InitialImageLayer = {
|
||||
id: layerId,
|
||||
type: 'initial_image_layer',
|
||||
opacity: 1,
|
||||
opacity,
|
||||
x: 0,
|
||||
y: 0,
|
||||
bbox: null,
|
||||
@@ -629,7 +641,7 @@ export const controlLayersSlice = createSlice({
|
||||
isEnabled: true,
|
||||
image: imageDTO ? imageDTOToImageWithDims(imageDTO) : null,
|
||||
isSelected: true,
|
||||
denoisingStrength: 0.75,
|
||||
denoisingStrength,
|
||||
};
|
||||
state.layers.push(layer);
|
||||
exclusivelySelectLayer(state, layer.id);
|
||||
@@ -800,7 +812,7 @@ export const {
|
||||
caLayerProcessorConfigChanged,
|
||||
caLayerIsFilterEnabledChanged,
|
||||
caLayerOpacityChanged,
|
||||
caLayerIsProcessingImageChanged,
|
||||
caLayerProcessorPendingBatchIdChanged,
|
||||
// IPA Layers
|
||||
ipaLayerAdded,
|
||||
ipaLayerRecalled,
|
||||
@@ -857,7 +869,16 @@ export const selectControlLayersSlice = (state: RootState) => state.controlLayer
 const migrateControlLayersState = (state: any): any => {
   if (state._version === 1) {
     // Reset state for users on v1 (e.g. beta users), some changes could cause
-    return deepClone(initialControlLayersState);
+    state = deepClone(initialControlLayersState);
   }
+  if (state._version === 2) {
+    // The CA `isProcessingImage` flag was replaced with a `processorPendingBatchId` property, fix up CA layers
+    for (const layer of (state as ControlLayersState).layers) {
+      if (layer.type === 'control_adapter_layer') {
+        layer.controlAdapter.processorPendingBatchId = null;
+        unset(layer.controlAdapter, 'isProcessingImage');
+      }
+    }
+  }
   return state;
 };
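The migration above follows a versioned-persistence pattern: each persisted slice carries a `_version`, and older payloads are stepped forward (or reset) before rehydration. A minimal sketch with simplified stand-in shapes (not the real ControlLayersState; the explicit version bump at the end is added here for illustration):

// Sketch only - simplified shapes, not the repository's actual state type
type SketchLayer = { type: string; controlAdapter?: Record<string, unknown> };
type SketchState = { _version: number; layers: SketchLayer[] };

const initialSketchState: SketchState = { _version: 3, layers: [] };

const migrateSketchState = (state: any): SketchState => {
  if (state._version === 1) {
    // v1 persisted state is reset rather than migrated
    state = structuredClone(initialSketchState);
  }
  if (state._version === 2) {
    // v2 -> v3: the boolean isProcessingImage flag becomes a nullable batch id
    for (const layer of (state as SketchState).layers) {
      if (layer.type === 'control_adapter_layer' && layer.controlAdapter) {
        layer.controlAdapter.processorPendingBatchId = null;
        delete layer.controlAdapter.isProcessingImage;
      }
    }
    state._version = 3;
  }
  return state;
};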
@@ -113,7 +113,7 @@ export const zLayer = z.discriminatedUnion('type', [
 export type Layer = z.infer<typeof zLayer>;

 export type ControlLayersState = {
-  _version: 2;
+  _version: 3;
   selectedLayerId: string | null;
   layers: Layer[];
   brushSize: number;
@@ -1,23 +1,9 @@
|
||||
import type { S } from 'services/api/types';
|
||||
import type { Invocation } from 'services/api/types';
|
||||
import type { Equals } from 'tsafe';
|
||||
import { assert } from 'tsafe';
|
||||
import { describe, test } from 'vitest';
|
||||
|
||||
import type {
|
||||
_CannyProcessorConfig,
|
||||
_ColorMapProcessorConfig,
|
||||
_ContentShuffleProcessorConfig,
|
||||
_DepthAnythingProcessorConfig,
|
||||
_DWOpenposeProcessorConfig,
|
||||
_HedProcessorConfig,
|
||||
_LineartAnimeProcessorConfig,
|
||||
_LineartProcessorConfig,
|
||||
_MediapipeFaceProcessorConfig,
|
||||
_MidasDepthProcessorConfig,
|
||||
_MlsdProcessorConfig,
|
||||
_NormalbaeProcessorConfig,
|
||||
_PidiProcessorConfig,
|
||||
_ZoeDepthProcessorConfig,
|
||||
CannyProcessorConfig,
|
||||
CLIPVisionModelV2,
|
||||
ColorMapProcessorConfig,
|
||||
@@ -45,16 +31,16 @@ describe('Control Adapter Types', () => {
|
||||
assert<Equals<ProcessorConfig['type'], ProcessorTypeV2>>();
|
||||
});
|
||||
test('IP Adapter Method', () => {
|
||||
assert<Equals<NonNullable<S['IPAdapterInvocation']['method']>, IPMethodV2>>();
|
||||
assert<Equals<NonNullable<Invocation<'ip_adapter'>['method']>, IPMethodV2>>();
|
||||
});
|
||||
test('CLIP Vision Model', () => {
|
||||
assert<Equals<NonNullable<S['IPAdapterInvocation']['clip_vision_model']>, CLIPVisionModelV2>>();
|
||||
assert<Equals<NonNullable<Invocation<'ip_adapter'>['clip_vision_model']>, CLIPVisionModelV2>>();
|
||||
});
|
||||
test('Control Mode', () => {
|
||||
assert<Equals<NonNullable<S['ControlNetInvocation']['control_mode']>, ControlModeV2>>();
|
||||
assert<Equals<NonNullable<Invocation<'controlnet'>['control_mode']>, ControlModeV2>>();
|
||||
});
|
||||
test('DepthAnything Model Size', () => {
|
||||
assert<Equals<NonNullable<S['DepthAnythingImageProcessorInvocation']['model_size']>, DepthAnythingModelSize>>();
|
||||
assert<Equals<NonNullable<Invocation<'depth_anything_image_processor'>['model_size']>, DepthAnythingModelSize>>();
|
||||
});
|
||||
test('Processor Configs', () => {
|
||||
// The processor configs are manually modeled zod schemas. This test ensures that the inferred types are correct.
|
||||
@@ -75,3 +61,33 @@ describe('Control Adapter Types', () => {
|
||||
assert<Equals<_ZoeDepthProcessorConfig, ZoeDepthProcessorConfig>>();
|
||||
});
|
||||
});
|
||||
|
||||
// Types derived from OpenAPI
|
||||
type _CannyProcessorConfig = Required<
|
||||
Pick<Invocation<'canny_image_processor'>, 'id' | 'type' | 'low_threshold' | 'high_threshold'>
|
||||
>;
|
||||
type _ColorMapProcessorConfig = Required<
|
||||
Pick<Invocation<'color_map_image_processor'>, 'id' | 'type' | 'color_map_tile_size'>
|
||||
>;
|
||||
type _ContentShuffleProcessorConfig = Required<
|
||||
Pick<Invocation<'content_shuffle_image_processor'>, 'id' | 'type' | 'w' | 'h' | 'f'>
|
||||
>;
|
||||
type _DepthAnythingProcessorConfig = Required<
|
||||
Pick<Invocation<'depth_anything_image_processor'>, 'id' | 'type' | 'model_size'>
|
||||
>;
|
||||
type _HedProcessorConfig = Required<Pick<Invocation<'hed_image_processor'>, 'id' | 'type' | 'scribble'>>;
|
||||
type _LineartAnimeProcessorConfig = Required<Pick<Invocation<'lineart_anime_image_processor'>, 'id' | 'type'>>;
|
||||
type _LineartProcessorConfig = Required<Pick<Invocation<'lineart_image_processor'>, 'id' | 'type' | 'coarse'>>;
|
||||
type _MediapipeFaceProcessorConfig = Required<
|
||||
Pick<Invocation<'mediapipe_face_processor'>, 'id' | 'type' | 'max_faces' | 'min_confidence'>
|
||||
>;
|
||||
type _MidasDepthProcessorConfig = Required<
|
||||
Pick<Invocation<'midas_depth_image_processor'>, 'id' | 'type' | 'a_mult' | 'bg_th'>
|
||||
>;
|
||||
type _MlsdProcessorConfig = Required<Pick<Invocation<'mlsd_image_processor'>, 'id' | 'type' | 'thr_v' | 'thr_d'>>;
|
||||
type _NormalbaeProcessorConfig = Required<Pick<Invocation<'normalbae_image_processor'>, 'id' | 'type'>>;
|
||||
type _DWOpenposeProcessorConfig = Required<
|
||||
Pick<Invocation<'dw_openpose_image_processor'>, 'id' | 'type' | 'draw_body' | 'draw_face' | 'draw_hands'>
|
||||
>;
|
||||
type _PidiProcessorConfig = Required<Pick<Invocation<'pidi_image_processor'>, 'id' | 'type' | 'safe' | 'scribble'>>;
|
||||
type _ZoeDepthProcessorConfig = Required<Pick<Invocation<'zoe_depth_image_processor'>, 'id' | 'type'>>;
|
||||
|
||||
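The test file above relies on tsafe's compile-time equality assertion: `assert<Equals<A, B>>()` only type-checks when the two types are exactly the same, so any drift between the hand-written zod-inferred configs and the OpenAPI-derived invocation types fails the build. A small illustrative sketch (the two shapes here are hypothetical):

// Sketch of the compile-time equality check used in the tests above
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';

type FromZodSketch = { id: string; type: 'canny_image_processor'; low_threshold: number; high_threshold: number };
type FromOpenApiSketch = { id: string; type: 'canny_image_processor'; low_threshold: number; high_threshold: number };

assert<Equals<FromZodSketch, FromOpenApiSketch>>(); // compiles only if the shapes match exactly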
@@ -1,27 +1,7 @@
|
||||
import { deepClone } from 'common/util/deepClone';
|
||||
import { zModelIdentifierField } from 'features/nodes/types/common';
|
||||
import { merge, omit } from 'lodash-es';
|
||||
import type {
|
||||
BaseModelType,
|
||||
CannyImageProcessorInvocation,
|
||||
ColorMapImageProcessorInvocation,
|
||||
ContentShuffleImageProcessorInvocation,
|
||||
ControlNetModelConfig,
|
||||
DepthAnythingImageProcessorInvocation,
|
||||
DWOpenposeImageProcessorInvocation,
|
||||
Graph,
|
||||
HedImageProcessorInvocation,
|
||||
ImageDTO,
|
||||
LineartAnimeImageProcessorInvocation,
|
||||
LineartImageProcessorInvocation,
|
||||
MediapipeFaceProcessorInvocation,
|
||||
MidasDepthImageProcessorInvocation,
|
||||
MlsdImageProcessorInvocation,
|
||||
NormalbaeImageProcessorInvocation,
|
||||
PidiImageProcessorInvocation,
|
||||
T2IAdapterModelConfig,
|
||||
ZoeDepthImageProcessorInvocation,
|
||||
} from 'services/api/types';
|
||||
import type { BaseModelType, ControlNetModelConfig, Graph, ImageDTO, T2IAdapterModelConfig } from 'services/api/types';
|
||||
import { z } from 'zod';
|
||||
|
||||
const zId = z.string().min(1);
|
||||
@@ -32,9 +12,6 @@ const zCannyProcessorConfig = z.object({
|
||||
low_threshold: z.number().int().gte(0).lte(255),
|
||||
high_threshold: z.number().int().gte(0).lte(255),
|
||||
});
|
||||
export type _CannyProcessorConfig = Required<
|
||||
Pick<CannyImageProcessorInvocation, 'id' | 'type' | 'low_threshold' | 'high_threshold'>
|
||||
>;
|
||||
export type CannyProcessorConfig = z.infer<typeof zCannyProcessorConfig>;
|
||||
|
||||
const zColorMapProcessorConfig = z.object({
|
||||
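The processor configs in this file follow a schema-first pattern: the zod schema is the source of truth, the TypeScript type is inferred from it with `z.infer`, and a runtime guard falls out of `safeParse`. A minimal sketch of that pattern (names here are illustrative, not the repository's exports):

// Sketch of the schema-first pattern used above
import { z } from 'zod';

const zCannySketch = z.object({
  id: z.string().min(1),
  type: z.literal('canny_image_processor'),
  low_threshold: z.number().int().gte(0).lte(255),
  high_threshold: z.number().int().gte(0).lte(255),
});
type CannySketch = z.infer<typeof zCannySketch>;

// Runtime type guard derived from the same schema
const isCannySketch = (v: unknown): v is CannySketch => zCannySketch.safeParse(v).success;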
@@ -42,9 +19,6 @@ const zColorMapProcessorConfig = z.object({
|
||||
type: z.literal('color_map_image_processor'),
|
||||
color_map_tile_size: z.number().int().gte(1),
|
||||
});
|
||||
export type _ColorMapProcessorConfig = Required<
|
||||
Pick<ColorMapImageProcessorInvocation, 'id' | 'type' | 'color_map_tile_size'>
|
||||
>;
|
||||
export type ColorMapProcessorConfig = z.infer<typeof zColorMapProcessorConfig>;
|
||||
|
||||
const zContentShuffleProcessorConfig = z.object({
|
||||
@@ -54,9 +28,6 @@ const zContentShuffleProcessorConfig = z.object({
|
||||
h: z.number().int().gte(0),
|
||||
f: z.number().int().gte(0),
|
||||
});
|
||||
export type _ContentShuffleProcessorConfig = Required<
|
||||
Pick<ContentShuffleImageProcessorInvocation, 'id' | 'type' | 'w' | 'h' | 'f'>
|
||||
>;
|
||||
export type ContentShuffleProcessorConfig = z.infer<typeof zContentShuffleProcessorConfig>;
|
||||
|
||||
const zDepthAnythingModelSize = z.enum(['large', 'base', 'small']);
|
||||
@@ -68,9 +39,6 @@ const zDepthAnythingProcessorConfig = z.object({
|
||||
type: z.literal('depth_anything_image_processor'),
|
||||
model_size: zDepthAnythingModelSize,
|
||||
});
|
||||
export type _DepthAnythingProcessorConfig = Required<
|
||||
Pick<DepthAnythingImageProcessorInvocation, 'id' | 'type' | 'model_size'>
|
||||
>;
|
||||
export type DepthAnythingProcessorConfig = z.infer<typeof zDepthAnythingProcessorConfig>;
|
||||
|
||||
const zHedProcessorConfig = z.object({
|
||||
@@ -78,14 +46,12 @@ const zHedProcessorConfig = z.object({
|
||||
type: z.literal('hed_image_processor'),
|
||||
scribble: z.boolean(),
|
||||
});
|
||||
export type _HedProcessorConfig = Required<Pick<HedImageProcessorInvocation, 'id' | 'type' | 'scribble'>>;
|
||||
export type HedProcessorConfig = z.infer<typeof zHedProcessorConfig>;
|
||||
|
||||
const zLineartAnimeProcessorConfig = z.object({
|
||||
id: zId,
|
||||
type: z.literal('lineart_anime_image_processor'),
|
||||
});
|
||||
export type _LineartAnimeProcessorConfig = Required<Pick<LineartAnimeImageProcessorInvocation, 'id' | 'type'>>;
|
||||
export type LineartAnimeProcessorConfig = z.infer<typeof zLineartAnimeProcessorConfig>;
|
||||
|
||||
const zLineartProcessorConfig = z.object({
|
||||
@@ -93,7 +59,6 @@ const zLineartProcessorConfig = z.object({
|
||||
type: z.literal('lineart_image_processor'),
|
||||
coarse: z.boolean(),
|
||||
});
|
||||
export type _LineartProcessorConfig = Required<Pick<LineartImageProcessorInvocation, 'id' | 'type' | 'coarse'>>;
|
||||
export type LineartProcessorConfig = z.infer<typeof zLineartProcessorConfig>;
|
||||
|
||||
const zMediapipeFaceProcessorConfig = z.object({
|
||||
@@ -102,9 +67,6 @@ const zMediapipeFaceProcessorConfig = z.object({
|
||||
max_faces: z.number().int().gte(1),
|
||||
min_confidence: z.number().gte(0).lte(1),
|
||||
});
|
||||
export type _MediapipeFaceProcessorConfig = Required<
|
||||
Pick<MediapipeFaceProcessorInvocation, 'id' | 'type' | 'max_faces' | 'min_confidence'>
|
||||
>;
|
||||
export type MediapipeFaceProcessorConfig = z.infer<typeof zMediapipeFaceProcessorConfig>;
|
||||
|
||||
const zMidasDepthProcessorConfig = z.object({
|
||||
@@ -113,9 +75,6 @@ const zMidasDepthProcessorConfig = z.object({
|
||||
a_mult: z.number().gte(0),
|
||||
bg_th: z.number().gte(0),
|
||||
});
|
||||
export type _MidasDepthProcessorConfig = Required<
|
||||
Pick<MidasDepthImageProcessorInvocation, 'id' | 'type' | 'a_mult' | 'bg_th'>
|
||||
>;
|
||||
export type MidasDepthProcessorConfig = z.infer<typeof zMidasDepthProcessorConfig>;
|
||||
|
||||
const zMlsdProcessorConfig = z.object({
|
||||
@@ -124,14 +83,12 @@ const zMlsdProcessorConfig = z.object({
|
||||
thr_v: z.number().gte(0),
|
||||
thr_d: z.number().gte(0),
|
||||
});
|
||||
export type _MlsdProcessorConfig = Required<Pick<MlsdImageProcessorInvocation, 'id' | 'type' | 'thr_v' | 'thr_d'>>;
|
||||
export type MlsdProcessorConfig = z.infer<typeof zMlsdProcessorConfig>;
|
||||
|
||||
const zNormalbaeProcessorConfig = z.object({
|
||||
id: zId,
|
||||
type: z.literal('normalbae_image_processor'),
|
||||
});
|
||||
export type _NormalbaeProcessorConfig = Required<Pick<NormalbaeImageProcessorInvocation, 'id' | 'type'>>;
|
||||
export type NormalbaeProcessorConfig = z.infer<typeof zNormalbaeProcessorConfig>;
|
||||
|
||||
const zDWOpenposeProcessorConfig = z.object({
|
||||
@@ -141,9 +98,6 @@ const zDWOpenposeProcessorConfig = z.object({
|
||||
draw_face: z.boolean(),
|
||||
draw_hands: z.boolean(),
|
||||
});
|
||||
export type _DWOpenposeProcessorConfig = Required<
|
||||
Pick<DWOpenposeImageProcessorInvocation, 'id' | 'type' | 'draw_body' | 'draw_face' | 'draw_hands'>
|
||||
>;
|
||||
export type DWOpenposeProcessorConfig = z.infer<typeof zDWOpenposeProcessorConfig>;
|
||||
|
||||
const zPidiProcessorConfig = z.object({
|
||||
@@ -152,14 +106,12 @@ const zPidiProcessorConfig = z.object({
|
||||
safe: z.boolean(),
|
||||
scribble: z.boolean(),
|
||||
});
|
||||
export type _PidiProcessorConfig = Required<Pick<PidiImageProcessorInvocation, 'id' | 'type' | 'safe' | 'scribble'>>;
|
||||
export type PidiProcessorConfig = z.infer<typeof zPidiProcessorConfig>;
|
||||
|
||||
const zZoeDepthProcessorConfig = z.object({
|
||||
id: zId,
|
||||
type: z.literal('zoe_depth_image_processor'),
|
||||
});
|
||||
export type _ZoeDepthProcessorConfig = Required<Pick<ZoeDepthImageProcessorInvocation, 'id' | 'type'>>;
|
||||
export type ZoeDepthProcessorConfig = z.infer<typeof zZoeDepthProcessorConfig>;
|
||||
|
||||
const zProcessorConfig = z.discriminatedUnion('type', [
|
||||
@@ -198,8 +150,8 @@ const zControlAdapterBase = z.object({
   weight: z.number().gte(0).lte(1),
   image: zImageWithDims.nullable(),
   processedImage: zImageWithDims.nullable(),
-  isProcessingImage: z.boolean(),
   processorConfig: zProcessorConfig.nullable(),
+  processorPendingBatchId: z.string().nullable().default(null),
   beginEndStepPct: zBeginEndStepPct,
 });

@@ -521,8 +473,8 @@ export const initialControlNetV2: Omit<ControlNetConfigV2, 'id'> = {
   controlMode: 'balanced',
   image: null,
   processedImage: null,
-  isProcessingImage: false,
   processorConfig: CA_PROCESSOR_DATA.canny_image_processor.buildDefaults(),
+  processorPendingBatchId: null,
 };

 export const initialT2IAdapterV2: Omit<T2IAdapterConfigV2, 'id'> = {
@@ -532,8 +484,8 @@ export const initialT2IAdapterV2: Omit<T2IAdapterConfigV2, 'id'> = {
   beginEndStepPct: [0, 1],
   image: null,
   processedImage: null,
-  isProcessingImage: false,
   processorConfig: CA_PROCESSOR_DATA.canny_image_processor.buildDefaults(),
+  processorPendingBatchId: null,
 };

 export const initialIPAdapterV2: Omit<IPAdapterConfigV2, 'id'> = {
@@ -464,6 +464,7 @@ const createInitialImageLayer = (stage: Konva.Stage, reduxLayer: InitialImageLay
     id: reduxLayer.id,
     name: INITIAL_IMAGE_LAYER_NAME,
+    imageSmoothingEnabled: true,
     listening: false,
   });
   stage.add(konvaLayer);
   return konvaLayer;
@@ -567,6 +568,7 @@ const createControlNetLayer = (stage: Konva.Stage, reduxLayer: ControlAdapterLay
     id: reduxLayer.id,
     name: CA_LAYER_NAME,
+    imageSmoothingEnabled: true,
     listening: false,
   });
   stage.add(konvaLayer);
   return konvaLayer;
@@ -1,23 +1,21 @@
|
||||
import type { Modifier } from '@dnd-kit/core';
|
||||
import { getEventCoordinates } from '@dnd-kit/utilities';
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { useStore } from '@nanostores/react';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import { selectNodesSlice } from 'features/nodes/store/nodesSlice';
|
||||
import { $viewport } from 'features/nodes/store/nodesSlice';
|
||||
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
|
||||
import { useCallback } from 'react';
|
||||
|
||||
const selectZoom = createSelector([selectNodesSlice, activeTabNameSelector], (nodes, activeTabName) =>
|
||||
activeTabName === 'workflows' ? nodes.viewport.zoom : 1
|
||||
);
|
||||
|
||||
/**
|
||||
* Applies scaling to the drag transform (if on node editor tab) and centers it on cursor.
|
||||
*/
|
||||
export const useScaledModifer = () => {
|
||||
const zoom = useAppSelector(selectZoom);
|
||||
const activeTabName = useAppSelector(activeTabNameSelector);
|
||||
const workflowsViewport = useStore($viewport);
|
||||
const modifier: Modifier = useCallback(
|
||||
({ activatorEvent, draggingNodeRect, transform }) => {
|
||||
if (draggingNodeRect && activatorEvent) {
|
||||
const zoom = activeTabName === 'workflows' ? workflowsViewport.zoom : 1;
|
||||
const activatorCoordinates = getEventCoordinates(activatorEvent);
|
||||
|
||||
if (!activatorCoordinates) {
|
||||
@@ -42,7 +40,7 @@ export const useScaledModifer = () => {
|
||||
|
||||
return transform;
|
||||
},
|
||||
[zoom]
|
||||
[activeTabName, workflowsViewport.zoom]
|
||||
);
|
||||
|
||||
return modifier;
|
||||
|
||||
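The hook above builds a @dnd-kit `Modifier` that scales the drag transform by the workflow editor's zoom (and recenters on the cursor) so drag previews track correctly inside a zoomed canvas. A stripped-down sketch of just the scaling part, under the assumption that a zoom getter is available:

// Sketch of a scaling Modifier; getZoom is an assumed accessor (e.g. reading a nanostores atom)
import type { Modifier } from '@dnd-kit/core';

const makeScaledModifier =
  (getZoom: () => number): Modifier =>
  ({ transform }) => {
    const zoom = getZoom();
    // Scale the drag delta so the preview stays under the cursor in a zoomed view
    return { ...transform, x: transform.x * zoom, y: transform.y * zoom };
  };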
@@ -11,10 +11,12 @@ import { iiLayerAdded } from 'features/controlLayers/store/controlLayersSlice';
|
||||
import { imagesToDeleteSelected } from 'features/deleteImageModal/store/slice';
|
||||
import { useImageActions } from 'features/gallery/hooks/useImageActions';
|
||||
import { sentImageToCanvas, sentImageToImg2Img } from 'features/gallery/store/actions';
|
||||
import { $templates } from 'features/nodes/store/nodesSlice';
|
||||
import { selectOptimalDimension } from 'features/parameters/store/generationSlice';
|
||||
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
|
||||
import { setActiveTab } from 'features/ui/store/uiSlice';
|
||||
import { useGetAndLoadEmbeddedWorkflow } from 'features/workflowLibrary/hooks/useGetAndLoadEmbeddedWorkflow';
|
||||
import { size } from 'lodash-es';
|
||||
import { memo, useCallback } from 'react';
|
||||
import { flushSync } from 'react-dom';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
@@ -48,6 +50,7 @@ const SingleSelectionMenuItems = (props: SingleSelectionMenuItemsProps) => {
|
||||
const isCanvasEnabled = useFeatureStatus('canvas');
|
||||
const customStarUi = useStore($customStarUI);
|
||||
const { downloadImage } = useDownloadImage();
|
||||
const templates = useStore($templates);
|
||||
|
||||
const { recallAll, remix, recallSeed, recallPrompts, hasMetadata, hasSeed, hasPrompts, isLoadingMetadata } =
|
||||
useImageActions(imageDTO?.image_name);
|
||||
@@ -133,7 +136,7 @@ const SingleSelectionMenuItems = (props: SingleSelectionMenuItemsProps) => {
|
||||
<MenuItem
|
||||
icon={getAndLoadEmbeddedWorkflowResult.isLoading ? <SpinnerIcon /> : <PiFlowArrowBold />}
|
||||
onClickCapture={handleLoadWorkflow}
|
||||
isDisabled={!imageDTO.has_workflow}
|
||||
isDisabled={!imageDTO.has_workflow || !size(templates)}
|
||||
>
|
||||
{t('nodes.loadWorkflow')}
|
||||
</MenuItem>
|
||||
|
||||
@@ -0,0 +1,34 @@
|
||||
import { IAINoContentFallback } from 'common/components/IAIImageFallback';
|
||||
import { memo, useMemo } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { useDebouncedImageWorkflow } from 'services/api/hooks/useDebouncedImageWorkflow';
|
||||
import type { ImageDTO } from 'services/api/types';
|
||||
|
||||
import DataViewer from './DataViewer';
|
||||
|
||||
type Props = {
|
||||
image: ImageDTO;
|
||||
};
|
||||
|
||||
const ImageMetadataGraphTabContent = ({ image }: Props) => {
|
||||
const { t } = useTranslation();
|
||||
const { currentData } = useDebouncedImageWorkflow(image);
|
||||
const graph = useMemo(() => {
|
||||
if (currentData?.graph) {
|
||||
try {
|
||||
return JSON.parse(currentData.graph);
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}, [currentData]);
|
||||
|
||||
if (!graph) {
|
||||
return <IAINoContentFallback label={t('nodes.noGraph')} />;
|
||||
}
|
||||
|
||||
return <DataViewer data={graph} label={t('nodes.graph')} />;
|
||||
};
|
||||
|
||||
export default memo(ImageMetadataGraphTabContent);
|
||||
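The component above parses the raw graph payload inside a memo and treats malformed JSON as "no graph" rather than letting the render throw. The same parse-or-fallback idiom, sketched as a small helper (the helper name is illustrative, not a repository export):

// Sketch of the parse-or-fallback idiom used above
const safeJsonParse = <T = unknown>(raw: string | null | undefined): T | null => {
  if (!raw) {
    return null;
  }
  try {
    return JSON.parse(raw) as T;
  } catch {
    // Malformed JSON renders the fallback instead of crashing the component
    return null;
  }
};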
@@ -1,6 +1,7 @@
|
||||
import { ExternalLink, Flex, Tab, TabList, TabPanel, TabPanels, Tabs, Text } from '@invoke-ai/ui-library';
|
||||
import { IAINoContentFallback } from 'common/components/IAIImageFallback';
|
||||
import ScrollableContent from 'common/components/OverlayScrollbars/ScrollableContent';
|
||||
import ImageMetadataGraphTabContent from 'features/gallery/components/ImageMetadataViewer/ImageMetadataGraphTabContent';
|
||||
import { useMetadataItem } from 'features/metadata/hooks/useMetadataItem';
|
||||
import { handlers } from 'features/metadata/util/handlers';
|
||||
import { memo } from 'react';
|
||||
@@ -52,6 +53,7 @@ const ImageMetadataViewer = ({ image }: ImageMetadataViewerProps) => {
|
||||
<Tab>{t('metadata.metadata')}</Tab>
|
||||
<Tab>{t('metadata.imageDetails')}</Tab>
|
||||
<Tab>{t('metadata.workflow')}</Tab>
|
||||
<Tab>{t('nodes.graph')}</Tab>
|
||||
</TabList>
|
||||
|
||||
<TabPanels>
|
||||
@@ -81,6 +83,9 @@ const ImageMetadataViewer = ({ image }: ImageMetadataViewerProps) => {
|
||||
<TabPanel>
|
||||
<ImageMetadataWorkflowTabContent image={image} />
|
||||
</TabPanel>
|
||||
<TabPanel>
|
||||
<ImageMetadataGraphTabContent image={image} />
|
||||
</TabPanel>
|
||||
</TabPanels>
|
||||
</Tabs>
|
||||
</Flex>
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { IAINoContentFallback } from 'common/components/IAIImageFallback';
|
||||
import { memo } from 'react';
|
||||
import { memo, useMemo } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { useDebouncedImageWorkflow } from 'services/api/hooks/useDebouncedImageWorkflow';
|
||||
import type { ImageDTO } from 'services/api/types';
|
||||
@@ -12,7 +12,17 @@ type Props = {
|
||||
|
||||
const ImageMetadataWorkflowTabContent = ({ image }: Props) => {
|
||||
const { t } = useTranslation();
|
||||
const { workflow } = useDebouncedImageWorkflow(image);
|
||||
const { currentData } = useDebouncedImageWorkflow(image);
|
||||
const workflow = useMemo(() => {
|
||||
if (currentData?.workflow) {
|
||||
try {
|
||||
return JSON.parse(currentData.workflow);
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}, [currentData]);
|
||||
|
||||
if (!workflow) {
|
||||
return <IAINoContentFallback label={t('nodes.noWorkflow')} />;
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import { ButtonGroup, IconButton, Menu, MenuButton, MenuList } from '@invoke-ai/ui-library';
|
||||
import { useStore } from '@nanostores/react';
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { skipToken } from '@reduxjs/toolkit/query';
|
||||
import { upscaleRequested } from 'app/store/middleware/listenerMiddleware/listeners/upscaleRequested';
|
||||
@@ -12,12 +13,14 @@ import { sentImageToImg2Img } from 'features/gallery/store/actions';
|
||||
import { selectLastSelectedImage } from 'features/gallery/store/gallerySelectors';
|
||||
import { selectGallerySlice } from 'features/gallery/store/gallerySlice';
|
||||
import { parseAndRecallImageDimensions } from 'features/metadata/util/handlers';
|
||||
import { $templates } from 'features/nodes/store/nodesSlice';
|
||||
import ParamUpscalePopover from 'features/parameters/components/Upscale/ParamUpscaleSettings';
|
||||
import { useIsQueueMutationInProgress } from 'features/queue/hooks/useIsQueueMutationInProgress';
|
||||
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
|
||||
import { selectSystemSlice } from 'features/system/store/systemSlice';
|
||||
import { setActiveTab } from 'features/ui/store/uiSlice';
|
||||
import { useGetAndLoadEmbeddedWorkflow } from 'features/workflowLibrary/hooks/useGetAndLoadEmbeddedWorkflow';
|
||||
import { size } from 'lodash-es';
|
||||
import { memo, useCallback } from 'react';
|
||||
import { useHotkeys } from 'react-hotkeys-hook';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
@@ -48,7 +51,7 @@ const CurrentImageButtons = () => {
|
||||
const lastSelectedImage = useAppSelector(selectLastSelectedImage);
|
||||
const selection = useAppSelector((s) => s.gallery.selection);
|
||||
const shouldDisableToolbarButtons = useAppSelector(selectShouldDisableToolbarButtons);
|
||||
|
||||
const templates = useStore($templates);
|
||||
const isUpscalingEnabled = useFeatureStatus('upscaling');
|
||||
const isQueueMutationInProgress = useIsQueueMutationInProgress();
|
||||
const { t } = useTranslation();
|
||||
@@ -143,7 +146,7 @@ const CurrentImageButtons = () => {
|
||||
icon={<PiFlowArrowBold />}
|
||||
tooltip={`${t('nodes.loadWorkflow')} (W)`}
|
||||
aria-label={`${t('nodes.loadWorkflow')} (W)`}
|
||||
isDisabled={!imageDTO?.has_workflow}
|
||||
isDisabled={!imageDTO?.has_workflow || !size(templates)}
|
||||
onClick={handleLoadWorkflow}
|
||||
isLoading={getAndLoadEmbeddedWorkflowResult.isLoading}
|
||||
/>
|
||||
|
||||
@@ -75,8 +75,8 @@ export const LoRACard = memo((props: LoRACardProps) => {
           <CompositeNumberInput
             value={lora.weight}
             onChange={handleChange}
-            min={-5}
-            max={5}
+            min={-10}
+            max={10}
             step={0.01}
             w={20}
             flexShrink={0}
@@ -587,7 +587,7 @@ const parseControlNetToControlAdapterLayer: MetadataParseFunc<ControlAdapterLaye
       image: imageDTO ? imageDTOToImageWithDims(imageDTO) : null,
       processedImage: processedImageDTO ? imageDTOToImageWithDims(processedImageDTO) : null,
       processorConfig,
-      isProcessingImage: false,
+      processorPendingBatchId: null,
     },
   };

@@ -651,7 +651,7 @@ const parseT2IAdapterToControlAdapterLayer: MetadataParseFunc<ControlAdapterLaye
       image: imageDTO ? imageDTOToImageWithDims(imageDTO) : null,
       processedImage: processedImageDTO ? imageDTOToImageWithDims(processedImageDTO) : null,
       processorConfig,
-      isProcessingImage: false,
+      processorPendingBatchId: null,
     },
   };
@@ -2,26 +2,36 @@ import 'reactflow/dist/style.css';
|
||||
|
||||
import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library';
|
||||
import { Combobox, Flex, Popover, PopoverAnchor, PopoverBody, PopoverContent } from '@invoke-ai/ui-library';
|
||||
import { useStore } from '@nanostores/react';
|
||||
import { useAppToaster } from 'app/components/Toaster';
|
||||
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { useAppDispatch, useAppStore } from 'app/store/storeHooks';
|
||||
import type { SelectInstance } from 'chakra-react-select';
|
||||
import { useBuildNode } from 'features/nodes/hooks/useBuildNode';
|
||||
import {
|
||||
addNodePopoverClosed,
|
||||
addNodePopoverOpened,
|
||||
nodeAdded,
|
||||
selectNodesSlice,
|
||||
$cursorPos,
|
||||
$edgePendingUpdate,
|
||||
$isAddNodePopoverOpen,
|
||||
$pendingConnection,
|
||||
$templates,
|
||||
closeAddNodePopover,
|
||||
edgesChanged,
|
||||
nodesChanged,
|
||||
openAddNodePopover,
|
||||
} from 'features/nodes/store/nodesSlice';
|
||||
import { validateSourceAndTargetTypes } from 'features/nodes/store/util/validateSourceAndTargetTypes';
|
||||
import { findUnoccupiedPosition } from 'features/nodes/store/util/findUnoccupiedPosition';
|
||||
import { getFirstValidConnection } from 'features/nodes/store/util/getFirstValidConnection';
|
||||
import { connectionToEdge } from 'features/nodes/store/util/reactFlowUtil';
|
||||
import { validateConnectionTypes } from 'features/nodes/store/util/validateConnectionTypes';
|
||||
import type { AnyNode } from 'features/nodes/types/invocation';
|
||||
import { isInvocationNode } from 'features/nodes/types/invocation';
|
||||
import { filter, map, memoize, some } from 'lodash-es';
|
||||
import type { KeyboardEventHandler } from 'react';
|
||||
import { memo, useCallback, useRef } from 'react';
|
||||
import { memo, useCallback, useMemo, useRef } from 'react';
|
||||
import { flushSync } from 'react-dom';
|
||||
import { useHotkeys } from 'react-hotkeys-hook';
|
||||
import type { HotkeyCallback } from 'react-hotkeys-hook/dist/types';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import type { FilterOptionOption } from 'react-select/dist/declarations/src/filters';
|
||||
import type { EdgeChange, NodeChange } from 'reactflow';
|
||||
|
||||
const createRegex = memoize(
|
||||
(inputValue: string) =>
|
||||
@@ -54,26 +64,32 @@ const AddNodePopover = () => {
|
||||
const { t } = useTranslation();
|
||||
const selectRef = useRef<SelectInstance<ComboboxOption> | null>(null);
|
||||
const inputRef = useRef<HTMLInputElement>(null);
|
||||
const templates = useStore($templates);
|
||||
const pendingConnection = useStore($pendingConnection);
|
||||
const isOpen = useStore($isAddNodePopoverOpen);
|
||||
const store = useAppStore();
|
||||
|
||||
const fieldFilter = useAppSelector((s) => s.nodes.connectionStartFieldType);
|
||||
const handleFilter = useAppSelector((s) => s.nodes.connectionStartParams?.handleType);
|
||||
|
||||
const selector = createMemoizedSelector(selectNodesSlice, (nodes) => {
|
||||
const filteredTemplates = useMemo(() => {
|
||||
// If we have a connection in progress, we need to filter the node choices
|
||||
const filteredNodeTemplates = fieldFilter
|
||||
? filter(nodes.templates, (template) => {
|
||||
const handles = handleFilter === 'source' ? template.inputs : template.outputs;
|
||||
const templatesArray = map(templates);
|
||||
if (!pendingConnection) {
|
||||
return templatesArray;
|
||||
}
|
||||
|
||||
return some(handles, (handle) => {
|
||||
const sourceType = handleFilter === 'source' ? fieldFilter : handle.type;
|
||||
const targetType = handleFilter === 'target' ? fieldFilter : handle.type;
|
||||
return filter(templates, (template) => {
|
||||
const candidateFields = pendingConnection.handleType === 'source' ? template.inputs : template.outputs;
|
||||
return some(candidateFields, (field) => {
|
||||
const sourceType =
|
||||
pendingConnection.handleType === 'source' ? field.type : pendingConnection.fieldTemplate.type;
|
||||
const targetType =
|
||||
pendingConnection.handleType === 'target' ? field.type : pendingConnection.fieldTemplate.type;
|
||||
return validateConnectionTypes(sourceType, targetType);
|
||||
});
|
||||
});
|
||||
}, [templates, pendingConnection]);
|
||||
|
||||
return validateSourceAndTargetTypes(sourceType, targetType);
|
||||
});
|
||||
})
|
||||
: map(nodes.templates);
|
||||
|
||||
const options: ComboboxOption[] = map(filteredNodeTemplates, (template) => {
|
||||
const options = useMemo(() => {
|
||||
const _options: ComboboxOption[] = map(filteredTemplates, (template) => {
|
||||
return {
|
||||
label: template.title,
|
||||
value: template.type,
|
||||
@@ -83,15 +99,15 @@ const AddNodePopover = () => {
|
||||
});
|
||||
|
||||
//We only want these nodes if we're not filtered
|
||||
if (fieldFilter === null) {
|
||||
options.push({
|
||||
if (!pendingConnection) {
|
||||
_options.push({
|
||||
label: t('nodes.currentImage'),
|
||||
value: 'current_image',
|
||||
description: t('nodes.currentImageDescription'),
|
||||
tags: ['progress'],
|
||||
});
|
||||
|
||||
options.push({
|
||||
_options.push({
|
||||
label: t('nodes.notes'),
|
||||
value: 'notes',
|
||||
description: t('nodes.notesDescription'),
|
||||
@@ -99,18 +115,15 @@ const AddNodePopover = () => {
|
||||
});
|
||||
}
|
||||
|
||||
options.sort((a, b) => a.label.localeCompare(b.label));
|
||||
_options.sort((a, b) => a.label.localeCompare(b.label));
|
||||
|
||||
return { options };
|
||||
});
|
||||
|
||||
const { options } = useAppSelector(selector);
|
||||
const isOpen = useAppSelector((s) => s.nodes.isAddNodePopoverOpen);
|
||||
return _options;
|
||||
}, [filteredTemplates, pendingConnection, t]);
|
||||
|
||||
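The `filteredTemplates` memo above keeps only node templates that expose at least one field whose type can legally connect to the pending connection. A simplified sketch of that filter, mirroring the logic shown in the diff (the shapes and the `validate` callback are stand-ins for the repository's template types and `validateConnectionTypes` helper):

// Sketch only - simplified shapes
type FieldSketch = { type: string };
type TemplateSketch = { inputs: Record<string, FieldSketch>; outputs: Record<string, FieldSketch> };
type PendingConnectionSketch = { handleType: 'source' | 'target'; fieldTemplate: FieldSketch };

const filterCompatibleTemplates = (
  templates: TemplateSketch[],
  pending: PendingConnectionSketch | null,
  validate: (sourceType: string, targetType: string) => boolean
): TemplateSketch[] => {
  if (!pending) {
    return templates;
  }
  return templates.filter((template) => {
    // Dragging from a source handle means the candidate node must offer a compatible input, and vice versa
    const candidateFields = pending.handleType === 'source' ? template.inputs : template.outputs;
    return Object.values(candidateFields).some((field) => {
      const sourceType = pending.handleType === 'source' ? field.type : pending.fieldTemplate.type;
      const targetType = pending.handleType === 'target' ? field.type : pending.fieldTemplate.type;
      return validate(sourceType, targetType);
    });
  });
};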
const addNode = useCallback(
|
||||
(nodeType: string) => {
|
||||
const invocation = buildInvocation(nodeType);
|
||||
if (!invocation) {
|
||||
(nodeType: string): AnyNode | null => {
|
||||
const node = buildInvocation(nodeType);
|
||||
if (!node) {
|
||||
const errorMessage = t('nodes.unknownNode', {
|
||||
nodeType: nodeType,
|
||||
});
|
||||
@@ -118,12 +131,39 @@ const AddNodePopover = () => {
|
||||
status: 'error',
|
||||
title: errorMessage,
|
||||
});
|
||||
return;
|
||||
return null;
|
||||
}
|
||||
|
||||
dispatch(nodeAdded(invocation));
|
||||
// Find a cozy spot for the node
|
||||
const cursorPos = $cursorPos.get();
|
||||
const { nodes, edges } = store.getState().nodes.present;
|
||||
node.position = findUnoccupiedPosition(nodes, cursorPos?.x ?? node.position.x, cursorPos?.y ?? node.position.y);
|
||||
node.selected = true;
|
||||
|
||||
// Deselect all other nodes and edges
|
||||
const nodeChanges: NodeChange[] = [{ type: 'add', item: node }];
|
||||
const edgeChanges: EdgeChange[] = [];
|
||||
nodes.forEach(({ id, selected }) => {
|
||||
if (selected) {
|
||||
nodeChanges.push({ type: 'select', id, selected: false });
|
||||
}
|
||||
});
|
||||
edges.forEach(({ id, selected }) => {
|
||||
if (selected) {
|
||||
edgeChanges.push({ type: 'select', id, selected: false });
|
||||
}
|
||||
});
|
||||
|
||||
// Onwards!
|
||||
if (nodeChanges.length > 0) {
|
||||
dispatch(nodesChanged(nodeChanges));
|
||||
}
|
||||
if (edgeChanges.length > 0) {
|
||||
dispatch(edgesChanged(edgeChanges));
|
||||
}
|
||||
return node;
|
||||
},
|
||||
[dispatch, buildInvocation, toaster, t]
|
||||
[buildInvocation, store, dispatch, t, toaster]
|
||||
);
|
||||
|
||||
const onChange = useCallback<ComboboxOnChange>(
|
||||
@@ -131,52 +171,65 @@ const AddNodePopover = () => {
|
||||
if (!v) {
|
||||
return;
|
||||
}
|
||||
addNode(v.value);
|
||||
dispatch(addNodePopoverClosed());
|
||||
const node = addNode(v.value);
|
||||
|
||||
// Auto-connect an edge if we just added a node and have a pending connection
|
||||
if (pendingConnection && isInvocationNode(node)) {
|
||||
const edgePendingUpdate = $edgePendingUpdate.get();
|
||||
const { handleType } = pendingConnection;
|
||||
|
||||
const source = handleType === 'source' ? pendingConnection.nodeId : node.id;
|
||||
const sourceHandle = handleType === 'source' ? pendingConnection.handleId : null;
|
||||
const target = handleType === 'target' ? pendingConnection.nodeId : node.id;
|
||||
const targetHandle = handleType === 'target' ? pendingConnection.handleId : null;
|
||||
|
||||
const { nodes, edges } = store.getState().nodes.present;
|
||||
const connection = getFirstValidConnection(
|
||||
source,
|
||||
sourceHandle,
|
||||
target,
|
||||
targetHandle,
|
||||
nodes,
|
||||
edges,
|
||||
templates,
|
||||
edgePendingUpdate
|
||||
);
|
||||
if (connection) {
|
||||
const newEdge = connectionToEdge(connection);
|
||||
dispatch(edgesChanged([{ type: 'add', item: newEdge }]));
|
||||
}
|
||||
}
|
||||
|
||||
closeAddNodePopover();
|
||||
},
|
||||
[addNode, dispatch]
|
||||
[addNode, dispatch, pendingConnection, store, templates]
|
||||
);
|
||||
|
||||
const onClose = useCallback(() => {
|
||||
dispatch(addNodePopoverClosed());
|
||||
}, [dispatch]);
|
||||
|
||||
const onOpen = useCallback(() => {
|
||||
dispatch(addNodePopoverOpened());
|
||||
}, [dispatch]);
|
||||
|
||||
const handleHotkeyOpen: HotkeyCallback = useCallback(
|
||||
(e) => {
|
||||
const handleHotkeyOpen: HotkeyCallback = useCallback((e) => {
|
||||
if (!$isAddNodePopoverOpen.get()) {
|
||||
e.preventDefault();
|
||||
onOpen();
|
||||
openAddNodePopover();
|
||||
flushSync(() => {
|
||||
selectRef.current?.inputRef?.focus();
|
||||
});
|
||||
},
|
||||
[onOpen]
|
||||
);
|
||||
}
|
||||
}, []);
|
||||
|
||||
const handleHotkeyClose: HotkeyCallback = useCallback(() => {
|
||||
onClose();
|
||||
}, [onClose]);
|
||||
if ($isAddNodePopoverOpen.get()) {
|
||||
closeAddNodePopover();
|
||||
}
|
||||
}, []);
|
||||
|
||||
useHotkeys(['shift+a', 'space'], handleHotkeyOpen);
|
||||
useHotkeys(['escape'], handleHotkeyClose);
|
||||
const onKeyDown: KeyboardEventHandler = useCallback(
|
||||
(e) => {
|
||||
if (e.key === 'Escape') {
|
||||
onClose();
|
||||
}
|
||||
},
|
||||
[onClose]
|
||||
);
|
||||
useHotkeys(['escape'], handleHotkeyClose, { enableOnFormTags: ['TEXTAREA'] });
|
||||
|
||||
const noOptionsMessage = useCallback(() => t('nodes.noMatchingNodes'), [t]);
|
||||
|
||||
return (
|
||||
<Popover
|
||||
isOpen={isOpen}
|
||||
onClose={onClose}
|
||||
onClose={closeAddNodePopover}
|
||||
placement="bottom"
|
||||
openDelay={0}
|
||||
closeDelay={0}
|
||||
@@ -206,8 +259,7 @@ const AddNodePopover = () => {
|
||||
noOptionsMessage={noOptionsMessage}
|
||||
filterOption={filterOption}
|
||||
onChange={onChange}
|
||||
onMenuClose={onClose}
|
||||
onKeyDown={onKeyDown}
|
||||
onMenuClose={closeAddNodePopover}
|
||||
inputRef={inputRef}
|
||||
closeMenuOnSelect={false}
|
||||
/>
|
||||
|
||||
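The `addNode` callback above expresses "add the new node and make it the only selection" purely as reactflow change objects, which keeps the reducer generic and lets the same `nodesChanged`/`edgesChanged` actions drive undo history. A self-contained sketch of that step, with simplified inputs:

// Sketch of building the add + deselect changes, as in the callback above
import type { EdgeChange, Node, NodeChange } from 'reactflow';

const buildAddAndSelectChanges = (
  newNode: Node,
  nodes: Pick<Node, 'id' | 'selected'>[],
  edges: { id: string; selected?: boolean }[]
): { nodeChanges: NodeChange[]; edgeChanges: EdgeChange[] } => {
  // The new node is added pre-selected; everything else gets deselected
  const nodeChanges: NodeChange[] = [{ type: 'add', item: { ...newNode, selected: true } }];
  const edgeChanges: EdgeChange[] = [];
  nodes.forEach(({ id, selected }) => {
    if (selected) {
      nodeChanges.push({ type: 'select', id, selected: false });
    }
  });
  edges.forEach(({ id, selected }) => {
    if (selected) {
      edgeChanges.push({ type: 'select', id, selected: false });
    }
  });
  return { nodeChanges, edgeChanges };
};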
@@ -1,47 +1,42 @@
|
||||
import { useGlobalMenuClose, useToken } from '@invoke-ai/ui-library';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { useStore } from '@nanostores/react';
|
||||
import { useAppDispatch, useAppSelector, useAppStore } from 'app/store/storeHooks';
|
||||
import { useConnection } from 'features/nodes/hooks/useConnection';
|
||||
import { useCopyPaste } from 'features/nodes/hooks/useCopyPaste';
|
||||
import { useSyncExecutionState } from 'features/nodes/hooks/useExecutionState';
|
||||
import { useIsValidConnection } from 'features/nodes/hooks/useIsValidConnection';
|
||||
import { $mouseOverNode } from 'features/nodes/hooks/useMouseOverNode';
|
||||
import { useWorkflowWatcher } from 'features/nodes/hooks/useWorkflowWatcher';
|
||||
import {
|
||||
connectionEnded,
|
||||
connectionMade,
|
||||
connectionStarted,
|
||||
edgeAdded,
|
||||
edgeChangeStarted,
|
||||
edgeDeleted,
|
||||
$cursorPos,
|
||||
$didUpdateEdge,
|
||||
$edgePendingUpdate,
|
||||
$isAddNodePopoverOpen,
|
||||
$lastEdgeUpdateMouseEvent,
|
||||
$pendingConnection,
|
||||
$viewport,
|
||||
edgesChanged,
|
||||
edgesDeleted,
|
||||
nodesChanged,
|
||||
nodesDeleted,
|
||||
selectedAll,
|
||||
selectedEdgesChanged,
|
||||
selectedNodesChanged,
|
||||
selectionCopied,
|
||||
selectionPasted,
|
||||
viewportChanged,
|
||||
redo,
|
||||
undo,
|
||||
} from 'features/nodes/store/nodesSlice';
|
||||
import { $flow } from 'features/nodes/store/reactFlowInstance';
|
||||
import { connectionToEdge } from 'features/nodes/store/util/reactFlowUtil';
|
||||
import type { CSSProperties, MouseEvent } from 'react';
|
||||
import { memo, useCallback, useMemo, useRef } from 'react';
|
||||
import { useHotkeys } from 'react-hotkeys-hook';
|
||||
import type {
|
||||
OnConnect,
|
||||
OnConnectEnd,
|
||||
OnConnectStart,
|
||||
EdgeChange,
|
||||
NodeChange,
|
||||
OnEdgesChange,
|
||||
OnEdgesDelete,
|
||||
OnEdgeUpdateFunc,
|
||||
OnInit,
|
||||
OnMoveEnd,
|
||||
OnNodesChange,
|
||||
OnNodesDelete,
|
||||
OnSelectionChangeFunc,
|
||||
ProOptions,
|
||||
ReactFlowProps,
|
||||
XYPosition,
|
||||
ReactFlowState,
|
||||
} from 'reactflow';
|
||||
import { Background, ReactFlow } from 'reactflow';
|
||||
import { Background, ReactFlow, useStore as useReactFlowStore, useUpdateNodeInternals } from 'reactflow';
|
||||
|
||||
import CustomConnectionLine from './connectionLines/CustomConnectionLine';
|
||||
import InvocationCollapsedEdge from './edges/InvocationCollapsedEdge';
|
||||
@@ -50,8 +45,6 @@ import CurrentImageNode from './nodes/CurrentImage/CurrentImageNode';
|
||||
import InvocationNodeWrapper from './nodes/Invocation/InvocationNodeWrapper';
|
||||
import NotesNode from './nodes/Notes/NotesNode';
|
||||
|
||||
const DELETE_KEYS = ['Delete', 'Backspace'];
|
||||
|
||||
const edgeTypes = {
|
||||
collapsed: InvocationCollapsedEdge,
|
||||
default: InvocationDefaultEdge,
|
||||
@@ -68,17 +61,25 @@ const proOptions: ProOptions = { hideAttribution: true };
|
||||
|
||||
const snapGrid: [number, number] = [25, 25];
|
||||
|
||||
const selectCancelConnection = (state: ReactFlowState) => state.cancelConnection;
|
||||
|
||||
export const Flow = memo(() => {
|
||||
const dispatch = useAppDispatch();
|
||||
const nodes = useAppSelector((s) => s.nodes.nodes);
|
||||
const edges = useAppSelector((s) => s.nodes.edges);
|
||||
const viewport = useAppSelector((s) => s.nodes.viewport);
|
||||
const shouldSnapToGrid = useAppSelector((s) => s.nodes.shouldSnapToGrid);
|
||||
const selectionMode = useAppSelector((s) => s.nodes.selectionMode);
|
||||
const nodes = useAppSelector((s) => s.nodes.present.nodes);
|
||||
const edges = useAppSelector((s) => s.nodes.present.edges);
|
||||
const viewport = useStore($viewport);
|
||||
const mayUndo = useAppSelector((s) => s.nodes.past.length > 0);
|
||||
const mayRedo = useAppSelector((s) => s.nodes.future.length > 0);
|
||||
const shouldSnapToGrid = useAppSelector((s) => s.workflowSettings.shouldSnapToGrid);
|
||||
const selectionMode = useAppSelector((s) => s.workflowSettings.selectionMode);
|
||||
const { onConnectStart, onConnect, onConnectEnd } = useConnection();
|
||||
const flowWrapper = useRef<HTMLDivElement>(null);
|
||||
const cursorPosition = useRef<XYPosition | null>(null);
|
||||
const isValidConnection = useIsValidConnection();
|
||||
const cancelConnection = useReactFlowStore(selectCancelConnection);
|
||||
const updateNodeInternals = useUpdateNodeInternals();
|
||||
const store = useAppStore();
|
||||
useWorkflowWatcher();
|
||||
useSyncExecutionState();
|
||||
const [borderRadius] = useToken('radii', ['base']);
|
||||
|
||||
const flowStyles = useMemo<CSSProperties>(
|
||||
@@ -89,73 +90,24 @@ export const Flow = memo(() => {
|
||||
);
|
||||
|
||||
const onNodesChange: OnNodesChange = useCallback(
|
||||
(changes) => {
|
||||
dispatch(nodesChanged(changes));
|
||||
(nodeChanges) => {
|
||||
dispatch(nodesChanged(nodeChanges));
|
||||
},
|
||||
[dispatch]
|
||||
);
|
||||
|
||||
const onEdgesChange: OnEdgesChange = useCallback(
|
||||
(changes) => {
|
||||
dispatch(edgesChanged(changes));
|
||||
if (changes.length > 0) {
|
||||
dispatch(edgesChanged(changes));
|
||||
}
|
||||
},
|
||||
[dispatch]
|
||||
);
|
||||
|
||||
const onConnectStart: OnConnectStart = useCallback(
|
||||
(event, params) => {
|
||||
dispatch(connectionStarted(params));
|
||||
},
|
||||
[dispatch]
|
||||
);
|
||||
|
||||
const onConnect: OnConnect = useCallback(
|
||||
(connection) => {
|
||||
dispatch(connectionMade(connection));
|
||||
},
|
||||
[dispatch]
|
||||
);
|
||||
|
||||
const onConnectEnd: OnConnectEnd = useCallback(() => {
|
||||
if (!cursorPosition.current) {
|
||||
return;
|
||||
}
|
||||
dispatch(
|
||||
connectionEnded({
|
||||
cursorPosition: cursorPosition.current,
|
||||
mouseOverNodeId: $mouseOverNode.get(),
|
||||
})
|
||||
);
|
||||
}, [dispatch]);
|
||||
|
||||
const onEdgesDelete: OnEdgesDelete = useCallback(
|
||||
(edges) => {
|
||||
dispatch(edgesDeleted(edges));
|
||||
},
|
||||
[dispatch]
|
||||
);
|
||||
|
||||
const onNodesDelete: OnNodesDelete = useCallback(
|
||||
(nodes) => {
|
||||
dispatch(nodesDeleted(nodes));
|
||||
},
|
||||
[dispatch]
|
||||
);
|
||||
|
||||
const handleSelectionChange: OnSelectionChangeFunc = useCallback(
|
||||
({ nodes, edges }) => {
|
||||
dispatch(selectedNodesChanged(nodes ? nodes.map((n) => n.id) : []));
|
||||
dispatch(selectedEdgesChanged(edges ? edges.map((e) => e.id) : []));
|
||||
},
|
||||
[dispatch]
|
||||
);
|
||||
|
||||
const handleMoveEnd: OnMoveEnd = useCallback(
|
||||
(e, viewport) => {
|
||||
dispatch(viewportChanged(viewport));
|
||||
},
|
||||
[dispatch]
|
||||
);
|
||||
const handleMoveEnd: OnMoveEnd = useCallback((e, viewport) => {
|
||||
$viewport.set(viewport);
|
||||
}, []);
|
||||
|
||||
const { onCloseGlobal } = useGlobalMenuClose();
|
||||
const handlePaneClick = useCallback(() => {
|
||||
@@ -169,11 +121,12 @@ export const Flow = memo(() => {
|
||||
|
||||
const onMouseMove = useCallback((event: MouseEvent<HTMLDivElement>) => {
|
||||
if (flowWrapper.current?.getBoundingClientRect()) {
|
||||
cursorPosition.current =
|
||||
$cursorPos.set(
|
||||
$flow.get()?.screenToFlowPosition({
|
||||
x: event.clientX,
|
||||
y: event.clientY,
|
||||
}) ?? null;
|
||||
}) ?? null
|
||||
);
|
||||
}
|
||||
}, []);
|
||||
|
||||
@@ -189,67 +142,157 @@ export const Flow = memo(() => {
|
||||
* where the edge is deleted if you click it accidentally).
|
||||
*/
|
||||
|
||||
// We have a ref for cursor position, but it is the *projected* cursor position.
|
||||
// Easiest to just keep track of the last mouse event for this particular feature
|
||||
const edgeUpdateMouseEvent = useRef<MouseEvent>();
|
||||
|
||||
const onEdgeUpdateStart: NonNullable<ReactFlowProps['onEdgeUpdateStart']> = useCallback(
|
||||
(e, edge, _handleType) => {
|
||||
// update mouse event
|
||||
edgeUpdateMouseEvent.current = e;
|
||||
// always delete the edge when starting an updated
|
||||
dispatch(edgeDeleted(edge.id));
|
||||
dispatch(edgeChangeStarted());
|
||||
},
|
||||
[dispatch]
|
||||
);
|
||||
const onEdgeUpdateStart: NonNullable<ReactFlowProps['onEdgeUpdateStart']> = useCallback((e, edge, _handleType) => {
|
||||
$edgePendingUpdate.set(edge);
|
||||
$didUpdateEdge.set(false);
|
||||
$lastEdgeUpdateMouseEvent.set(e);
|
||||
}, []);
|
||||
|
||||
const onEdgeUpdate: OnEdgeUpdateFunc = useCallback(
|
||||
(_oldEdge, newConnection) => {
|
||||
// instead of updating the edge (we deleted it earlier), we instead create
|
||||
// a new one.
|
||||
dispatch(connectionMade(newConnection));
|
||||
(oldEdge, newConnection) => {
|
||||
// This event is fired when an edge update is successful
|
||||
$didUpdateEdge.set(true);
|
||||
// When an edge update is successful, we need to delete the old edge and create a new one
|
||||
const newEdge = connectionToEdge(newConnection);
|
||||
dispatch(
|
||||
edgesChanged([
|
||||
{ type: 'remove', id: oldEdge.id },
|
||||
{ type: 'add', item: newEdge },
|
||||
])
|
||||
);
|
||||
// Because we shift the position of handles depending on whether a field is connected or not, we must use
|
||||
// updateNodeInternals to tell reactflow to recalculate the positions of the handles
|
||||
updateNodeInternals([oldEdge.source, oldEdge.target, newEdge.source, newEdge.target]);
|
||||
},
|
||||
[dispatch]
|
||||
[dispatch, updateNodeInternals]
|
||||
);
|
||||
|
||||
const onEdgeUpdateEnd: NonNullable<ReactFlowProps['onEdgeUpdateEnd']> = useCallback(
|
||||
(e, edge, _handleType) => {
|
||||
// Handle the case where user begins a drag but didn't move the cursor -
|
||||
// bc we deleted the edge, we need to add it back
|
||||
if (
|
||||
// ignore touch events
|
||||
!('touches' in e) &&
|
||||
edgeUpdateMouseEvent.current?.clientX === e.clientX &&
|
||||
edgeUpdateMouseEvent.current?.clientY === e.clientY
|
||||
) {
|
||||
dispatch(edgeAdded(edge));
|
||||
const didUpdateEdge = $didUpdateEdge.get();
|
||||
// Fall back to a reasonable default event
|
||||
const lastEvent = $lastEdgeUpdateMouseEvent.get() ?? { clientX: 0, clientY: 0 };
|
||||
// We have to narrow this event down to MouseEvents - could be TouchEvent
|
||||
const didMouseMove =
|
||||
!('touches' in e) && Math.hypot(e.clientX - lastEvent.clientX, e.clientY - lastEvent.clientY) > 5;
|
||||
|
||||
// If we got this far and did not successfully update an edge, and the mouse moved away from the handle,
|
||||
// the user probably intended to delete the edge
|
||||
if (!didUpdateEdge && didMouseMove) {
|
||||
dispatch(edgesChanged([{ type: 'remove', id: edge.id }]));
|
||||
}
|
||||
// reset mouse event
|
||||
edgeUpdateMouseEvent.current = undefined;
|
||||
|
||||
$edgePendingUpdate.set(null);
|
||||
$didUpdateEdge.set(false);
|
||||
$pendingConnection.set(null);
|
||||
$lastEdgeUpdateMouseEvent.set(null);
|
||||
},
|
||||
[dispatch]
|
||||
);
|
||||
|
||||
// #endregion
|
||||
|
||||
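The edge-update handlers above compare the update-end event against the stored update-start event: a drag that barely moved is treated as an accidental click, not an intent to delete the edge. The distance check itself is a one-liner; a sketch of it, with the 5px threshold taken from the code above:

// Sketch of the "did the mouse actually move?" check used in onEdgeUpdateEnd
const didMouseMove = (
  start: { clientX: number; clientY: number },
  end: { clientX: number; clientY: number },
  thresholdPx = 5
): boolean => Math.hypot(end.clientX - start.clientX, end.clientY - start.clientY) > thresholdPx;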
useHotkeys(['Ctrl+c', 'Meta+c'], (e) => {
|
||||
e.preventDefault();
|
||||
dispatch(selectionCopied());
|
||||
});
|
||||
const { copySelection, pasteSelection } = useCopyPaste();
|
||||
|
||||
useHotkeys(['Ctrl+a', 'Meta+a'], (e) => {
|
||||
e.preventDefault();
|
||||
dispatch(selectedAll());
|
||||
});
|
||||
const onCopyHotkey = useCallback(
|
||||
(e: KeyboardEvent) => {
|
||||
e.preventDefault();
|
||||
copySelection();
|
||||
},
|
||||
[copySelection]
|
||||
);
|
||||
useHotkeys(['Ctrl+c', 'Meta+c'], onCopyHotkey);
|
||||
|
||||
useHotkeys(['Ctrl+v', 'Meta+v'], (e) => {
|
||||
if (!cursorPosition.current) {
|
||||
return;
|
||||
const onSelectAllHotkey = useCallback(
|
||||
(e: KeyboardEvent) => {
|
||||
e.preventDefault();
|
||||
const { nodes, edges } = store.getState().nodes.present;
|
||||
const nodeChanges: NodeChange[] = [];
|
||||
const edgeChanges: EdgeChange[] = [];
|
||||
nodes.forEach(({ id, selected }) => {
|
||||
if (!selected) {
|
||||
nodeChanges.push({ type: 'select', id, selected: true });
|
||||
}
|
||||
});
|
||||
edges.forEach(({ id, selected }) => {
|
||||
if (!selected) {
|
||||
edgeChanges.push({ type: 'select', id, selected: true });
|
||||
}
|
||||
});
|
||||
if (nodeChanges.length > 0) {
|
||||
dispatch(nodesChanged(nodeChanges));
|
||||
}
|
||||
if (edgeChanges.length > 0) {
|
||||
dispatch(edgesChanged(edgeChanges));
|
||||
}
|
||||
},
|
||||
[dispatch, store]
|
||||
);
|
||||
useHotkeys(['Ctrl+a', 'Meta+a'], onSelectAllHotkey);
|
||||
|
||||
const onPasteHotkey = useCallback(
|
||||
(e: KeyboardEvent) => {
|
||||
e.preventDefault();
|
||||
pasteSelection();
|
||||
},
|
||||
[pasteSelection]
|
||||
);
|
||||
useHotkeys(['Ctrl+v', 'Meta+v'], onPasteHotkey);
|
||||
|
||||
const onPasteWithEdgesToNodesHotkey = useCallback(
|
||||
(e: KeyboardEvent) => {
|
||||
e.preventDefault();
|
||||
pasteSelection(true);
|
||||
},
|
||||
[pasteSelection]
|
||||
);
|
||||
useHotkeys(['Ctrl+shift+v', 'Meta+shift+v'], onPasteWithEdgesToNodesHotkey);
|
||||
|
||||
const onUndoHotkey = useCallback(() => {
|
||||
if (mayUndo) {
|
||||
dispatch(undo());
|
||||
}
|
||||
e.preventDefault();
|
||||
dispatch(selectionPasted({ cursorPosition: cursorPosition.current }));
|
||||
});
|
||||
}, [dispatch, mayUndo]);
|
||||
useHotkeys(['meta+z', 'ctrl+z'], onUndoHotkey);
|
||||
|
||||
const onRedoHotkey = useCallback(() => {
|
||||
if (mayRedo) {
|
||||
dispatch(redo());
|
||||
}
|
||||
}, [dispatch, mayRedo]);
|
||||
useHotkeys(['meta+shift+z', 'ctrl+shift+z'], onRedoHotkey);
|
||||
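The undo/redo hotkeys and the `s.nodes.present` / `s.nodes.past` selectors above assume the nodes slice is wrapped with redux-undo, which reshapes the state into `{ past, present, future }`. A minimal sketch of that wiring (the slice contents and the history limit here are illustrative):

// Sketch of wrapping a slice with redux-undo
import { createSlice } from '@reduxjs/toolkit';
import undoable from 'redux-undo';

const nodesSketchSlice = createSlice({
  name: 'nodesSketch',
  initialState: { nodes: [] as { id: string; selected?: boolean }[], edges: [] as { id: string }[] },
  reducers: {},
});

// state.nodesSketch becomes { past, present, future }
export const nodesSketchReducer = undoable(nodesSketchSlice.reducer, { limit: 64 });

// "Can undo" is then just a length check, e.g. (s) => s.nodesSketch.past.length > 0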
|
||||
const onEscapeHotkey = useCallback(() => {
|
||||
if (!$edgePendingUpdate.get()) {
|
||||
$pendingConnection.set(null);
|
||||
$isAddNodePopoverOpen.set(false);
|
||||
cancelConnection();
|
||||
}
|
||||
}, [cancelConnection]);
|
||||
useHotkeys('esc', onEscapeHotkey);
|
||||
|
||||
const onDeleteHotkey = useCallback(() => {
|
||||
const { nodes, edges } = store.getState().nodes.present;
|
||||
const nodeChanges: NodeChange[] = [];
|
||||
const edgeChanges: EdgeChange[] = [];
|
||||
nodes
|
||||
.filter((n) => n.selected)
|
||||
.forEach(({ id }) => {
|
||||
nodeChanges.push({ type: 'remove', id });
|
||||
});
|
||||
edges
|
||||
.filter((e) => e.selected)
|
||||
.forEach(({ id }) => {
|
||||
edgeChanges.push({ type: 'remove', id });
|
||||
});
|
||||
if (nodeChanges.length > 0) {
|
||||
dispatch(nodesChanged(nodeChanges));
|
||||
}
|
||||
if (edgeChanges.length > 0) {
|
||||
dispatch(edgesChanged(edgeChanges));
|
||||
}
|
||||
}, [dispatch, store]);
|
||||
useHotkeys(['delete', 'backspace'], onDeleteHotkey);
|
||||
|
||||
return (
|
||||
<ReactFlow
|
||||
@@ -264,17 +307,14 @@ export const Flow = memo(() => {
|
||||
onMouseMove={onMouseMove}
|
||||
onNodesChange={onNodesChange}
|
||||
onEdgesChange={onEdgesChange}
|
||||
onEdgesDelete={onEdgesDelete}
|
||||
onEdgeUpdate={onEdgeUpdate}
|
||||
onEdgeUpdateStart={onEdgeUpdateStart}
|
||||
onEdgeUpdateEnd={onEdgeUpdateEnd}
|
||||
onNodesDelete={onNodesDelete}
|
||||
onConnectStart={onConnectStart}
|
||||
onConnect={onConnect}
|
||||
onConnectEnd={onConnectEnd}
|
||||
onMoveEnd={handleMoveEnd}
|
||||
connectionLineComponent={CustomConnectionLine}
|
||||
onSelectionChange={handleSelectionChange}
|
||||
isValidConnection={isValidConnection}
|
||||
minZoom={0.1}
|
||||
snapToGrid={shouldSnapToGrid}
|
||||
@@ -283,8 +323,10 @@ export const Flow = memo(() => {
|
||||
proOptions={proOptions}
|
||||
style={flowStyles}
|
||||
onPaneClick={handlePaneClick}
|
||||
deleteKeyCode={DELETE_KEYS}
|
||||
deleteKeyCode={null}
|
||||
selectionMode={selectionMode}
|
||||
elevateEdgesOnSelect
|
||||
nodeDragThreshold={1}
|
||||
>
|
||||
<Background />
|
||||
</ReactFlow>
|
||||
|
||||
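The hotkey wiring above follows a single pattern: memoize the handler with useCallback, then register it with useHotkeys. A minimal sketch of that pattern, assuming react-hotkeys-hook's useHotkeys (the import is not shown in this diff) and a placeholder handler name:

// Sketch only: onMyHotkey and the key combo are placeholders, not part of this diff.
const onMyHotkey = useCallback((e: KeyboardEvent) => {
  e.preventDefault(); // stop the browser from also handling the Ctrl/Meta shortcut
  // dispatch an action or call a store helper here
}, []);
useHotkeys(['ctrl+k', 'meta+k'], onMyHotkey);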
@@ -1,26 +1,33 @@
import { createSelector } from '@reduxjs/toolkit';
import { useStore } from '@nanostores/react';
import { useAppSelector } from 'app/store/storeHooks';
import { colorTokenToCssVar } from 'common/util/colorTokenToCssVar';
import { getFieldColor } from 'features/nodes/components/flow/edges/util/getEdgeColor';
import { selectNodesSlice } from 'features/nodes/store/nodesSlice';
import { $pendingConnection } from 'features/nodes/store/nodesSlice';
import type { CSSProperties } from 'react';
import { memo } from 'react';
import { memo, useMemo } from 'react';
import type { ConnectionLineComponentProps } from 'reactflow';
import { getBezierPath } from 'reactflow';

const selectStroke = createSelector(selectNodesSlice, (nodes) =>
nodes.shouldColorEdges ? getFieldColor(nodes.connectionStartFieldType) : colorTokenToCssVar('base.500')
);

const selectClassName = createSelector(selectNodesSlice, (nodes) =>
nodes.shouldAnimateEdges ? 'react-flow__custom_connection-path animated' : 'react-flow__custom_connection-path'
);

const pathStyles: CSSProperties = { opacity: 0.8 };

const CustomConnectionLine = ({ fromX, fromY, fromPosition, toX, toY, toPosition }: ConnectionLineComponentProps) => {
const stroke = useAppSelector(selectStroke);
const className = useAppSelector(selectClassName);
const pendingConnection = useStore($pendingConnection);
const shouldColorEdges = useAppSelector((state) => state.workflowSettings.shouldColorEdges);
const shouldAnimateEdges = useAppSelector((state) => state.workflowSettings.shouldAnimateEdges);
const stroke = useMemo(() => {
if (shouldColorEdges && pendingConnection) {
return getFieldColor(pendingConnection.fieldTemplate.type);
} else {
return colorTokenToCssVar('base.500');
}
}, [pendingConnection, shouldColorEdges]);
const className = useMemo(() => {
if (shouldAnimateEdges) {
return 'react-flow__custom_connection-path animated';
} else {
return 'react-flow__custom_connection-path';
}
}, [shouldAnimateEdges]);

const pathParams = {
sourceX: fromX,
@@ -1,12 +1,14 @@
import { Badge, Flex } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { useAppSelector } from 'app/store/storeHooks';
import { useChakraThemeTokens } from 'common/hooks/useChakraThemeTokens';
import { getEdgeStyles } from 'features/nodes/components/flow/edges/util/getEdgeColor';
import { makeEdgeSelector } from 'features/nodes/components/flow/edges/util/makeEdgeSelector';
import { $templates } from 'features/nodes/store/nodesSlice';
import { memo, useMemo } from 'react';
import type { EdgeProps } from 'reactflow';
import { BaseEdge, EdgeLabelRenderer, getBezierPath } from 'reactflow';

import { makeEdgeSelector } from './util/makeEdgeSelector';

const InvocationCollapsedEdge = ({
sourceX,
sourceY,
@@ -16,18 +18,19 @@ const InvocationCollapsedEdge = ({
targetPosition,
markerEnd,
data,
selected,
selected = false,
source,
target,
sourceHandleId,
target,
targetHandleId,
}: EdgeProps<{ count: number }>) => {
const templates = useStore($templates);
const selector = useMemo(
() => makeEdgeSelector(source, sourceHandleId, target, targetHandleId, selected),
[selected, source, sourceHandleId, target, targetHandleId]
() => makeEdgeSelector(templates, source, sourceHandleId, target, targetHandleId),
[templates, source, sourceHandleId, target, targetHandleId]
);

const { isSelected, shouldAnimate } = useAppSelector(selector);
const { shouldAnimateEdges, areConnectedNodesSelected } = useAppSelector(selector);

const [edgePath, labelX, labelY] = getBezierPath({
sourceX,
@@ -41,14 +44,8 @@ const InvocationCollapsedEdge = ({
const { base500 } = useChakraThemeTokens();

const edgeStyles = useMemo(
() => ({
strokeWidth: isSelected ? 3 : 2,
stroke: base500,
opacity: isSelected ? 0.8 : 0.5,
animation: shouldAnimate ? 'dashdraw 0.5s linear infinite' : undefined,
strokeDasharray: shouldAnimate ? 5 : 'none',
}),
[base500, isSelected, shouldAnimate]
() => getEdgeStyles(base500, selected, shouldAnimateEdges, areConnectedNodesSelected),
[areConnectedNodesSelected, base500, selected, shouldAnimateEdges]
);

return (
@@ -57,11 +54,15 @@ const InvocationCollapsedEdge = ({
{data?.count && data.count > 1 && (
<EdgeLabelRenderer>
<Flex
data-testid="asdfasdfasdf"
position="absolute"
transform={`translate(-50%, -50%) translate(${labelX}px,${labelY}px)`}
className="nodrag nopan"
// Unfortunately edge labels do not get the same zIndex treatment as edges do, so we need to manage this ourselves
// See: https://github.com/xyflow/xyflow/issues/3658
zIndex={1001}
>
<Badge variant="solid" bg="base.500" opacity={isSelected ? 0.8 : 0.5} boxShadow="base">
<Badge variant="solid" bg="base.500" opacity={selected ? 0.8 : 0.5} boxShadow="base">
{data.count}
</Badge>
</Flex>
@@ -1,6 +1,8 @@
import { Flex, Text } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { useAppSelector } from 'app/store/storeHooks';
import type { CSSProperties } from 'react';
import { getEdgeStyles } from 'features/nodes/components/flow/edges/util/getEdgeColor';
import { $templates } from 'features/nodes/store/nodesSlice';
import { memo, useMemo } from 'react';
import type { EdgeProps } from 'reactflow';
import { BaseEdge, EdgeLabelRenderer, getBezierPath } from 'reactflow';
@@ -15,19 +17,20 @@ const InvocationDefaultEdge = ({
sourcePosition,
targetPosition,
markerEnd,
selected,
selected = false,
source,
target,
sourceHandleId,
targetHandleId,
}: EdgeProps) => {
const templates = useStore($templates);
const selector = useMemo(
() => makeEdgeSelector(source, sourceHandleId, target, targetHandleId, selected),
[source, sourceHandleId, target, targetHandleId, selected]
() => makeEdgeSelector(templates, source, sourceHandleId, target, targetHandleId),
[templates, source, sourceHandleId, target, targetHandleId]
);

const { isSelected, shouldAnimate, stroke, label } = useAppSelector(selector);
const shouldShowEdgeLabels = useAppSelector((s) => s.nodes.shouldShowEdgeLabels);
const { shouldAnimateEdges, areConnectedNodesSelected, stroke, label } = useAppSelector(selector);
const shouldShowEdgeLabels = useAppSelector((s) => s.workflowSettings.shouldShowEdgeLabels);

const [edgePath, labelX, labelY] = getBezierPath({
sourceX,
@@ -38,15 +41,9 @@ const InvocationDefaultEdge = ({
targetPosition,
});

const edgeStyles = useMemo<CSSProperties>(
() => ({
strokeWidth: isSelected ? 3 : 2,
stroke,
opacity: isSelected ? 0.8 : 0.5,
animation: shouldAnimate ? 'dashdraw 0.5s linear infinite' : undefined,
strokeDasharray: shouldAnimate ? 5 : 'none',
}),
[isSelected, shouldAnimate, stroke]
const edgeStyles = useMemo(
() => getEdgeStyles(stroke, selected, shouldAnimateEdges, areConnectedNodesSelected),
[areConnectedNodesSelected, stroke, selected, shouldAnimateEdges]
);

return (
@@ -62,13 +59,13 @@ const InvocationDefaultEdge = ({
bg="base.800"
borderRadius="base"
borderWidth={1}
borderColor={isSelected ? 'undefined' : 'transparent'}
opacity={isSelected ? 1 : 0.5}
borderColor={selected ? 'undefined' : 'transparent'}
opacity={selected ? 1 : 0.5}
py={1}
px={3}
shadow="md"
>
<Text size="sm" fontWeight="semibold" color={isSelected ? 'base.100' : 'base.300'}>
<Text size="sm" fontWeight="semibold" color={selected ? 'base.100' : 'base.300'}>
{label}
</Text>
</Flex>
@@ -1,6 +1,7 @@
import { colorTokenToCssVar } from 'common/util/colorTokenToCssVar';
import { FIELD_COLORS } from 'features/nodes/types/constants';
import type { FieldType } from 'features/nodes/types/field';
import type { CSSProperties } from 'react';

export const getFieldColor = (fieldType: FieldType | null): string => {
if (!fieldType) {
@@ -10,3 +11,16 @@ export const getFieldColor = (fieldType: FieldType | null): string => {

return color ? colorTokenToCssVar(color) : colorTokenToCssVar('base.500');
};

export const getEdgeStyles = (
stroke: string,
selected: boolean,
shouldAnimateEdges: boolean,
areConnectedNodesSelected: boolean
): CSSProperties => ({
strokeWidth: 3,
stroke,
opacity: selected ? 1 : 0.5,
animation: shouldAnimateEdges ? 'dashdraw 0.5s linear infinite' : undefined,
strokeDasharray: selected || areConnectedNodesSelected ? 5 : 'none',
});
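A minimal usage sketch for the new getEdgeStyles helper, mirroring how the edge components earlier in this diff consume it (stroke, selected and the selector fields come from the surrounding edge component):

// Memoize so the style object is only rebuilt when its inputs change.
const edgeStyles = useMemo(
  () => getEdgeStyles(stroke, selected, shouldAnimateEdges, areConnectedNodesSelected),
  [areConnectedNodesSelected, stroke, selected, shouldAnimateEdges]
);
// then: <BaseEdge path={edgePath} markerEnd={markerEnd} style={edgeStyles} />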
@@ -1,53 +1,58 @@
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { colorTokenToCssVar } from 'common/util/colorTokenToCssVar';
import { deepClone } from 'common/util/deepClone';
import { selectNodesSlice } from 'features/nodes/store/nodesSlice';
import { selectFieldOutputTemplate, selectNodeTemplate } from 'features/nodes/store/selectors';
import type { Templates } from 'features/nodes/store/types';
import { selectWorkflowSettingsSlice } from 'features/nodes/store/workflowSettingsSlice';
import { isInvocationNode } from 'features/nodes/types/invocation';

import { getFieldColor } from './getEdgeColor';

const defaultReturnValue = {
isSelected: false,
shouldAnimate: false,
areConnectedNodesSelected: false,
shouldAnimateEdges: false,
stroke: colorTokenToCssVar('base.500'),
label: '',
};

export const makeEdgeSelector = (
templates: Templates,
source: string,
sourceHandleId: string | null | undefined,
target: string,
targetHandleId: string | null | undefined,
selected?: boolean
targetHandleId: string | null | undefined
) =>
createMemoizedSelector(
selectNodesSlice,
(nodes): { isSelected: boolean; shouldAnimate: boolean; stroke: string; label: string } => {
selectWorkflowSettingsSlice,
(
nodes,
workflowSettings
): { areConnectedNodesSelected: boolean; shouldAnimateEdges: boolean; stroke: string; label: string } => {
const { shouldAnimateEdges, shouldColorEdges } = workflowSettings;
const sourceNode = nodes.nodes.find((node) => node.id === source);
const targetNode = nodes.nodes.find((node) => node.id === target);

const returnValue = deepClone(defaultReturnValue);
returnValue.shouldAnimateEdges = shouldAnimateEdges;

const isInvocationToInvocationEdge = isInvocationNode(sourceNode) && isInvocationNode(targetNode);

const isSelected = Boolean(sourceNode?.selected || targetNode?.selected || selected);
returnValue.areConnectedNodesSelected = Boolean(sourceNode?.selected || targetNode?.selected);
if (!sourceNode || !sourceHandleId || !targetNode || !targetHandleId) {
return defaultReturnValue;
return returnValue;
}

const outputFieldTemplate = selectFieldOutputTemplate(nodes, sourceNode.id, sourceHandleId);
const sourceNodeTemplate = templates[sourceNode.data.type];
const targetNodeTemplate = templates[targetNode.data.type];

const outputFieldTemplate = sourceNodeTemplate?.outputs[sourceHandleId];
const sourceType = isInvocationToInvocationEdge ? outputFieldTemplate?.type : undefined;

const stroke = sourceType && nodes.shouldColorEdges ? getFieldColor(sourceType) : colorTokenToCssVar('base.500');
returnValue.stroke = sourceType && shouldColorEdges ? getFieldColor(sourceType) : colorTokenToCssVar('base.500');

const sourceNodeTemplate = selectNodeTemplate(nodes, sourceNode.id);
const targetNodeTemplate = selectNodeTemplate(nodes, targetNode.id);
returnValue.label = `${sourceNodeTemplate?.title || sourceNode.data?.label} -> ${targetNodeTemplate?.title || targetNode.data?.label}`;

const label = `${sourceNodeTemplate?.title || sourceNode.data?.label} -> ${targetNodeTemplate?.title || targetNode.data?.label}`;

return {
isSelected,
shouldAnimate: nodes.shouldAnimateEdges && isSelected,
stroke,
label,
};
return returnValue;
}
);
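Usage sketch: after this change the selector takes the templates map as its first argument, so callers read $templates from nanostores and memoize one selector per edge, as both edge components above do:

const templates = useStore($templates);
const selector = useMemo(
  () => makeEdgeSelector(templates, source, sourceHandleId, target, targetHandleId),
  [templates, source, sourceHandleId, target, targetHandleId]
);
const { shouldAnimateEdges, areConnectedNodesSelected, stroke, label } = useAppSelector(selector);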
@@ -1,7 +1,7 @@
import { Flex, Grid, GridItem } from '@invoke-ai/ui-library';
import NodeWrapper from 'features/nodes/components/flow/nodes/common/NodeWrapper';
import { useAnyOrDirectInputFieldNames } from 'features/nodes/hooks/useAnyOrDirectInputFieldNames';
import { useConnectionInputFieldNames } from 'features/nodes/hooks/useConnectionInputFieldNames';
import { InvocationInputFieldCheck } from 'features/nodes/components/flow/nodes/Invocation/fields/InvocationFieldCheck';
import { useFieldNames } from 'features/nodes/hooks/useFieldNames';
import { useOutputFieldNames } from 'features/nodes/hooks/useOutputFieldNames';
import { useWithFooter } from 'features/nodes/hooks/useWithFooter';
import { memo } from 'react';
@@ -20,8 +20,7 @@ type Props = {
};

const InvocationNode = ({ nodeId, isOpen, label, type, selected }: Props) => {
const inputConnectionFieldNames = useConnectionInputFieldNames(nodeId);
const inputAnyOrDirectFieldNames = useAnyOrDirectInputFieldNames(nodeId);
const fieldNames = useFieldNames(nodeId);
const withFooter = useWithFooter(nodeId);
const outputFieldNames = useOutputFieldNames(nodeId);

@@ -41,9 +40,11 @@ const InvocationNode = ({ nodeId, isOpen, label, type, selected }: Props) => {
>
<Flex flexDir="column" px={2} w="full" h="full">
<Grid gridTemplateColumns="1fr auto" gridAutoRows="1fr">
{inputConnectionFieldNames.map((fieldName, i) => (
{fieldNames.connectionFields.map((fieldName, i) => (
<GridItem gridColumnStart={1} gridRowStart={i + 1} key={`${nodeId}.${fieldName}.input-field`}>
<InputField nodeId={nodeId} fieldName={fieldName} />
<InvocationInputFieldCheck nodeId={nodeId} fieldName={fieldName}>
<InputField nodeId={nodeId} fieldName={fieldName} />
</InvocationInputFieldCheck>
</GridItem>
))}
{outputFieldNames.map((fieldName, i) => (
@@ -52,8 +53,23 @@ const InvocationNode = ({ nodeId, isOpen, label, type, selected }: Props) => {
</GridItem>
))}
</Grid>
{inputAnyOrDirectFieldNames.map((fieldName) => (
<InputField key={`${nodeId}.${fieldName}.input-field`} nodeId={nodeId} fieldName={fieldName} />
{fieldNames.anyOrDirectFields.map((fieldName) => (
<InvocationInputFieldCheck
key={`${nodeId}.${fieldName}.input-field`}
nodeId={nodeId}
fieldName={fieldName}
>
<InputField nodeId={nodeId} fieldName={fieldName} />
</InvocationInputFieldCheck>
))}
{fieldNames.missingFields.map((fieldName) => (
<InvocationInputFieldCheck
key={`${nodeId}.${fieldName}.input-field`}
nodeId={nodeId}
fieldName={fieldName}
>
<InputField nodeId={nodeId} fieldName={fieldName} />
</InvocationInputFieldCheck>
))}
</Flex>
</Flex>
@@ -1,12 +1,10 @@
import type { SystemStyleObject } from '@invoke-ai/ui-library';
import { Badge, CircularProgress, Flex, Icon, Image, Text, Tooltip } from '@invoke-ai/ui-library';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppSelector } from 'app/store/storeHooks';
import { selectNodesSlice } from 'features/nodes/store/nodesSlice';
import { useExecutionState } from 'features/nodes/hooks/useExecutionState';
import { DRAG_HANDLE_CLASSNAME } from 'features/nodes/types/constants';
import type { NodeExecutionState } from 'features/nodes/types/invocation';
import { zNodeStatus } from 'features/nodes/types/invocation';
import { memo, useMemo } from 'react';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiCheckBold, PiDotsThreeOutlineFill, PiWarningBold } from 'react-icons/pi';

@@ -24,12 +22,7 @@ const circleStyles: SystemStyleObject = {
};

const InvocationNodeStatusIndicator = ({ nodeId }: Props) => {
const selectNodeExecutionState = useMemo(
() => createMemoizedSelector(selectNodesSlice, (nodes) => nodes.nodeExecutionStates[nodeId]),
[nodeId]
);

const nodeExecutionState = useAppSelector(selectNodeExecutionState);
const nodeExecutionState = useExecutionState(nodeId);

if (!nodeExecutionState) {
return null;
@@ -1,7 +1,7 @@
import { createSelector } from '@reduxjs/toolkit';
import { useStore } from '@nanostores/react';
import { useAppSelector } from 'app/store/storeHooks';
import InvocationNode from 'features/nodes/components/flow/nodes/Invocation/InvocationNode';
import { selectNodesSlice } from 'features/nodes/store/nodesSlice';
import { $templates } from 'features/nodes/store/nodesSlice';
import type { InvocationNodeData } from 'features/nodes/types/invocation';
import { memo, useMemo } from 'react';
import type { NodeProps } from 'reactflow';
@@ -11,13 +11,13 @@ import InvocationNodeUnknownFallback from './InvocationNodeUnknownFallback';
const InvocationNodeWrapper = (props: NodeProps<InvocationNodeData>) => {
const { data, selected } = props;
const { id: nodeId, type, isOpen, label } = data;
const templates = useStore($templates);
const hasTemplate = useMemo(() => Boolean(templates[type]), [templates, type]);
const nodeExists = useAppSelector((s) => Boolean(s.nodes.present.nodes.find((n) => n.id === nodeId)));

const hasTemplateSelector = useMemo(
() => createSelector(selectNodesSlice, (nodes) => Boolean(nodes.templates[type])),
[type]
);

const hasTemplate = useAppSelector(hasTemplateSelector);
if (!nodeExists) {
return null;
}

if (!hasTemplate) {
return (
@@ -25,10 +25,11 @@ interface Props {
kind: 'inputs' | 'outputs';
isMissingInput?: boolean;
withTooltip?: boolean;
shouldDim?: boolean;
}

const EditableFieldTitle = forwardRef((props: Props, ref) => {
const { nodeId, fieldName, kind, isMissingInput = false, withTooltip = false } = props;
const { nodeId, fieldName, kind, isMissingInput = false, withTooltip = false, shouldDim = false } = props;
const label = useFieldLabel(nodeId, fieldName);
const fieldTemplateTitle = useFieldTemplateTitle(nodeId, fieldName, kind);
const { t } = useTranslation();
@@ -37,14 +38,13 @@ const EditableFieldTitle = forwardRef((props: Props, ref) => {
const [localTitle, setLocalTitle] = useState(label || fieldTemplateTitle || t('nodes.unknownField'));

const handleSubmit = useCallback(
async (newTitle: string) => {
if (newTitle && (newTitle === label || newTitle === fieldTemplateTitle)) {
return;
}
setLocalTitle(newTitle || fieldTemplateTitle || t('nodes.unknownField'));
dispatch(fieldLabelChanged({ nodeId, fieldName, label: newTitle }));
async (newTitleRaw: string) => {
const newTitle = newTitleRaw.trim();
const finalTitle = newTitle || fieldTemplateTitle || t('nodes.unknownField');
setLocalTitle(finalTitle);
dispatch(fieldLabelChanged({ nodeId, fieldName, label: finalTitle }));
},
[label, fieldTemplateTitle, dispatch, nodeId, fieldName, t]
[fieldTemplateTitle, dispatch, nodeId, fieldName, t]
);

const handleChange = useCallback((newTitle: string) => {
@@ -57,33 +57,34 @@ const EditableFieldTitle = forwardRef((props: Props, ref) => {
}, [label, fieldTemplateTitle, t]);

return (
<Tooltip
label={withTooltip ? <FieldTooltipContent nodeId={nodeId} fieldName={fieldName} kind="inputs" /> : undefined}
openDelay={HANDLE_TOOLTIP_OPEN_DELAY}
<Editable
value={localTitle}
onChange={handleChange}
onSubmit={handleSubmit}
as={Flex}
ref={ref}
position="relative"
overflow="hidden"
alignItems="center"
justifyContent="flex-start"
gap={1}
w="full"
>
<Editable
value={localTitle}
onChange={handleChange}
onSubmit={handleSubmit}
as={Flex}
ref={ref}
position="relative"
overflow="hidden"
alignItems="center"
justifyContent="flex-start"
gap={1}
w="full"
<Tooltip
label={withTooltip ? <FieldTooltipContent nodeId={nodeId} fieldName={fieldName} kind="inputs" /> : undefined}
openDelay={HANDLE_TOOLTIP_OPEN_DELAY}
>
<EditablePreview
fontWeight="semibold"
sx={editablePreviewStyles}
noOfLines={1}
color={isMissingInput ? 'error.300' : 'base.300'}
opacity={shouldDim ? 0.5 : 1}
/>
<EditableInput className="nodrag" sx={editableInputStyles} />
<EditableControls />
</Editable>
</Tooltip>
</Tooltip>
<EditableInput className="nodrag" sx={editableInputStyles} />
<EditableControls />
</Editable>
);
});

@@ -127,7 +128,15 @@ const EditableControls = memo(() => {
}

return (
<Flex onClick={handleClick} position="absolute" w="full" h="full" top={0} insetInlineStart={0} cursor="text" />
<Flex
onClick={handleClick}
position="absolute"
w="min-content"
h="full"
top={0}
insetInlineStart={0}
cursor="text"
/>
);
});

@@ -2,10 +2,12 @@ import { Tooltip } from '@invoke-ai/ui-library';
import { colorTokenToCssVar } from 'common/util/colorTokenToCssVar';
import { getFieldColor } from 'features/nodes/components/flow/edges/util/getEdgeColor';
import { useFieldTypeName } from 'features/nodes/hooks/usePrettyFieldType';
import type { ValidationResult } from 'features/nodes/store/util/validateConnection';
import { HANDLE_TOOLTIP_OPEN_DELAY, MODEL_TYPES } from 'features/nodes/types/constants';
import type { FieldInputTemplate, FieldOutputTemplate } from 'features/nodes/types/field';
import { type FieldInputTemplate, type FieldOutputTemplate, isSingle } from 'features/nodes/types/field';
import type { CSSProperties } from 'react';
import { memo, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import type { HandleType } from 'reactflow';
import { Handle, Position } from 'reactflow';

@@ -14,11 +16,12 @@ type FieldHandleProps = {
handleType: HandleType;
isConnectionInProgress: boolean;
isConnectionStartField: boolean;
connectionError?: string;
validationResult: ValidationResult;
};

const FieldHandle = (props: FieldHandleProps) => {
const { fieldTemplate, handleType, isConnectionInProgress, isConnectionStartField, connectionError } = props;
const { fieldTemplate, handleType, isConnectionInProgress, isConnectionStartField, validationResult } = props;
const { t } = useTranslation();
const { name } = fieldTemplate;
const type = fieldTemplate.type;
const fieldTypeName = useFieldTypeName(type);
@@ -26,11 +29,11 @@ const FieldHandle = (props: FieldHandleProps) => {
const isModelType = MODEL_TYPES.some((t) => t === type.name);
const color = getFieldColor(type);
const s: CSSProperties = {
backgroundColor: type.isCollection || type.isCollectionOrScalar ? colorTokenToCssVar('base.900') : color,
backgroundColor: !isSingle(type) ? colorTokenToCssVar('base.900') : color,
position: 'absolute',
width: '1rem',
height: '1rem',
borderWidth: type.isCollection || type.isCollectionOrScalar ? 4 : 0,
borderWidth: !isSingle(type) ? 4 : 0,
borderStyle: 'solid',
borderColor: color,
borderRadius: isModelType ? 4 : '100%',
@@ -43,11 +46,11 @@ const FieldHandle = (props: FieldHandleProps) => {
s.insetInlineEnd = '-1rem';
}

if (isConnectionInProgress && !isConnectionStartField && connectionError) {
if (isConnectionInProgress && !isConnectionStartField && !validationResult.isValid) {
s.filter = 'opacity(0.4) grayscale(0.7)';
}

if (isConnectionInProgress && connectionError) {
if (isConnectionInProgress && !validationResult.isValid) {
if (isConnectionStartField) {
s.cursor = 'grab';
} else {
@@ -58,14 +61,14 @@ const FieldHandle = (props: FieldHandleProps) => {
}

return s;
}, [connectionError, handleType, isConnectionInProgress, isConnectionStartField, type]);
}, [handleType, isConnectionInProgress, isConnectionStartField, type, validationResult.isValid]);

const tooltip = useMemo(() => {
if (isConnectionInProgress && connectionError) {
return connectionError;
if (isConnectionInProgress && validationResult.messageTKey) {
return t(validationResult.messageTKey);
}
return fieldTypeName;
}, [connectionError, fieldTypeName, isConnectionInProgress]);
}, [fieldTypeName, isConnectionInProgress, t, validationResult.messageTKey]);

return (
<Tooltip
@@ -1,16 +1,14 @@
import { Flex, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { Flex, FormControl } from '@invoke-ai/ui-library';
import { useConnectionState } from 'features/nodes/hooks/useConnectionState';
import { useDoesInputHaveValue } from 'features/nodes/hooks/useDoesInputHaveValue';
import { useFieldInputInstance } from 'features/nodes/hooks/useFieldInputInstance';
import { useFieldInputTemplate } from 'features/nodes/hooks/useFieldInputTemplate';
import type { PropsWithChildren } from 'react';
import { memo, useCallback, useMemo, useState } from 'react';
import { useTranslation } from 'react-i18next';

import EditableFieldTitle from './EditableFieldTitle';
import FieldHandle from './FieldHandle';
import FieldLinearViewToggle from './FieldLinearViewToggle';
import InputFieldRenderer from './InputFieldRenderer';
import { InputFieldWrapper } from './InputFieldWrapper';

interface Props {
nodeId: string;
@@ -18,13 +16,11 @@ interface Props {
}

const InputField = ({ nodeId, fieldName }: Props) => {
const { t } = useTranslation();
const fieldTemplate = useFieldInputTemplate(nodeId, fieldName);
const fieldInstance = useFieldInputInstance(nodeId, fieldName);
const doesFieldHaveValue = useDoesInputHaveValue(nodeId, fieldName);
const [isHovered, setIsHovered] = useState(false);

const { isConnected, isConnectionInProgress, isConnectionStartField, connectionError, shouldDim } =
const { isConnected, isConnectionInProgress, isConnectionStartField, validationResult, shouldDim } =
useConnectionState({ nodeId, fieldName, kind: 'inputs' });

const isMissingInput = useMemo(() => {
@@ -55,21 +51,7 @@ const InputField = ({ nodeId, fieldName }: Props) => {
setIsHovered(false);
}, []);

if (!fieldTemplate || !fieldInstance) {
return (
<InputFieldWrapper shouldDim={shouldDim}>
<FormControl alignItems="stretch" justifyContent="space-between" flexDir="column" gap={2} h="full" w="full">
<FormLabel display="flex" alignItems="center" mb={0} px={1} gap={2} h="full">
{t('nodes.unknownInput', {
name: fieldInstance?.label ?? fieldTemplate?.title ?? fieldName,
})}
</FormLabel>
</FormControl>
</InputFieldWrapper>
);
}

if (fieldTemplate.input === 'connection') {
if (fieldTemplate.input === 'connection' || isConnected) {
return (
<InputFieldWrapper shouldDim={shouldDim}>
<FormControl isInvalid={isMissingInput} isDisabled={isConnected} px={2}>
@@ -79,6 +61,7 @@ const InputField = ({ nodeId, fieldName }: Props) => {
kind="inputs"
isMissingInput={isMissingInput}
withTooltip
shouldDim
/>
</FormControl>

@@ -87,7 +70,7 @@ const InputField = ({ nodeId, fieldName }: Props) => {
handleType="target"
isConnectionInProgress={isConnectionInProgress}
isConnectionStartField={isConnectionStartField}
connectionError={connectionError}
validationResult={validationResult}
/>
</InputFieldWrapper>
);
@@ -95,7 +78,15 @@ const InputField = ({ nodeId, fieldName }: Props) => {

return (
<InputFieldWrapper shouldDim={shouldDim}>
<FormControl isInvalid={isMissingInput} isDisabled={isConnected} orientation="vertical" px={2}>
<FormControl
isInvalid={isMissingInput}
isDisabled={isConnected}
// Without pointerEvents prop, disabled inputs don't trigger reactflow events. For example, when making a
// connection, the mouse up to end the connection won't fire, leaving the connection in-progress.
pointerEvents={isConnected ? 'none' : 'auto'}
orientation="vertical"
px={2}
>
<Flex flexDir="column" w="full" gap={1} onMouseEnter={onMouseEnter} onMouseLeave={onMouseLeave}>
<Flex>
<EditableFieldTitle
@@ -117,7 +108,7 @@ const InputField = ({ nodeId, fieldName }: Props) => {
handleType="target"
isConnectionInProgress={isConnectionInProgress}
isConnectionStartField={isConnectionStartField}
connectionError={connectionError}
validationResult={validationResult}
/>
)}
</InputFieldWrapper>
@@ -125,27 +116,3 @@ const InputField = ({ nodeId, fieldName }: Props) => {
};

export default memo(InputField);

type InputFieldWrapperProps = PropsWithChildren<{
shouldDim: boolean;
}>;

const InputFieldWrapper = memo(({ shouldDim, children }: InputFieldWrapperProps) => {
return (
<Flex
position="relative"
minH={8}
py={0.5}
alignItems="center"
opacity={shouldDim ? 0.5 : 1}
transitionProperty="opacity"
transitionDuration="0.1s"
w="full"
h="full"
>
{children}
</Flex>
);
});

InputFieldWrapper.displayName = 'InputFieldWrapper';
@@ -1,3 +1,4 @@
import ModelIdentifierFieldInputComponent from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/ModelIdentifierFieldInputComponent';
import { useFieldInputInstance } from 'features/nodes/hooks/useFieldInputInstance';
import { useFieldInputTemplate } from 'features/nodes/hooks/useFieldInputTemplate';
import {
@@ -23,6 +24,8 @@ import {
isLoRAModelFieldInputTemplate,
isMainModelFieldInputInstance,
isMainModelFieldInputTemplate,
isModelIdentifierFieldInputInstance,
isModelIdentifierFieldInputTemplate,
isSchedulerFieldInputInstance,
isSchedulerFieldInputTemplate,
isSDXLMainModelFieldInputInstance,
@@ -95,6 +98,10 @@ const InputFieldRenderer = ({ nodeId, fieldName }: InputFieldProps) => {
return <MainModelFieldInputComponent nodeId={nodeId} field={fieldInstance} fieldTemplate={fieldTemplate} />;
}

if (isModelIdentifierFieldInputInstance(fieldInstance) && isModelIdentifierFieldInputTemplate(fieldTemplate)) {
return <ModelIdentifierFieldInputComponent nodeId={nodeId} field={fieldInstance} fieldTemplate={fieldTemplate} />;
}

if (isSDXLRefinerModelFieldInputInstance(fieldInstance) && isSDXLRefinerModelFieldInputTemplate(fieldTemplate)) {
return <RefinerModelFieldInputComponent nodeId={nodeId} field={fieldInstance} fieldTemplate={fieldTemplate} />;
}
@@ -0,0 +1,27 @@
import { Flex } from '@invoke-ai/ui-library';
import type { PropsWithChildren } from 'react';
import { memo } from 'react';

type InputFieldWrapperProps = PropsWithChildren<{
shouldDim: boolean;
}>;

export const InputFieldWrapper = memo(({ shouldDim, children }: InputFieldWrapperProps) => {
return (
<Flex
position="relative"
minH={8}
py={0.5}
alignItems="center"
opacity={shouldDim ? 0.5 : 1}
transitionProperty="opacity"
transitionDuration="0.1s"
w="full"
h="full"
>
{children}
</Flex>
);
});

InputFieldWrapper.displayName = 'InputFieldWrapper';
@@ -0,0 +1,59 @@
import { Flex, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/store/storeHooks';
import { $templates, selectNodesSlice } from 'features/nodes/store/nodesSlice';
import { selectInvocationNode } from 'features/nodes/store/selectors';
import type { PropsWithChildren } from 'react';
import { memo, useMemo } from 'react';
import { useTranslation } from 'react-i18next';

type Props = PropsWithChildren<{
nodeId: string;
fieldName: string;
}>;

export const InvocationInputFieldCheck = memo(({ nodeId, fieldName, children }: Props) => {
const { t } = useTranslation();
const templates = useStore($templates);
const selector = useMemo(
() =>
createSelector(selectNodesSlice, (nodesSlice) => {
const node = selectInvocationNode(nodesSlice, nodeId);
const instance = node.data.inputs[fieldName];
const template = templates[node.data.type];
const fieldTemplate = template?.inputs[fieldName];
return {
name: instance?.label || fieldTemplate?.title || fieldName,
hasInstance: Boolean(instance),
hasTemplate: Boolean(fieldTemplate),
};
}),
[fieldName, nodeId, templates]
);
const { hasInstance, hasTemplate, name } = useAppSelector(selector);

if (!hasTemplate || !hasInstance) {
return (
<Flex position="relative" minH={8} py={0.5} alignItems="center" w="full" h="full">
<FormControl
isInvalid={true}
alignItems="stretch"
justifyContent="center"
flexDir="column"
gap={2}
h="full"
w="full"
>
<FormLabel display="flex" mb={0} px={1} py={2} gap={2}>
{t('nodes.unknownInput', { name })}
</FormLabel>
</FormControl>
</Flex>
);
}

return children;
});

InvocationInputFieldCheck.displayName = 'InvocationInputFieldCheck';
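A short usage sketch for the new guard component: wrap any field UI so that a missing template or missing field instance renders the "unknown input" fallback instead of the field itself, as InvocationNode and LinearViewField do elsewhere in this diff:

<InvocationInputFieldCheck nodeId={nodeId} fieldName={fieldName}>
  <InputField nodeId={nodeId} fieldName={fieldName} />
</InvocationInputFieldCheck>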
@@ -3,6 +3,7 @@ import { CSS } from '@dnd-kit/utilities';
import { Flex, Icon, IconButton, Spacer, Tooltip } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import NodeSelectionOverlay from 'common/components/NodeSelectionOverlay';
import { InvocationInputFieldCheck } from 'features/nodes/components/flow/nodes/Invocation/fields/InvocationFieldCheck';
import { useFieldOriginalValue } from 'features/nodes/hooks/useFieldOriginalValue';
import { useMouseOverNode } from 'features/nodes/hooks/useMouseOverNode';
import { workflowExposedFieldRemoved } from 'features/nodes/store/workflowSlice';
@@ -20,7 +21,7 @@ type Props = {
fieldName: string;
};

const LinearViewField = ({ nodeId, fieldName }: Props) => {
const LinearViewFieldInternal = ({ nodeId, fieldName }: Props) => {
const dispatch = useAppDispatch();
const { isValueChanged, onReset } = useFieldOriginalValue(nodeId, fieldName);
const { isMouseOverNode, handleMouseOut, handleMouseOver } = useMouseOverNode(nodeId);
@@ -99,4 +100,12 @@ const LinearViewField = ({ nodeId, fieldName }: Props) => {
);
};

const LinearViewField = ({ nodeId, fieldName }: Props) => {
return (
<InvocationInputFieldCheck nodeId={nodeId} fieldName={fieldName}>
<LinearViewFieldInternal nodeId={nodeId} fieldName={fieldName} />
</InvocationInputFieldCheck>
);
};

export default memo(LinearViewField);
Some files were not shown because too many files have changed in this diff.