Compare commits


3 Commits

Author SHA1 Message Date
Brandon Rising
bbb48c5475 Gitignore aider 2024-05-28 15:42:34 -04:00
Brandon Rising
e68c49167a aider: Fixed CPU memory issues by deleting large objects and forcing garbage collection.
# Aider chat conversation:

USER: `invokeai/backend/model_manager/convert_ckpt_to_diffusers.py` I'm having issues with CPU memory, not GPU memory

ASSISTANT: To address CPU memory issues, we can make the following changes:

1. Use `del` to explicitly delete large objects when they are no longer needed.
2. Use `gc.collect()` to force garbage collection after deleting large objects.
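
For illustration, a minimal standalone sketch of this pattern (the toy `nn.Linear` model and `state_dict` below are stand-ins, not the actual InvokeAI objects):

```python
import gc

import torch
from torch import nn


def load_and_release(checkpoint: dict[str, torch.Tensor]) -> nn.Module:
    """Load weights into a model, then drop the CPU copy of the checkpoint."""
    model = nn.Linear(4, 4)  # stand-in for a large model such as a converted VAE
    model.load_state_dict(checkpoint)
    del checkpoint  # remove this function's reference to the large state dict
    gc.collect()  # ask Python to reclaim the memory immediately
    return model


state_dict = nn.Linear(4, 4).state_dict()  # stand-in for a multi-GB checkpoint
model = load_and_release(state_dict)
del state_dict  # the caller must drop its own reference too before the memory can be freed
gc.collect()
```

Note that `del` only removes a reference; the memory is reclaimed only once no other references to the object remain.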

Here are the *SEARCH/REPLACE* blocks:

invokeai/backend/model_manager/convert_ckpt_to_diffusers.py
```python
<<<<<<< SEARCH
    vae = AutoencoderKL(**vae_config)
    with torch.no_grad():
        vae.load_state_dict(converted_vae_checkpoint)
        vae.to(precision)
        torch.cuda.empty_cache()
=======
    vae = AutoencoderKL(**vae_config)
    with torch.no_grad():
        vae.load_state_dict(converted_vae_checkpoint)
        del converted_vae_checkpoint  # Free memory
        import gc
        gc.collect()
        vae.to(precision)
>>>>>>> REPLACE
```

invokeai/backend/model_manager/convert_ckpt_to_diffusers.py
```python
<<<<<<< SEARCH
    pipe = download_from_original_stable_diffusion_ckpt(Path(checkpoint_path).as_posix(), **kwargs)
    with torch.no_grad():
        pipe = pipe.to(precision)
        torch.cuda.empty_cache()
=======
    pipe = download_from_original_stable_diffusion_ckpt(Path(checkpoint_path).as_posix(), **kwargs)
    with torch.no_grad():
        del kwargs  # Free memory
        import gc
        gc.collect()
        pipe = pipe.to(precision)
>>>>>>> REPLACE
```

invokeai/backend/model_manager/convert_ckpt_to_diffusers.py
```python
<<<<<<< SEARCH
    pipe = download_controlnet_from_original_ckpt(checkpoint_path.as_posix(), **kwargs)
    with torch.no_grad():
        pipe = pipe.to(precision)
        torch.cuda.empty_cache()
=======
    pipe = download_controlnet_from_original_ckpt(checkpoint_path.as_posix(), **kwargs)
    with torch.no_grad():
        del kwargs  # Free memory
        import gc
        gc.collect()
        pipe = pipe.to(precision)
>>>>>>> REPLACE
```
2024-05-28 15:05:36 -04:00
Brandon Rising
46950a9bd0 aider: Improved memory usage efficiency by avoiding intermediate value storage and clearing the cache.
# Aider chat conversation:

USER: `invokeai/backend/model_manager/convert_ckpt_to_diffusers.py` Could you improve the memory usage efficiency of this code?

ASSISTANT: To improve the memory usage efficiency of the code, we can make the following changes:

1. Use `with torch.no_grad()` to avoid storing intermediate values for backpropagation.
2. Move the model to the desired device only when necessary.
3. Clear the cache after moving the model to the desired device.
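
As a rough standalone sketch of these points (the toy module and the fp16 dtype are assumptions, not the actual InvokeAI call sites):

```python
import torch
from torch import nn

model = nn.Linear(8, 8)  # stand-in for a converted VAE or diffusers pipeline

with torch.no_grad():  # no autograd graph is recorded for these operations
    model = model.to(torch.float16)  # cast/move the model only when it is actually needed
    if torch.cuda.is_available():
        torch.cuda.empty_cache()  # return cached, unused CUDA blocks to the driver
```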

Here are the *SEARCH/REPLACE* blocks:

invokeai/backend/model_manager/convert_ckpt_to_diffusers.py
```python
<<<<<<< SEARCH
    vae.load_state_dict(converted_vae_checkpoint)
    vae.to(precision)
=======
    with torch.no_grad():
        vae.load_state_dict(converted_vae_checkpoint)
        vae.to(precision)
        torch.cuda.empty_cache()
>>>>>>> REPLACE
```

invokeai/backend/model_manager/convert_ckpt_to_diffusers.py
```python
<<<<<<< SEARCH
    pipe = pipe.to(precision)
=======
    with torch.no_grad():
        pipe = pipe.to(precision)
        torch.cuda.empty_cache()
>>>>>>> REPLACE
```

invokeai/backend/model_manager/convert_ckpt_to_diffusers.py
```python
<<<<<<< SEARCH
    pipe = pipe.to(precision)
=======
    with torch.no_grad():
        pipe = pipe.to(precision)
        torch.cuda.empty_cache()
>>>>>>> REPLACE
```
2024-05-28 15:03:43 -04:00
144 changed files with 3439 additions and 6803 deletions

.gitignore vendored
View File

@@ -188,3 +188,4 @@ installer/install.sh
installer/update.bat
installer/update.sh
installer/InvokeAI-Installer/
.aider*

View File

@@ -18,7 +18,6 @@ help:
@echo "frontend-typegen Generate types for the frontend from the OpenAPI schema"
@echo "installer-zip Build the installer .zip file for the current version"
@echo "tag-release Tag the GitHub repository with the current version (use at release time only!)"
@echo "openapi Generate the OpenAPI schema for the app, outputting to stdout"
# Runs ruff, fixing any safely-fixable errors and formatting
ruff:
@@ -71,6 +70,3 @@ installer-zip:
tag-release:
cd installer && ./tag_release.sh
# Generate the OpenAPI Schema for the app
openapi:
python scripts/generate_openapi_schema.py

View File

@@ -128,8 +128,7 @@ The queue operates on a series of download job objects. These objects
specify the source and destination of the download, and keep track of
the progress of the download.
Two job types are defined. `DownloadJob` and
`MultiFileDownloadJob`. The former is a pydantic object with the
The only job type currently implemented is `DownloadJob`, a pydantic object with the
following fields:
| **Field** | **Type** | **Default** | **Description** |
@@ -139,7 +138,7 @@ following fields:
| `dest` | Path | | Where to download to |
| `access_token` | str | | [optional] string containing authentication token for access |
| `on_start` | Callable | | [optional] callback when the download starts |
| `on_progress` | Callable | | [optional] callback called at intervals during download progress |
| `on_progress` | Callable | | [optional] callback called at intervals during download progress |
| `on_complete` | Callable | | [optional] callback called after successful download completion |
| `on_error` | Callable | | [optional] callback called after an error occurs |
| `id` | int | auto assigned | Job ID, an integer >= 0 |
@@ -191,33 +190,6 @@ A cancelled job will have status `DownloadJobStatus.ERROR` and an
`error_type` field of "DownloadJobCancelledException". In addition,
the job's `cancelled` property will be set to True.
The `MultiFileDownloadJob` is used for diffusers model downloads,
which contain multiple files and directories under a common root:
| **Field** | **Type** | **Default** | **Description** |
|----------------|-----------------|---------------|-----------------|
| _Fields passed in at job creation time_ |
| `download_parts` | Set[DownloadJob]| | Component download jobs |
| `dest` | Path | | Where to download to |
| `on_start` | Callable | | [optional] callback when the download starts |
| `on_progress` | Callable | | [optional] callback called at intervals during download progress |
| `on_complete` | Callable | | [optional] callback called after successful download completion |
| `on_error` | Callable | | [optional] callback called after an error occurs |
| `id` | int | auto assigned | Job ID, an integer >= 0 |
| _Fields updated over the course of the download task_
| `status` | DownloadJobStatus| | Status code |
| `download_path` | Path | | Path to the root of the downloaded files |
| `bytes` | int | 0 | Bytes downloaded so far |
| `total_bytes` | int | 0 | Total size of the file at the remote site |
| `error_type` | str | | String version of the exception that caused an error during download |
| `error` | str | | String version of the traceback associated with an error |
| `cancelled` | bool | False | Set to true if the job was cancelled by the caller|
Note that the MultiFileDownloadJob does not support the `priority`,
`job_started`, `job_ended` or `content_type` attributes. You can get
these from the individual download jobs in `download_parts`.
### Callbacks
Download jobs can be associated with a series of callbacks, each with
@@ -279,40 +251,11 @@ jobs using `list_jobs()`, fetch a single job by its with
running jobs with `cancel_all_jobs()`, and wait for all jobs to finish
with `join()`.
#### job = queue.download(source, dest, priority, access_token, on_start, on_progress, on_complete, on_cancelled, on_error)
#### job = queue.download(source, dest, priority, access_token)
Create a new download job and put it on the queue, returning the
DownloadJob object.
#### multifile_job = queue.multifile_download(parts, dest, access_token, on_start, on_progress, on_complete, on_cancelled, on_error)
This is similar to download(), but instead of taking a single source,
it accepts a `parts` argument consisting of a list of
`RemoteModelFile` objects. Each part corresponds to a URL/Path pair,
where the URL is the location of the remote file, and the Path is the
destination.
`RemoteModelFile` can be imported from `invokeai.backend.model_manager.metadata`, and
consists of a url/path pair. Note that the path *must* be relative.
The method returns a `MultiFileDownloadJob`.
```
from invokeai.backend.model_manager.metadata import RemoteModelFile
remote_file_1 = RemoteModelFile(url='http://www.foo.bar/my/pytorch_model.safetensors',
path='my_model/textencoder/pytorch_model.safetensors'
)
remote_file_2 = RemoteModelFile(url='http://www.bar.baz/vae.ckpt',
path='my_model/vae/diffusers_model.safetensors'
)
job = queue.multifile_download(parts=[remote_file_1, remote_file_2],
dest='/tmp/downloads',
on_progress=TqdmProgress().update)
queue.wait_for_job(job)
print(f"The files were downloaded to {job.download_path}")
```
#### jobs = queue.list_jobs()
Return a list of all active and inactive `DownloadJob`s.

View File

@@ -397,25 +397,26 @@ In the event you wish to create a new installer, you may use the
following initialization pattern:
```
from invokeai.app.services.config import get_config
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.model_records import ModelRecordServiceSQL
from invokeai.app.services.model_install import ModelInstallService
from invokeai.app.services.download import DownloadQueueService
from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
from invokeai.app.services.shared.sqlite import SqliteDatabase
from invokeai.backend.util.logging import InvokeAILogger
config = get_config()
config = InvokeAIAppConfig.get_config()
config.parse_args()
logger = InvokeAILogger.get_logger(config=config)
db = SqliteDatabase(config.db_path, logger)
db = SqliteDatabase(config, logger)
record_store = ModelRecordServiceSQL(db)
queue = DownloadQueueService()
queue.start()
installer = ModelInstallService(app_config=config,
installer = ModelInstallService(app_config=config,
record_store=record_store,
download_queue=queue
)
download_queue=queue
)
installer.start()
```
@@ -1366,20 +1367,12 @@ the in-memory loaded model:
| `model` | AnyModel | The instantiated model (details below) |
| `locker` | ModelLockerBase | A context manager that mediates the movement of the model into VRAM |
### get_model_by_key(key, [submodel]) -> LoadedModel
The `get_model_by_key()` method will retrieve the model using its
unique database key. For example:
loaded_model = loader.get_model_by_key('f13dd932c0c35c22dcb8d6cda4203764', SubModelType('vae'))
`get_model_by_key()` may raise any of the following exceptions:
* `UnknownModelException` -- key not in database
* `ModelNotFoundException` -- key in database but model not found at path
* `NotImplementedException` -- the loader doesn't know how to load this type of model
### Using the Loaded Model in Inference
Because the loader can return multiple model types, it is typed to
return `AnyModel`, a Union of `ModelMixin`, `torch.nn.Module`,
`IAIOnnxRuntimeModel`, `IPAdapter`, `IPAdapterPlus`, and
`EmbeddingModelRaw`. `ModelMixin` is the base class of all diffusers
models, `EmbeddingModelRaw` is used for LoRA and TextualInversion
models. The others are obvious.
`LoadedModel` acts as a context manager. The context loads the model
into the execution device (e.g. VRAM on CUDA systems), locks the model
@@ -1387,33 +1380,17 @@ in the execution device for the duration of the context, and returns
the model. Use it like this:
```
loaded_model = loader.get_model_by_key('f13dd932c0c35c22dcb8d6cda4203764', SubModelType('vae'))
with loaded_model as vae:
model_info = loader.get_model_by_key('f13dd932c0c35c22dcb8d6cda4203764', SubModelType('vae'))
with model_info as vae:
image = vae.decode(latents)[0]
```
The object returned by the LoadedModel context manager is an
`AnyModel`, which is a Union of `ModelMixin`, `torch.nn.Module`,
`IAIOnnxRuntimeModel`, `IPAdapter`, `IPAdapterPlus`, and
`EmbeddingModelRaw`. `ModelMixin` is the base class of all diffusers
models, `EmbeddingModelRaw` is used for LoRA and TextualInversion
models. The others are obvious.
In addition, you may call `LoadedModel.model_on_device()`, a context
manager that returns a tuple of the model's state dict in CPU and the
model itself in VRAM. It is used to optimize the LoRA patching and
unpatching process:
```
loaded_model = loader.get_model_by_key('f13dd932c0c35c22dcb8d6cda4203764', SubModelType('vae'))
with loaded_model.model_on_device() as (state_dict, vae):
image = vae.decode(latents)[0]
```
Since not all models have state dicts, the `state_dict` return value
can be None.
`get_model_by_key()` may raise any of the following exceptions:
* `UnknownModelException` -- key not in database
* `ModelNotFoundException` -- key in database but model not found at path
* `NotImplementedException` -- the loader doesn't know how to load this type of model
### Emitting model loading events
When the `context` argument is passed to `load_model_*()`, it will
@@ -1601,59 +1578,3 @@ This method takes a model key, looks it up using the
`ModelRecordServiceBase` object in `mm.store`, and passes the returned
model configuration to `load_model_by_config()`. It may raise a
`NotImplementedException`.
## Invocation Context Model Manager API
Within invocations, the following methods are available from the
`InvocationContext` object:
### context.download_and_cache_model(source) -> Path
This method accepts a `source` of a remote model, downloads and caches
it locally, and then returns a Path to the local model. The source can
be a direct download URL or a HuggingFace repo_id.
In the case of HuggingFace repo_id, the following variants are
recognized:
* stabilityai/stable-diffusion-v4 -- default model
* stabilityai/stable-diffusion-v4:fp16 -- fp16 variant
* stabilityai/stable-diffusion-v4:fp16:vae -- the fp16 vae subfolder
* stabilityai/stable-diffusion-v4:onnx:vae -- the onnx variant vae subfolder
You can also point at an arbitrary individual file within a repo_id
directory using this syntax:
* stabilityai/stable-diffusion-v4::/checkpoints/sd4.safetensors
### context.load_local_model(model_path, [loader]) -> LoadedModel
This method loads a local model from the indicated path, returning a
`LoadedModel`. The optional loader is a Callable that accepts a Path
to the object, and returns a `AnyModel` object. If no loader is
provided, then the method will use `torch.load()` for a .ckpt or .bin
checkpoint file, `safetensors.torch.load_file()` for a safetensors
checkpoint file, or `cls.from_pretrained()` for a directory that looks
like a diffusers directory.
### context.load_remote_model(source, [loader]) -> LoadedModel
This method accepts a `source` of a remote model, downloads and caches
it locally, loads it, and returns a `LoadedModel`. The source can be a
direct download URL or a HuggingFace repo_id.
In the case of HuggingFace repo_id, the following variants are
recognized:
* stabilityai/stable-diffusion-v4 -- default model
* stabilityai/stable-diffusion-v4:fp16 -- fp16 variant
* stabilityai/stable-diffusion-v4:fp16:vae -- the fp16 vae subfolder
* stabilityai/stable-diffusion-v4:onnx:vae -- the onnx variant vae subfolder
You can also point at an arbitrary individual file within a repo_id
directory using this syntax:
* stabilityai/stable-diffusion-v4::/checkpoints/sd4.safetensors

View File

@@ -154,18 +154,6 @@ This is caused by an invalid setting in the `invokeai.yaml` configuration file.
Check the [configuration docs] for more detail about the settings and how to specify them.
## `ModuleNotFoundError: No module named 'controlnet_aux'`
`controlnet_aux` is a dependency of Invoke and appears to have been packaged or distributed strangely. Sometimes, it doesn't install correctly. This is outside our control.
If you encounter this error, the solution is to remove the package from the `pip` cache and re-run the Invoke installer so a fresh, working version of `controlnet_aux` can be downloaded and installed:
- Run the Invoke launcher
- Choose the developer console option
- Run this command: `pip cache remove controlnet_aux`
- Close the terminal window
- Download and run the [installer](https://github.com/invoke-ai/InvokeAI/releases/latest), selecting your current install location
## Out of Memory Issues
The models are large, VRAM is expensive, and you may find yourself

View File

@@ -93,7 +93,7 @@ class ApiDependencies:
conditioning = ObjectSerializerForwardCache(
ObjectSerializerDisk[ConditioningFieldData](output_folder / "conditioning", ephemeral=True)
)
download_queue_service = DownloadQueueService(app_config=configuration, event_bus=events)
download_queue_service = DownloadQueueService(event_bus=events)
model_images_service = ModelImageFileStorageDisk(model_images_folder / "model_images")
model_manager = ModelManagerService.build_model_manager(
app_config=configuration,

View File

@@ -3,7 +3,9 @@ import logging
import mimetypes
import socket
from contextlib import asynccontextmanager
from inspect import signature
from pathlib import Path
from typing import Any
import torch
import uvicorn
@@ -11,9 +13,11 @@ from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.middleware.gzip import GZipMiddleware
from fastapi.openapi.docs import get_redoc_html, get_swagger_ui_html
from fastapi.openapi.utils import get_openapi
from fastapi.responses import HTMLResponse
from fastapi_events.handlers.local import local_handler
from fastapi_events.middleware import EventHandlerASGIMiddleware
from pydantic.json_schema import models_json_schema
from torch.backends.mps import is_available as is_mps_available
# for PyCharm:
@@ -21,8 +25,10 @@ from torch.backends.mps import is_available as is_mps_available
import invokeai.backend.util.hotfixes # noqa: F401 (monkeypatching on import)
import invokeai.frontend.web as web_dir
from invokeai.app.api.no_cache_staticfiles import NoCacheStaticFiles
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.services.config.config_default import get_config
from invokeai.app.util.custom_openapi import get_openapi_func
from invokeai.app.services.events.events_common import EventBase
from invokeai.app.services.session_processor.session_processor_common import ProgressImage
from invokeai.backend.util.devices import TorchDevice
from ..backend.util.logging import InvokeAILogger
@@ -39,6 +45,11 @@ from .api.routers import (
workflows,
)
from .api.sockets import SocketIO
from .invocations.baseinvocation import (
BaseInvocation,
UIConfigBase,
)
from .invocations.fields import InputFieldJSONSchemaExtra, OutputFieldJSONSchemaExtra
app_config = get_config()
@@ -108,7 +119,84 @@ app.include_router(app_info.app_router, prefix="/api")
app.include_router(session_queue.session_queue_router, prefix="/api")
app.include_router(workflows.workflows_router, prefix="/api")
app.openapi = get_openapi_func(app)
# Build a custom OpenAPI to include all outputs
# TODO: can outputs be included on metadata of invocation schemas somehow?
def custom_openapi() -> dict[str, Any]:
if app.openapi_schema:
return app.openapi_schema
openapi_schema = get_openapi(
title=app.title,
description="An API for invoking AI image operations",
version="1.0.0",
routes=app.routes,
separate_input_output_schemas=False, # https://fastapi.tiangolo.com/how-to/separate-openapi-schemas/
)
# Add all outputs
all_invocations = BaseInvocation.get_invocations()
output_types = set()
output_type_titles = {}
for invoker in all_invocations:
output_type = signature(invoker.invoke).return_annotation
output_types.add(output_type)
output_schemas = models_json_schema(
models=[(o, "serialization") for o in output_types], ref_template="#/components/schemas/{model}"
)
for schema_key, output_schema in output_schemas[1]["$defs"].items():
# TODO: note that we assume the schema_key here is the TYPE.__name__
# This could break in some cases, figure out a better way to do it
output_type_titles[schema_key] = output_schema["title"]
openapi_schema["components"]["schemas"][schema_key] = output_schema
openapi_schema["components"]["schemas"][schema_key]["class"] = "output"
# Some models don't end up in the schemas as standalone definitions
additional_schemas = models_json_schema(
[
(UIConfigBase, "serialization"),
(InputFieldJSONSchemaExtra, "serialization"),
(OutputFieldJSONSchemaExtra, "serialization"),
(ModelIdentifierField, "serialization"),
(ProgressImage, "serialization"),
],
ref_template="#/components/schemas/{model}",
)
for schema_key, schema_json in additional_schemas[1]["$defs"].items():
openapi_schema["components"]["schemas"][schema_key] = schema_json
openapi_schema["components"]["schemas"]["InvocationOutputMap"] = {
"type": "object",
"properties": {},
"required": [],
}
# Add a reference to the output type to additionalProperties of the invoker schema
for invoker in all_invocations:
invoker_name = invoker.__name__ # type: ignore [attr-defined] # this is a valid attribute
output_type = signature(obj=invoker.invoke).return_annotation
output_type_title = output_type_titles[output_type.__name__]
invoker_schema = openapi_schema["components"]["schemas"][f"{invoker_name}"]
outputs_ref = {"$ref": f"#/components/schemas/{output_type_title}"}
invoker_schema["output"] = outputs_ref
openapi_schema["components"]["schemas"]["InvocationOutputMap"]["properties"][invoker.get_type()] = outputs_ref
openapi_schema["components"]["schemas"]["InvocationOutputMap"]["required"].append(invoker.get_type())
invoker_schema["class"] = "invocation"
# Add all event schemas
for event in sorted(EventBase.get_events(), key=lambda e: e.__name__):
json_schema = event.model_json_schema(mode="serialization", ref_template="#/components/schemas/{model}")
if "$defs" in json_schema:
for schema_key, schema in json_schema["$defs"].items():
openapi_schema["components"]["schemas"][schema_key] = schema
del json_schema["$defs"]
openapi_schema["components"]["schemas"][event.__name__] = json_schema
app.openapi_schema = openapi_schema
return app.openapi_schema
app.openapi = custom_openapi # type: ignore [method-assign] # this is a valid assignment
@app.get("/docs", include_in_schema=False)

View File

@@ -98,13 +98,11 @@ class BaseInvocationOutput(BaseModel):
_output_classes: ClassVar[set[BaseInvocationOutput]] = set()
_typeadapter: ClassVar[Optional[TypeAdapter[Any]]] = None
_typeadapter_needs_update: ClassVar[bool] = False
@classmethod
def register_output(cls, output: BaseInvocationOutput) -> None:
"""Registers an invocation output."""
cls._output_classes.add(output)
cls._typeadapter_needs_update = True
@classmethod
def get_outputs(cls) -> Iterable[BaseInvocationOutput]:
@@ -114,12 +112,11 @@ class BaseInvocationOutput(BaseModel):
@classmethod
def get_typeadapter(cls) -> TypeAdapter[Any]:
"""Gets a pydantc TypeAdapter for the union of all invocation output types."""
if not cls._typeadapter or cls._typeadapter_needs_update:
AnyInvocationOutput = TypeAliasType(
"AnyInvocationOutput", Annotated[Union[tuple(cls._output_classes)], Field(discriminator="type")]
if not cls._typeadapter:
InvocationOutputsUnion = TypeAliasType(
"InvocationOutputsUnion", Annotated[Union[tuple(cls._output_classes)], Field(discriminator="type")]
)
cls._typeadapter = TypeAdapter(AnyInvocationOutput)
cls._typeadapter_needs_update = False
cls._typeadapter = TypeAdapter(InvocationOutputsUnion)
return cls._typeadapter
@classmethod
@@ -128,13 +125,12 @@ class BaseInvocationOutput(BaseModel):
return (i.get_type() for i in BaseInvocationOutput.get_outputs())
@staticmethod
def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseInvocationOutput]) -> None:
def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseModel]) -> None:
"""Adds various UI-facing attributes to the invocation output's OpenAPI schema."""
# Because we use a pydantic Literal field with default value for the invocation type,
# it will be typed as optional in the OpenAPI schema. Make it required manually.
if "required" not in schema or not isinstance(schema["required"], list):
schema["required"] = []
schema["class"] = "output"
schema["required"].extend(["type"])
@classmethod
@@ -171,7 +167,6 @@ class BaseInvocation(ABC, BaseModel):
_invocation_classes: ClassVar[set[BaseInvocation]] = set()
_typeadapter: ClassVar[Optional[TypeAdapter[Any]]] = None
_typeadapter_needs_update: ClassVar[bool] = False
@classmethod
def get_type(cls) -> str:
@@ -182,17 +177,15 @@ class BaseInvocation(ABC, BaseModel):
def register_invocation(cls, invocation: BaseInvocation) -> None:
"""Registers an invocation."""
cls._invocation_classes.add(invocation)
cls._typeadapter_needs_update = True
@classmethod
def get_typeadapter(cls) -> TypeAdapter[Any]:
"""Gets a pydantc TypeAdapter for the union of all invocation types."""
if not cls._typeadapter or cls._typeadapter_needs_update:
AnyInvocation = TypeAliasType(
"AnyInvocation", Annotated[Union[tuple(cls._invocation_classes)], Field(discriminator="type")]
if not cls._typeadapter:
InvocationsUnion = TypeAliasType(
"InvocationsUnion", Annotated[Union[tuple(cls._invocation_classes)], Field(discriminator="type")]
)
cls._typeadapter = TypeAdapter(AnyInvocation)
cls._typeadapter_needs_update = False
cls._typeadapter = TypeAdapter(InvocationsUnion)
return cls._typeadapter
@classmethod
@@ -228,7 +221,7 @@ class BaseInvocation(ABC, BaseModel):
return signature(cls.invoke).return_annotation
@staticmethod
def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseInvocation]) -> None:
def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseModel], *args, **kwargs) -> None:
"""Adds various UI-facing attributes to the invocation's OpenAPI schema."""
uiconfig = cast(UIConfigBase | None, getattr(model_class, "UIConfig", None))
if uiconfig is not None:
@@ -244,7 +237,6 @@ class BaseInvocation(ABC, BaseModel):
schema["version"] = uiconfig.version
if "required" not in schema or not isinstance(schema["required"], list):
schema["required"] = []
schema["class"] = "invocation"
schema["required"].extend(["type", "id"])
@abstractmethod
@@ -318,7 +310,7 @@ class BaseInvocation(ABC, BaseModel):
protected_namespaces=(),
validate_assignment=True,
json_schema_extra=json_schema_extra,
json_schema_serialization_defaults_required=False,
json_schema_serialization_defaults_required=True,
coerce_numbers_to_str=True,
)

View File

@@ -81,13 +81,9 @@ class CompelInvocation(BaseInvocation):
with (
# apply all patches while the model is on the target device
text_encoder_info.model_on_device() as (model_state_dict, text_encoder),
text_encoder_info as text_encoder,
tokenizer_info as tokenizer,
ModelPatcher.apply_lora_text_encoder(
text_encoder,
loras=_lora_loader(),
model_state_dict=model_state_dict,
),
ModelPatcher.apply_lora_text_encoder(text_encoder, _lora_loader()),
# Apply CLIP Skip after LoRA to prevent LoRA application from failing on skipped layers.
ModelPatcher.apply_clip_skip(text_encoder, self.clip.skipped_layers),
ModelPatcher.apply_ti(tokenizer, text_encoder, ti_list) as (
@@ -176,14 +172,9 @@ class SDXLPromptInvocationBase:
with (
# apply all patches while the model is on the target device
text_encoder_info.model_on_device() as (state_dict, text_encoder),
text_encoder_info as text_encoder,
tokenizer_info as tokenizer,
ModelPatcher.apply_lora(
text_encoder,
loras=_lora_loader(),
prefix=lora_prefix,
model_state_dict=state_dict,
),
ModelPatcher.apply_lora(text_encoder, _lora_loader(), lora_prefix),
# Apply CLIP Skip after LoRA to prevent LoRA application from failing on skipped layers.
ModelPatcher.apply_clip_skip(text_encoder, clip_field.skipped_layers),
ModelPatcher.apply_ti(tokenizer, text_encoder, ti_list) as (

View File

@@ -2,7 +2,6 @@
# initial implementation by Gregg Helt, 2023
# heavily leverages controlnet_aux package: https://github.com/patrickvonplaten/controlnet_aux
from builtins import bool, float
from pathlib import Path
from typing import Dict, List, Literal, Union
import cv2
@@ -37,13 +36,12 @@ from invokeai.app.invocations.util import validate_begin_end_step, validate_weig
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.controlnet_utils import CONTROLNET_MODE_VALUES, CONTROLNET_RESIZE_VALUES, heuristic_resize
from invokeai.backend.image_util.canny import get_canny_edges
from invokeai.backend.image_util.depth_anything import DEPTH_ANYTHING_MODELS, DepthAnythingDetector
from invokeai.backend.image_util.dw_openpose import DWPOSE_MODELS, DWOpenposeDetector
from invokeai.backend.image_util.depth_anything import DepthAnythingDetector
from invokeai.backend.image_util.dw_openpose import DWOpenposeDetector
from invokeai.backend.image_util.hed import HEDProcessor
from invokeai.backend.image_util.lineart import LineartProcessor
from invokeai.backend.image_util.lineart_anime import LineartAnimeProcessor
from invokeai.backend.image_util.util import np_to_pil, pil_to_np
from invokeai.backend.util.devices import TorchDevice
from .baseinvocation import BaseInvocation, BaseInvocationOutput, Classification, invocation, invocation_output
@@ -141,7 +139,6 @@ class ImageProcessorInvocation(BaseInvocation, WithMetadata, WithBoard):
return context.images.get_pil(self.image.image_name, "RGB")
def invoke(self, context: InvocationContext) -> ImageOutput:
self._context = context
raw_image = self.load_image(context)
# image type should be PIL.PngImagePlugin.PngImageFile ?
processed_image = self.run_processor(raw_image)
@@ -287,8 +284,7 @@ class MidasDepthImageProcessorInvocation(ImageProcessorInvocation):
# depth_and_normal not supported in controlnet_aux v0.0.3
# depth_and_normal: bool = InputField(default=False, description="whether to use depth and normal mode")
def run_processor(self, image: Image.Image) -> Image.Image:
# TODO: replace from_pretrained() calls with context.models.download_and_cache() (or similar)
def run_processor(self, image):
midas_processor = MidasDetector.from_pretrained("lllyasviel/Annotators")
processed_image = midas_processor(
image,
@@ -315,7 +311,7 @@ class NormalbaeImageProcessorInvocation(ImageProcessorInvocation):
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
def run_processor(self, image: Image.Image) -> Image.Image:
def run_processor(self, image):
normalbae_processor = NormalBaeDetector.from_pretrained("lllyasviel/Annotators")
processed_image = normalbae_processor(
image, detect_resolution=self.detect_resolution, image_resolution=self.image_resolution
@@ -334,7 +330,7 @@ class MlsdImageProcessorInvocation(ImageProcessorInvocation):
thr_v: float = InputField(default=0.1, ge=0, description="MLSD parameter `thr_v`")
thr_d: float = InputField(default=0.1, ge=0, description="MLSD parameter `thr_d`")
def run_processor(self, image: Image.Image) -> Image.Image:
def run_processor(self, image):
mlsd_processor = MLSDdetector.from_pretrained("lllyasviel/Annotators")
processed_image = mlsd_processor(
image,
@@ -357,7 +353,7 @@ class PidiImageProcessorInvocation(ImageProcessorInvocation):
safe: bool = InputField(default=False, description=FieldDescriptions.safe_mode)
scribble: bool = InputField(default=False, description=FieldDescriptions.scribble_mode)
def run_processor(self, image: Image.Image) -> Image.Image:
def run_processor(self, image):
pidi_processor = PidiNetDetector.from_pretrained("lllyasviel/Annotators")
processed_image = pidi_processor(
image,
@@ -385,7 +381,7 @@ class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation):
w: int = InputField(default=512, ge=0, description="Content shuffle `w` parameter")
f: int = InputField(default=256, ge=0, description="Content shuffle `f` parameter")
def run_processor(self, image: Image.Image) -> Image.Image:
def run_processor(self, image):
content_shuffle_processor = ContentShuffleDetector()
processed_image = content_shuffle_processor(
image,
@@ -409,7 +405,7 @@ class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation):
class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation):
"""Applies Zoe depth processing to image"""
def run_processor(self, image: Image.Image) -> Image.Image:
def run_processor(self, image):
zoe_depth_processor = ZoeDetector.from_pretrained("lllyasviel/Annotators")
processed_image = zoe_depth_processor(image)
return processed_image
@@ -430,7 +426,7 @@ class MediapipeFaceProcessorInvocation(ImageProcessorInvocation):
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
def run_processor(self, image: Image.Image) -> Image.Image:
def run_processor(self, image):
mediapipe_face_processor = MediapipeFaceDetector()
processed_image = mediapipe_face_processor(
image,
@@ -458,7 +454,7 @@ class LeresImageProcessorInvocation(ImageProcessorInvocation):
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
def run_processor(self, image: Image.Image) -> Image.Image:
def run_processor(self, image):
leres_processor = LeresDetector.from_pretrained("lllyasviel/Annotators")
processed_image = leres_processor(
image,
@@ -500,8 +496,8 @@ class TileResamplerProcessorInvocation(ImageProcessorInvocation):
np_img = cv2.resize(np_img, (W, H), interpolation=cv2.INTER_AREA)
return np_img
def run_processor(self, image: Image.Image) -> Image.Image:
np_img = np.array(image, dtype=np.uint8)
def run_processor(self, img):
np_img = np.array(img, dtype=np.uint8)
processed_np_image = self.tile_resample(
np_img,
# res=self.tile_size,
@@ -524,7 +520,7 @@ class SegmentAnythingProcessorInvocation(ImageProcessorInvocation):
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
def run_processor(self, image: Image.Image) -> Image.Image:
def run_processor(self, image):
# segment_anything_processor = SamDetector.from_pretrained("ybelkada/segment-anything", subfolder="checkpoints")
segment_anything_processor = SamDetectorReproducibleColors.from_pretrained(
"ybelkada/segment-anything", subfolder="checkpoints"
@@ -570,7 +566,7 @@ class ColorMapImageProcessorInvocation(ImageProcessorInvocation):
color_map_tile_size: int = InputField(default=64, ge=1, description=FieldDescriptions.tile_size)
def run_processor(self, image: Image.Image) -> Image.Image:
def run_processor(self, image: Image.Image):
np_image = np.array(image, dtype=np.uint8)
height, width = np_image.shape[:2]
@@ -605,18 +601,12 @@ class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
)
resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
def run_processor(self, image: Image.Image) -> Image.Image:
def loader(model_path: Path):
return DepthAnythingDetector.load_model(
model_path, model_size=self.model_size, device=TorchDevice.choose_torch_device()
)
def run_processor(self, image: Image.Image):
depth_anything_detector = DepthAnythingDetector()
depth_anything_detector.load_model(model_size=self.model_size)
with self._context.models.load_remote_model(
source=DEPTH_ANYTHING_MODELS[self.model_size], loader=loader
) as model:
depth_anything_detector = DepthAnythingDetector(model, TorchDevice.choose_torch_device())
processed_image = depth_anything_detector(image=image, resolution=self.resolution)
return processed_image
processed_image = depth_anything_detector(image=image, resolution=self.resolution)
return processed_image
@invocation(
@@ -634,11 +624,8 @@ class DWOpenposeImageProcessorInvocation(ImageProcessorInvocation):
draw_hands: bool = InputField(default=False)
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
def run_processor(self, image: Image.Image) -> Image.Image:
onnx_det = self._context.models.download_and_cache_model(DWPOSE_MODELS["yolox_l.onnx"])
onnx_pose = self._context.models.download_and_cache_model(DWPOSE_MODELS["dw-ll_ucoco_384.onnx"])
dw_openpose = DWOpenposeDetector(onnx_det=onnx_det, onnx_pose=onnx_pose)
def run_processor(self, image: Image.Image):
dw_openpose = DWOpenposeDetector()
processed_image = dw_openpose(
image,
draw_face=self.draw_face,

View File

@@ -42,16 +42,15 @@ class InfillImageProcessorInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Infill the image with the specified method"""
pass
def load_image(self) -> tuple[Image.Image, bool]:
def load_image(self, context: InvocationContext) -> tuple[Image.Image, bool]:
"""Process the image to have an alpha channel before being infilled"""
image = self._context.images.get_pil(self.image.image_name)
image = context.images.get_pil(self.image.image_name)
has_alpha = True if image.mode == "RGBA" else False
return image, has_alpha
def invoke(self, context: InvocationContext) -> ImageOutput:
self._context = context
# Retrieve and process image to be infilled
input_image, has_alpha = self.load_image()
input_image, has_alpha = self.load_image(context)
# If the input image has no alpha channel, return it
if has_alpha is False:
@@ -134,12 +133,8 @@ class LaMaInfillInvocation(InfillImageProcessorInvocation):
"""Infills transparent areas of an image using the LaMa model"""
def infill(self, image: Image.Image):
with self._context.models.load_remote_model(
source="https://github.com/Sanster/models/releases/download/add_big_lama/big-lama.pt",
loader=LaMA.load_jit_model,
) as model:
lama = LaMA(model)
return lama(image)
lama = LaMA()
return lama(image)
@invocation("infill_cv2", title="CV2 Infill", tags=["image", "inpaint"], category="inpaint", version="1.2.2")

View File

@@ -50,7 +50,7 @@ from invokeai.app.invocations.primitives import DenoiseMaskOutput, ImageOutput,
from invokeai.app.invocations.t2i_adapter import T2IAdapterField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.controlnet_utils import prepare_control_image
from invokeai.backend.ip_adapter.ip_adapter import IPAdapter
from invokeai.backend.ip_adapter.ip_adapter import IPAdapter, IPAdapterPlus
from invokeai.backend.lora import LoRAModelRaw
from invokeai.backend.model_manager import BaseModelType, LoadedModel
from invokeai.backend.model_manager.config import MainConfigBase, ModelVariantType
@@ -672,52 +672,54 @@ class DenoiseLatentsInvocation(BaseInvocation):
return controlnet_data
def prep_ip_adapter_image_prompts(
self,
context: InvocationContext,
ip_adapters: List[IPAdapterField],
) -> List[Tuple[torch.Tensor, torch.Tensor]]:
"""Run the IPAdapter CLIPVisionModel, returning image prompt embeddings."""
image_prompts = []
for single_ip_adapter in ip_adapters:
with context.models.load(single_ip_adapter.ip_adapter_model) as ip_adapter_model:
assert isinstance(ip_adapter_model, IPAdapter)
image_encoder_model_info = context.models.load(single_ip_adapter.image_encoder_model)
# `single_ip_adapter.image` could be a list or a single ImageField. Normalize to a list here.
single_ipa_image_fields = single_ip_adapter.image
if not isinstance(single_ipa_image_fields, list):
single_ipa_image_fields = [single_ipa_image_fields]
single_ipa_images = [context.images.get_pil(image.image_name) for image in single_ipa_image_fields]
with image_encoder_model_info as image_encoder_model:
assert isinstance(image_encoder_model, CLIPVisionModelWithProjection)
# Get image embeddings from CLIP and ImageProjModel.
image_prompt_embeds, uncond_image_prompt_embeds = ip_adapter_model.get_image_embeds(
single_ipa_images, image_encoder_model
)
image_prompts.append((image_prompt_embeds, uncond_image_prompt_embeds))
return image_prompts
def prep_ip_adapter_data(
self,
context: InvocationContext,
ip_adapters: List[IPAdapterField],
image_prompts: List[Tuple[torch.Tensor, torch.Tensor]],
ip_adapter: Optional[Union[IPAdapterField, list[IPAdapterField]]],
exit_stack: ExitStack,
latent_height: int,
latent_width: int,
dtype: torch.dtype,
) -> Optional[List[IPAdapterData]]:
"""If IP-Adapter is enabled, then this function loads the requisite models and adds the image prompt conditioning data."""
ip_adapter_data_list = []
for single_ip_adapter, (image_prompt_embeds, uncond_image_prompt_embeds) in zip(
ip_adapters, image_prompts, strict=True
):
ip_adapter_model = exit_stack.enter_context(context.models.load(single_ip_adapter.ip_adapter_model))
) -> Optional[list[IPAdapterData]]:
"""If IP-Adapter is enabled, then this function loads the requisite models, and adds the image prompt embeddings
to the `conditioning_data` (in-place).
"""
if ip_adapter is None:
return None
mask_field = single_ip_adapter.mask
mask = context.tensors.load(mask_field.tensor_name) if mask_field is not None else None
# ip_adapter could be a list or a single IPAdapterField. Normalize to a list here.
if not isinstance(ip_adapter, list):
ip_adapter = [ip_adapter]
if len(ip_adapter) == 0:
return None
ip_adapter_data_list = []
for single_ip_adapter in ip_adapter:
ip_adapter_model: Union[IPAdapter, IPAdapterPlus] = exit_stack.enter_context(
context.models.load(single_ip_adapter.ip_adapter_model)
)
image_encoder_model_info = context.models.load(single_ip_adapter.image_encoder_model)
# `single_ip_adapter.image` could be a list or a single ImageField. Normalize to a list here.
single_ipa_image_fields = single_ip_adapter.image
if not isinstance(single_ipa_image_fields, list):
single_ipa_image_fields = [single_ipa_image_fields]
single_ipa_images = [context.images.get_pil(image.image_name) for image in single_ipa_image_fields]
# TODO(ryand): With some effort, the step of running the CLIP Vision encoder could be done before any other
# models are needed in memory. This would help to reduce peak memory utilization in low-memory environments.
with image_encoder_model_info as image_encoder_model:
assert isinstance(image_encoder_model, CLIPVisionModelWithProjection)
# Get image embeddings from CLIP and ImageProjModel.
image_prompt_embeds, uncond_image_prompt_embeds = ip_adapter_model.get_image_embeds(
single_ipa_images, image_encoder_model
)
mask = single_ip_adapter.mask
if mask is not None:
mask = context.tensors.load(mask.tensor_name)
mask = self._preprocess_regional_prompt_mask(mask, latent_height, latent_width, dtype=dtype)
ip_adapter_data_list.append(
@@ -732,7 +734,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
)
)
return ip_adapter_data_list if len(ip_adapter_data_list) > 0 else None
return ip_adapter_data_list
def run_t2i_adapters(
self,
@@ -853,16 +855,6 @@ class DenoiseLatentsInvocation(BaseInvocation):
# At some point, someone decided that schedulers that accept a generator should use the original seed with
# all bits flipped. I don't know the original rationale for this, but now we must keep it like this for
# reproducibility.
#
# These Invoke-supported schedulers accept a generator as of 2024-06-04:
# - DDIMScheduler
# - DDPMScheduler
# - DPMSolverMultistepScheduler
# - EulerAncestralDiscreteScheduler
# - EulerDiscreteScheduler
# - KDPM2AncestralDiscreteScheduler
# - LCMScheduler
# - TCDScheduler
scheduler_step_kwargs.update({"generator": torch.Generator(device=device).manual_seed(seed ^ 0xFFFFFFFF)})
if isinstance(scheduler, TCDScheduler):
scheduler_step_kwargs.update({"eta": 1.0})
@@ -920,20 +912,6 @@ class DenoiseLatentsInvocation(BaseInvocation):
do_classifier_free_guidance=True,
)
ip_adapters: List[IPAdapterField] = []
if self.ip_adapter is not None:
# ip_adapter could be a list or a single IPAdapterField. Normalize to a list here.
if isinstance(self.ip_adapter, list):
ip_adapters = self.ip_adapter
else:
ip_adapters = [self.ip_adapter]
# If there are IP adapters, the following line runs the adapters' CLIPVision image encoders to return
# a series of image conditioning embeddings. This is being done here rather than in the
# big model context below in order to use less VRAM on low-VRAM systems.
# The image prompts are then passed to prep_ip_adapter_data().
image_prompts = self.prep_ip_adapter_image_prompts(context=context, ip_adapters=ip_adapters)
# get the unet's config so that we can pass the base to dispatch_progress()
unet_config = context.models.get_config(self.unet.unet.key)
@@ -952,15 +930,11 @@ class DenoiseLatentsInvocation(BaseInvocation):
assert isinstance(unet_info.model, UNet2DConditionModel)
with (
ExitStack() as exit_stack,
unet_info.model_on_device() as (model_state_dict, unet),
unet_info as unet,
ModelPatcher.apply_freeu(unet, self.unet.freeu_config),
set_seamless(unet, self.unet.seamless_axes), # FIXME
# Apply the LoRA after unet has been moved to its target device for faster patching.
ModelPatcher.apply_lora_unet(
unet,
loras=_lora_loader(),
model_state_dict=model_state_dict,
),
ModelPatcher.apply_lora_unet(unet, _lora_loader()),
):
assert isinstance(unet, UNet2DConditionModel)
latents = latents.to(device=unet.device, dtype=unet.dtype)
@@ -996,8 +970,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
ip_adapter_data = self.prep_ip_adapter_data(
context=context,
ip_adapters=ip_adapters,
image_prompts=image_prompts,
ip_adapter=self.ip_adapter,
exit_stack=exit_stack,
latent_height=latent_height,
latent_width=latent_width,
@@ -1312,7 +1285,7 @@ class ImageToLatentsInvocation(BaseInvocation):
title="Blend Latents",
tags=["latents", "blend"],
category="latents",
version="1.0.3",
version="1.0.2",
)
class BlendLatentsInvocation(BaseInvocation):
"""Blend two latents using a given alpha. Latents must have same size."""
@@ -1391,7 +1364,7 @@ class BlendLatentsInvocation(BaseInvocation):
TorchDevice.empty_cache()
name = context.tensors.save(tensor=blended_latents)
return LatentsOutput.build(latents_name=name, latents=blended_latents, seed=self.latents_a.seed)
return LatentsOutput.build(latents_name=name, latents=blended_latents)
# The Crop Latents node was copied from @skunkworxdark's implementation here:

View File

@@ -1,4 +1,5 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) & the InvokeAI Team
from pathlib import Path
from typing import Literal
import cv2
@@ -9,8 +10,10 @@ from pydantic import ConfigDict
from invokeai.app.invocations.fields import ImageField
from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.download_with_progress import download_with_progress_bar
from invokeai.backend.image_util.basicsr.rrdbnet_arch import RRDBNet
from invokeai.backend.image_util.realesrgan.realesrgan import RealESRGAN
from invokeai.backend.util.devices import TorchDevice
from .baseinvocation import BaseInvocation, invocation
from .fields import InputField, WithBoard, WithMetadata
@@ -49,6 +52,7 @@ class ESRGANInvocation(BaseInvocation, WithMetadata, WithBoard):
rrdbnet_model = None
netscale = None
esrgan_model_path = None
if self.model_name in [
"RealESRGAN_x4plus.pth",
@@ -91,25 +95,28 @@ class ESRGANInvocation(BaseInvocation, WithMetadata, WithBoard):
context.logger.error(msg)
raise ValueError(msg)
loadnet = context.models.load_remote_model(
source=ESRGAN_MODEL_URLS[self.model_name],
esrgan_model_path = Path(context.config.get().models_path, f"core/upscaling/realesrgan/{self.model_name}")
# Downloads the ESRGAN model if it doesn't already exist
download_with_progress_bar(
name=self.model_name, url=ESRGAN_MODEL_URLS[self.model_name], dest_path=esrgan_model_path
)
with loadnet as loadnet_model:
upscaler = RealESRGAN(
scale=netscale,
loadnet=loadnet_model,
model=rrdbnet_model,
half=False,
tile=self.tile_size,
)
upscaler = RealESRGAN(
scale=netscale,
model_path=esrgan_model_path,
model=rrdbnet_model,
half=False,
tile=self.tile_size,
)
# prepare image - Real-ESRGAN uses cv2 internally, and cv2 uses BGR vs RGB for PIL
# TODO: This strips the alpha... is that okay?
cv2_image = cv2.cvtColor(np.array(image.convert("RGB")), cv2.COLOR_RGB2BGR)
upscaled_image = upscaler.upscale(cv2_image)
# prepare image - Real-ESRGAN uses cv2 internally, and cv2 uses BGR vs RGB for PIL
# TODO: This strips the alpha... is that okay?
cv2_image = cv2.cvtColor(np.array(image.convert("RGB")), cv2.COLOR_RGB2BGR)
upscaled_image = upscaler.upscale(cv2_image)
pil_image = Image.fromarray(cv2.cvtColor(upscaled_image, cv2.COLOR_BGR2RGB)).convert("RGBA")
pil_image = Image.fromarray(cv2.cvtColor(upscaled_image, cv2.COLOR_BGR2RGB)).convert("RGBA")
TorchDevice.empty_cache()
image_dto = context.images.save(image=pil_image)

View File

@@ -86,7 +86,6 @@ class InvokeAIAppConfig(BaseSettings):
patchmatch: Enable patchmatch inpaint code.
models_dir: Path to the models directory.
convert_cache_dir: Path to the converted models cache directory. When loading a non-diffusers model, it will be converted and stored on disk at this location.
download_cache_dir: Path to the directory that contains dynamically downloaded models.
legacy_conf_dir: Path to directory of legacy checkpoint config files.
db_dir: Path to InvokeAI databases directory.
outputs_dir: Path to directory for outputs.
@@ -147,8 +146,7 @@ class InvokeAIAppConfig(BaseSettings):
# PATHS
models_dir: Path = Field(default=Path("models"), description="Path to the models directory.")
convert_cache_dir: Path = Field(default=Path("models/.convert_cache"), description="Path to the converted models cache directory. When loading a non-diffusers model, it will be converted and store on disk at this location.")
download_cache_dir: Path = Field(default=Path("models/.download_cache"), description="Path to the directory that contains dynamically downloaded models.")
convert_cache_dir: Path = Field(default=Path("models/.cache"), description="Path to the converted models cache directory. When loading a non-diffusers model, it will be converted and store on disk at this location.")
legacy_conf_dir: Path = Field(default=Path("configs"), description="Path to directory of legacy checkpoint config files.")
db_dir: Path = Field(default=Path("databases"), description="Path to InvokeAI databases directory.")
outputs_dir: Path = Field(default=Path("outputs"), description="Path to directory for outputs.")
@@ -305,11 +303,6 @@ class InvokeAIAppConfig(BaseSettings):
"""Path to the converted cache models directory, resolved to an absolute path.."""
return self._resolve(self.convert_cache_dir)
@property
def download_cache_path(self) -> Path:
"""Path to the downloaded models directory, resolved to an absolute path.."""
return self._resolve(self.download_cache_dir)
@property
def custom_nodes_path(self) -> Path:
"""Path to the custom nodes directory, resolved to an absolute path.."""

View File

@@ -1,17 +1,10 @@
"""Init file for download queue."""
from .download_base import (
DownloadJob,
DownloadJobStatus,
DownloadQueueServiceBase,
MultiFileDownloadJob,
UnknownJobIDException,
)
from .download_base import DownloadJob, DownloadJobStatus, DownloadQueueServiceBase, UnknownJobIDException
from .download_default import DownloadQueueService, TqdmProgress
__all__ = [
"DownloadJob",
"MultiFileDownloadJob",
"DownloadQueueServiceBase",
"DownloadQueueService",
"TqdmProgress",

View File

@@ -5,13 +5,11 @@ from abc import ABC, abstractmethod
from enum import Enum
from functools import total_ordering
from pathlib import Path
from typing import Any, Callable, List, Optional, Set, Union
from typing import Any, Callable, List, Optional
from pydantic import BaseModel, Field, PrivateAttr
from pydantic.networks import AnyHttpUrl
from invokeai.backend.model_manager.metadata import RemoteModelFile
class DownloadJobStatus(str, Enum):
"""State of a download job."""
@@ -35,23 +33,30 @@ class ServiceInactiveException(Exception):
"""This exception is raised when user attempts to initiate a download before the service is started."""
SingleFileDownloadEventHandler = Callable[["DownloadJob"], None]
SingleFileDownloadExceptionHandler = Callable[["DownloadJob", Optional[Exception]], None]
MultiFileDownloadEventHandler = Callable[["MultiFileDownloadJob"], None]
MultiFileDownloadExceptionHandler = Callable[["MultiFileDownloadJob", Optional[Exception]], None]
DownloadEventHandler = Union[SingleFileDownloadEventHandler, MultiFileDownloadEventHandler]
DownloadExceptionHandler = Union[SingleFileDownloadExceptionHandler, MultiFileDownloadExceptionHandler]
DownloadEventHandler = Callable[["DownloadJob"], None]
DownloadExceptionHandler = Callable[["DownloadJob", Optional[Exception]], None]
class DownloadJobBase(BaseModel):
"""Base of classes to monitor and control downloads."""
@total_ordering
class DownloadJob(BaseModel):
"""Class to monitor and control a model download request."""
# required variables to be passed in on creation
source: AnyHttpUrl = Field(description="Where to download from. Specific types specified in child classes.")
dest: Path = Field(description="Destination of downloaded model on local disk; a directory or file path")
access_token: Optional[str] = Field(default=None, description="authorization token for protected resources")
# automatically assigned on creation
id: int = Field(description="Numeric ID of this job", default=-1) # default id is a sentinel
priority: int = Field(default=10, description="Queue priority; lower values are higher priority")
dest: Path = Field(description="Initial destination of downloaded model on local disk; a directory or file path")
download_path: Optional[Path] = Field(default=None, description="Final location of downloaded file or directory")
# set internally during download process
status: DownloadJobStatus = Field(default=DownloadJobStatus.WAITING, description="Status of the download")
download_path: Optional[Path] = Field(default=None, description="Final location of downloaded file")
job_started: Optional[str] = Field(default=None, description="Timestamp for when the download job started")
job_ended: Optional[str] = Field(
default=None, description="Timestamp for when the download job ende1d (completed or errored)"
)
content_type: Optional[str] = Field(default=None, description="Content type of downloaded file")
bytes: int = Field(default=0, description="Bytes downloaded so far")
total_bytes: int = Field(default=0, description="Total file size (bytes)")
@@ -69,6 +74,14 @@ class DownloadJobBase(BaseModel):
_on_cancelled: Optional[DownloadEventHandler] = PrivateAttr(default=None)
_on_error: Optional[DownloadExceptionHandler] = PrivateAttr(default=None)
def __hash__(self) -> int:
"""Return hash of the string representation of this object, for indexing."""
return hash(str(self))
def __le__(self, other: "DownloadJob") -> bool:
"""Return True if this job's priority is less than another's."""
return self.priority <= other.priority
def cancel(self) -> None:
"""Call to cancel the job."""
self._cancelled = True
@@ -85,11 +98,6 @@ class DownloadJobBase(BaseModel):
"""Return true if job completed without errors."""
return self.status == DownloadJobStatus.COMPLETED
@property
def waiting(self) -> bool:
"""Return true if the job is waiting to run."""
return self.status == DownloadJobStatus.WAITING
@property
def running(self) -> bool:
"""Return true if the job is running."""
@@ -146,37 +154,6 @@ class DownloadJobBase(BaseModel):
self._on_cancelled = on_cancelled
@total_ordering
class DownloadJob(DownloadJobBase):
"""Class to monitor and control a model download request."""
# required variables to be passed in on creation
source: AnyHttpUrl = Field(description="Where to download from. Specific types specified in child classes.")
access_token: Optional[str] = Field(default=None, description="authorization token for protected resources")
priority: int = Field(default=10, description="Queue priority; lower values are higher priority")
# set internally during download process
job_started: Optional[str] = Field(default=None, description="Timestamp for when the download job started")
job_ended: Optional[str] = Field(
default=None, description="Timestamp for when the download job ende1d (completed or errored)"
)
content_type: Optional[str] = Field(default=None, description="Content type of downloaded file")
def __hash__(self) -> int:
"""Return hash of the string representation of this object, for indexing."""
return hash(str(self))
def __le__(self, other: "DownloadJob") -> bool:
"""Return True if this job's priority is less than another's."""
return self.priority <= other.priority
class MultiFileDownloadJob(DownloadJobBase):
"""Class to monitor and control multifile downloads."""
download_parts: Set[DownloadJob] = Field(default_factory=set, description="List of download parts.")
class DownloadQueueServiceBase(ABC):
"""Multithreaded queue for downloading models via URL."""
@@ -224,48 +201,6 @@ class DownloadQueueServiceBase(ABC):
"""
pass
@abstractmethod
def multifile_download(
self,
parts: List[RemoteModelFile],
dest: Path,
access_token: Optional[str] = None,
submit_job: bool = True,
on_start: Optional[DownloadEventHandler] = None,
on_progress: Optional[DownloadEventHandler] = None,
on_complete: Optional[DownloadEventHandler] = None,
on_cancelled: Optional[DownloadEventHandler] = None,
on_error: Optional[DownloadExceptionHandler] = None,
) -> MultiFileDownloadJob:
"""
Create and enqueue a multifile download job.
:param parts: Set of URL / filename pairs
:param dest: Path to download to. See below.
:param access_token: Access token to download the indicated files. If not provided,
each file's URL may be matched to an access token using the config file matching
system.
:param submit_job: If true [default] then submit the job for execution. Otherwise,
you will need to pass the job to submit_multifile_download().
:param on_start, on_progress, on_complete, on_error: Callbacks for the indicated
events.
:returns: A MultiFileDownloadJob object for monitoring the state of the download.
The `dest` argument is a Path object pointing to a directory. All downloads
will be placed inside this directory. The callbacks will receive the
MultiFileDownloadJob.
"""
pass
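A hedged usage sketch of the API described in the docstring above; it is not taken from the diff. The URLs, filenames, and destination directory are made up, and it assumes a concrete `DownloadQueueService` that has been started with `start()`.

```python
from pathlib import Path

from invokeai.app.services.download import DownloadQueueService
from invokeai.backend.model_manager.metadata import RemoteModelFile

queue = DownloadQueueService()
queue.start()

# Hypothetical URL/filename pairs; in real use these come from model metadata.
parts = [
    RemoteModelFile(url="https://example.com/repo/config.json", path=Path("repo/config.json"), size=0),
    RemoteModelFile(url="https://example.com/repo/weights.safetensors", path=Path("repo/weights.safetensors"), size=0),
]

job = queue.multifile_download(
    parts=parts,
    dest=Path("/tmp/downloads"),  # all files land inside this directory
    on_progress=lambda j: print(f"{j.bytes}/{j.total_bytes} bytes"),
)
queue.wait_for_job(job)      # blocks until the MultiFileDownloadJob reaches a terminal state
print(job.download_path)     # top-level directory created under dest
queue.stop()
```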
@abstractmethod
def submit_multifile_download(self, job: MultiFileDownloadJob) -> None:
"""
Enqueue a previously-created multi-file download job.
:param job: A MultiFileDownloadJob created with multifile_download()
"""
pass
@abstractmethod
def submit_download_job(
self,
@@ -317,7 +252,7 @@ class DownloadQueueServiceBase(ABC):
pass
@abstractmethod
def cancel_job(self, job: DownloadJobBase) -> None:
def cancel_job(self, job: DownloadJob) -> None:
"""Cancel the job, clearing partial downloads and putting it into ERROR state."""
pass
@@ -327,7 +262,7 @@ class DownloadQueueServiceBase(ABC):
pass
@abstractmethod
def wait_for_job(self, job: DownloadJobBase, timeout: int = 0) -> DownloadJobBase:
def wait_for_job(self, job: DownloadJob, timeout: int = 0) -> DownloadJob:
"""Wait until the indicated download job has reached a terminal state.
This will block until the indicated download job has completed,

View File

@@ -8,28 +8,23 @@ import time
import traceback
from pathlib import Path
from queue import Empty, PriorityQueue
from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Set
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set
import requests
from pydantic.networks import AnyHttpUrl
from requests import HTTPError
from tqdm import tqdm
from invokeai.app.services.config import InvokeAIAppConfig, get_config
from invokeai.app.services.events.events_base import EventServiceBase
from invokeai.app.util.misc import get_iso_timestamp
from invokeai.backend.model_manager.metadata import RemoteModelFile
from invokeai.backend.util.logging import InvokeAILogger
from .download_base import (
DownloadEventHandler,
DownloadExceptionHandler,
DownloadJob,
DownloadJobBase,
DownloadJobCancelledException,
DownloadJobStatus,
DownloadQueueServiceBase,
MultiFileDownloadJob,
ServiceInactiveException,
UnknownJobIDException,
)
@@ -47,24 +42,20 @@ class DownloadQueueService(DownloadQueueServiceBase):
def __init__(
self,
max_parallel_dl: int = 5,
app_config: Optional[InvokeAIAppConfig] = None,
event_bus: Optional["EventServiceBase"] = None,
requests_session: Optional[requests.sessions.Session] = None,
):
"""
Initialize DownloadQueue.
:param app_config: InvokeAIAppConfig object
:param max_parallel_dl: Number of simultaneous downloads allowed [5].
:param requests_session: Optional requests.sessions.Session object, for unit tests.
"""
self._app_config = app_config or get_config()
self._jobs: Dict[int, DownloadJob] = {}
self._download_part2parent: Dict[AnyHttpUrl, MultiFileDownloadJob] = {}
self._next_job_id = 0
self._queue: PriorityQueue[DownloadJob] = PriorityQueue()
self._stop_event = threading.Event()
self._job_terminated_event = threading.Event()
self._job_completed_event = threading.Event()
self._worker_pool: Set[threading.Thread] = set()
self._lock = threading.Lock()
self._logger = InvokeAILogger.get_logger("DownloadQueueService")
@@ -116,16 +107,18 @@ class DownloadQueueService(DownloadQueueServiceBase):
raise ServiceInactiveException(
"The download service is not currently accepting requests. Please call start() to initialize the service."
)
job.id = self._next_id()
job.set_callbacks(
on_start=on_start,
on_progress=on_progress,
on_complete=on_complete,
on_cancelled=on_cancelled,
on_error=on_error,
)
self._jobs[job.id] = job
self._queue.put(job)
with self._lock:
job.id = self._next_job_id
self._next_job_id += 1
job.set_callbacks(
on_start=on_start,
on_progress=on_progress,
on_complete=on_complete,
on_cancelled=on_cancelled,
on_error=on_error,
)
self._jobs[job.id] = job
self._queue.put(job)
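The hunk above moves id assignment, callback wiring, and enqueueing under `self._lock`, so two threads submitting jobs at once cannot race on `_next_job_id`. A minimal sketch of that pattern in isolation (toy code, not from the diff):

```python
import threading


class JobRegistry:
    """Toy illustration of a lock-protected id counter and job table."""

    def __init__(self) -> None:
        self._lock = threading.Lock()
        self._next_job_id = 0
        self._jobs: dict[int, object] = {}

    def submit(self, job: object) -> int:
        with self._lock:
            job_id = self._next_job_id  # read, increment, and register atomically
            self._next_job_id += 1
            self._jobs[job_id] = job
            return job_id
```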
def download(
self,
@@ -148,7 +141,7 @@ class DownloadQueueService(DownloadQueueServiceBase):
source=source,
dest=dest,
priority=priority,
access_token=access_token or self._lookup_access_token(source),
access_token=access_token,
)
self.submit_download_job(
job,
@@ -160,63 +153,10 @@ class DownloadQueueService(DownloadQueueServiceBase):
)
return job
def multifile_download(
self,
parts: List[RemoteModelFile],
dest: Path,
access_token: Optional[str] = None,
submit_job: bool = True,
on_start: Optional[DownloadEventHandler] = None,
on_progress: Optional[DownloadEventHandler] = None,
on_complete: Optional[DownloadEventHandler] = None,
on_cancelled: Optional[DownloadEventHandler] = None,
on_error: Optional[DownloadExceptionHandler] = None,
) -> MultiFileDownloadJob:
mfdj = MultiFileDownloadJob(dest=dest, id=self._next_id())
mfdj.set_callbacks(
on_start=on_start,
on_progress=on_progress,
on_complete=on_complete,
on_cancelled=on_cancelled,
on_error=on_error,
)
for part in parts:
url = part.url
path = dest / part.path
assert path.is_relative_to(dest), "only relative download paths accepted"
job = DownloadJob(
source=url,
dest=path,
access_token=access_token,
)
mfdj.download_parts.add(job)
self._download_part2parent[job.source] = mfdj
if submit_job:
self.submit_multifile_download(mfdj)
return mfdj
def submit_multifile_download(self, job: MultiFileDownloadJob) -> None:
for download_job in job.download_parts:
self.submit_download_job(
download_job,
on_start=self._mfd_started,
on_progress=self._mfd_progress,
on_complete=self._mfd_complete,
on_cancelled=self._mfd_cancelled,
on_error=self._mfd_error,
)
def join(self) -> None:
"""Wait for all jobs to complete."""
self._queue.join()
def _next_id(self) -> int:
with self._lock:
id = self._next_job_id
self._next_job_id += 1
return id
def list_jobs(self) -> List[DownloadJob]:
"""List all the jobs."""
return list(self._jobs.values())
@@ -238,14 +178,14 @@ class DownloadQueueService(DownloadQueueServiceBase):
except KeyError as excp:
raise UnknownJobIDException("Unrecognized job") from excp
def cancel_job(self, job: DownloadJobBase) -> None:
def cancel_job(self, job: DownloadJob) -> None:
"""
Cancel the indicated job.
If it is running it will be stopped.
job.status will be set to DownloadJobStatus.CANCELLED
"""
if job.status in [DownloadJobStatus.WAITING, DownloadJobStatus.RUNNING]:
with self._lock:
job.cancel()
def cancel_all_jobs(self) -> None:
@@ -254,12 +194,12 @@ class DownloadQueueService(DownloadQueueServiceBase):
if not job.in_terminal_state:
self.cancel_job(job)
def wait_for_job(self, job: DownloadJobBase, timeout: int = 0) -> DownloadJobBase:
def wait_for_job(self, job: DownloadJob, timeout: int = 0) -> DownloadJob:
"""Block until the indicated job has reached terminal state, or when timeout limit reached."""
start = time.time()
while not job.in_terminal_state:
if self._job_terminated_event.wait(timeout=0.25): # in case we miss an event
self._job_terminated_event.clear()
if self._job_completed_event.wait(timeout=0.25): # in case we miss an event
self._job_completed_event.clear()
if timeout > 0 and time.time() - start > timeout:
raise TimeoutError("Timeout exceeded")
return job
@@ -288,25 +228,22 @@ class DownloadQueueService(DownloadQueueServiceBase):
job.job_started = get_iso_timestamp()
self._do_download(job)
self._signal_job_complete(job)
except DownloadJobCancelledException:
self._signal_job_cancelled(job)
self._cleanup_cancelled_job(job)
except Exception as excp:
except (OSError, HTTPError) as excp:
job.error_type = excp.__class__.__name__ + f"({str(excp)})"
job.error = traceback.format_exc()
self._signal_job_error(job, excp)
except DownloadJobCancelledException:
self._signal_job_cancelled(job)
self._cleanup_cancelled_job(job)
finally:
job.job_ended = get_iso_timestamp()
self._job_terminated_event.set() # signal a change to terminal state
self._download_part2parent.pop(job.source, None) # if this is a subpart of a multipart job, remove it
self._job_terminated_event.set()
self._job_completed_event.set() # signal a change to terminal state
self._queue.task_done()
self._logger.debug(f"Download queue worker thread {threading.current_thread().name} exiting.")
def _do_download(self, job: DownloadJob) -> None:
"""Do the actual download."""
url = job.source
header = {"Authorization": f"Bearer {job.access_token}"} if job.access_token else {}
open_mode = "wb"
@@ -398,29 +335,38 @@ class DownloadQueueService(DownloadQueueServiceBase):
def _in_progress_path(self, path: Path) -> Path:
return path.with_name(path.name + ".downloading")
def _lookup_access_token(self, source: AnyHttpUrl) -> Optional[str]:
# Pull the token from config if it exists and matches the URL
token = None
for pair in self._app_config.remote_api_tokens or []:
if re.search(pair.url_regex, str(source)):
token = pair.token
break
return token
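`_lookup_access_token` (and the equivalent inline logic in the install service further down) matches the source URL against `remote_api_tokens` entries from the config. A hedged illustration of that lookup with made-up regexes and tokens:

```python
import re
from dataclasses import dataclass
from typing import Optional


@dataclass
class TokenPair:
    url_regex: str
    token: str


# Made-up config entries, for illustration only.
remote_api_tokens = [
    TokenPair(url_regex=r"huggingface\.co", token="hf_example_token"),
    TokenPair(url_regex=r"civitai\.com", token="civitai_example_token"),
]


def lookup_access_token(source: str) -> Optional[str]:
    for pair in remote_api_tokens:
        if re.search(pair.url_regex, source):
            return pair.token
    return None


print(lookup_access_token("https://huggingface.co/stabilityai/sdxl-turbo"))  # hf_example_token
print(lookup_access_token("https://example.com/model.safetensors"))          # None
```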
def _signal_job_started(self, job: DownloadJob) -> None:
job.status = DownloadJobStatus.RUNNING
self._execute_cb(job, "on_start")
if job.on_start:
try:
job.on_start(job)
except Exception as e:
self._logger.error(
f"An error occurred while processing the on_start callback: {traceback.format_exception(e)}"
)
if self._event_bus:
self._event_bus.emit_download_started(job)
def _signal_job_progress(self, job: DownloadJob) -> None:
self._execute_cb(job, "on_progress")
if job.on_progress:
try:
job.on_progress(job)
except Exception as e:
self._logger.error(
f"An error occurred while processing the on_progress callback: {traceback.format_exception(e)}"
)
if self._event_bus:
self._event_bus.emit_download_progress(job)
def _signal_job_complete(self, job: DownloadJob) -> None:
job.status = DownloadJobStatus.COMPLETED
self._execute_cb(job, "on_complete")
if job.on_complete:
try:
job.on_complete(job)
except Exception as e:
self._logger.error(
f"An error occurred while processing the on_complete callback: {traceback.format_exception(e)}"
)
if self._event_bus:
self._event_bus.emit_download_complete(job)
@@ -428,21 +374,26 @@ class DownloadQueueService(DownloadQueueServiceBase):
if job.status not in [DownloadJobStatus.RUNNING, DownloadJobStatus.WAITING]:
return
job.status = DownloadJobStatus.CANCELLED
self._execute_cb(job, "on_cancelled")
if job.on_cancelled:
try:
job.on_cancelled(job)
except Exception as e:
self._logger.error(
f"An error occurred while processing the on_cancelled callback: {traceback.format_exception(e)}"
)
if self._event_bus:
self._event_bus.emit_download_cancelled(job)
# if multifile download, then signal the parent
if parent_job := self._download_part2parent.get(job.source, None):
if not parent_job.in_terminal_state:
parent_job.status = DownloadJobStatus.CANCELLED
self._execute_cb(parent_job, "on_cancelled")
def _signal_job_error(self, job: DownloadJob, excp: Optional[Exception] = None) -> None:
job.status = DownloadJobStatus.ERROR
self._logger.error(f"{str(job.source)}: {traceback.format_exception(excp)}")
self._execute_cb(job, "on_error", excp)
if job.on_error:
try:
job.on_error(job, excp)
except Exception as e:
self._logger.error(
f"An error occurred while processing the on_error callback: {traceback.format_exception(e)}"
)
if self._event_bus:
self._event_bus.emit_download_error(job)
@@ -455,97 +406,6 @@ class DownloadQueueService(DownloadQueueServiceBase):
except OSError as excp:
self._logger.warning(excp)
########################################
# callbacks used for multifile downloads
########################################
def _mfd_started(self, download_job: DownloadJob) -> None:
self._logger.info(f"File download started: {download_job.source}")
with self._lock:
mf_job = self._download_part2parent[download_job.source]
if mf_job.waiting:
mf_job.total_bytes = sum(x.total_bytes for x in mf_job.download_parts)
mf_job.status = DownloadJobStatus.RUNNING
assert download_job.download_path is not None
path_relative_to_destdir = download_job.download_path.relative_to(mf_job.dest)
mf_job.download_path = (
mf_job.dest / path_relative_to_destdir.parts[0]
) # keep just the first component of the path
self._execute_cb(mf_job, "on_start")
def _mfd_progress(self, download_job: DownloadJob) -> None:
with self._lock:
mf_job = self._download_part2parent[download_job.source]
if mf_job.cancelled:
for part in mf_job.download_parts:
self.cancel_job(part)
elif mf_job.running:
mf_job.total_bytes = sum(x.total_bytes for x in mf_job.download_parts)
mf_job.bytes = sum(x.total_bytes for x in mf_job.download_parts)
self._execute_cb(mf_job, "on_progress")
def _mfd_complete(self, download_job: DownloadJob) -> None:
self._logger.info(f"Download complete: {download_job.source}")
with self._lock:
mf_job = self._download_part2parent[download_job.source]
# are there any more active jobs left in this task?
if mf_job.running and all(x.complete for x in mf_job.download_parts):
mf_job.status = DownloadJobStatus.COMPLETED
self._execute_cb(mf_job, "on_complete")
# we're done with this sub-job
self._job_terminated_event.set()
def _mfd_cancelled(self, download_job: DownloadJob) -> None:
with self._lock:
mf_job = self._download_part2parent[download_job.source]
assert mf_job is not None
if not mf_job.in_terminal_state:
self._logger.warning(f"Download cancelled: {download_job.source}")
mf_job.cancel()
for s in mf_job.download_parts:
self.cancel_job(s)
def _mfd_error(self, download_job: DownloadJob, excp: Optional[Exception] = None) -> None:
with self._lock:
mf_job = self._download_part2parent[download_job.source]
assert mf_job is not None
if not mf_job.in_terminal_state:
mf_job.status = download_job.status
mf_job.error = download_job.error
mf_job.error_type = download_job.error_type
self._execute_cb(mf_job, "on_error", excp)
self._logger.error(
f"Cancelling {mf_job.dest} due to an error while downloading {download_job.source}: {str(excp)}"
)
for s in [x for x in mf_job.download_parts if x.running]:
self.cancel_job(s)
self._download_part2parent.pop(download_job.source)
self._job_terminated_event.set()
def _execute_cb(
self,
job: DownloadJob | MultiFileDownloadJob,
callback_name: Literal[
"on_start",
"on_progress",
"on_complete",
"on_cancelled",
"on_error",
],
excp: Optional[Exception] = None,
) -> None:
if callback := getattr(job, callback_name, None):
args = [job, excp] if excp else [job]
try:
callback(*args)
except Exception as e:
self._logger.error(
f"An error occurred while processing the {callback_name} callback: {traceback.format_exception(e)}"
)
def get_pc_name_max(directory: str) -> int:
if hasattr(os, "pathconf"):

View File

@@ -3,8 +3,9 @@ from typing import TYPE_CHECKING, Any, ClassVar, Coroutine, Generic, Optional, P
from fastapi_events.handlers.local import local_handler
from fastapi_events.registry.payload_schema import registry as payload_schema
from pydantic import BaseModel, ConfigDict, Field
from pydantic import BaseModel, ConfigDict, Field, SerializeAsAny, field_validator
from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput
from invokeai.app.services.session_processor.session_processor_common import ProgressImage
from invokeai.app.services.session_queue.session_queue_common import (
QUEUE_ITEM_STATUS,
@@ -13,7 +14,6 @@ from invokeai.app.services.session_queue.session_queue_common import (
SessionQueueItem,
SessionQueueStatus,
)
from invokeai.app.services.shared.graph import AnyInvocation, AnyInvocationOutput
from invokeai.app.util.misc import get_timestamp
from invokeai.backend.model_manager.config import AnyModelConfig, SubModelType
from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState
@@ -98,9 +98,17 @@ class InvocationEventBase(QueueItemEventBase):
item_id: int = Field(description="The ID of the queue item")
batch_id: str = Field(description="The ID of the queue batch")
session_id: str = Field(description="The ID of the session (aka graph execution state)")
invocation: AnyInvocation = Field(description="The ID of the invocation")
invocation: SerializeAsAny[BaseInvocation] = Field(description="The ID of the invocation")
invocation_source_id: str = Field(description="The ID of the prepared invocation's source node")
@field_validator("invocation", mode="plain")
@classmethod
def validate_invocation(cls, v: Any):
"""Validates the invocation using the dynamic type adapter."""
invocation = BaseInvocation.get_typeadapter().validate_python(v)
return invocation
@payload_schema.register
class InvocationStartedEvent(InvocationEventBase):
@@ -109,7 +117,7 @@ class InvocationStartedEvent(InvocationEventBase):
__event_name__ = "invocation_started"
@classmethod
def build(cls, queue_item: SessionQueueItem, invocation: AnyInvocation) -> "InvocationStartedEvent":
def build(cls, queue_item: SessionQueueItem, invocation: BaseInvocation) -> "InvocationStartedEvent":
return cls(
queue_id=queue_item.queue_id,
item_id=queue_item.item_id,
@@ -136,7 +144,7 @@ class InvocationDenoiseProgressEvent(InvocationEventBase):
def build(
cls,
queue_item: SessionQueueItem,
invocation: AnyInvocation,
invocation: BaseInvocation,
intermediate_state: PipelineIntermediateState,
progress_image: ProgressImage,
) -> "InvocationDenoiseProgressEvent":
@@ -174,11 +182,19 @@ class InvocationCompleteEvent(InvocationEventBase):
__event_name__ = "invocation_complete"
result: AnyInvocationOutput = Field(description="The result of the invocation")
result: SerializeAsAny[BaseInvocationOutput] = Field(description="The result of the invocation")
@field_validator("result", mode="plain")
@classmethod
def validate_results(cls, v: Any):
"""Validates the invocation result using the dynamic type adapter."""
result = BaseInvocationOutput.get_typeadapter().validate_python(v)
return result
@classmethod
def build(
cls, queue_item: SessionQueueItem, invocation: AnyInvocation, result: AnyInvocationOutput
cls, queue_item: SessionQueueItem, invocation: BaseInvocation, result: BaseInvocationOutput
) -> "InvocationCompleteEvent":
return cls(
queue_id=queue_item.queue_id,
@@ -207,7 +223,7 @@ class InvocationErrorEvent(InvocationEventBase):
def build(
cls,
queue_item: SessionQueueItem,
invocation: AnyInvocation,
invocation: BaseInvocation,
error_type: str,
error_message: str,
error_traceback: str,

View File

@@ -13,7 +13,7 @@ from invokeai.app.services.events.events_base import EventServiceBase
from invokeai.app.services.invoker import Invoker
from invokeai.app.services.model_install.model_install_common import ModelInstallJob, ModelSource
from invokeai.app.services.model_records import ModelRecordServiceBase
from invokeai.backend.model_manager import AnyModelConfig
from invokeai.backend.model_manager.config import AnyModelConfig
class ModelInstallServiceBase(ABC):
@@ -243,11 +243,12 @@ class ModelInstallServiceBase(ABC):
"""
@abstractmethod
def download_and_cache_model(self, source: str | AnyHttpUrl) -> Path:
def download_and_cache(self, source: Union[str, AnyHttpUrl], access_token: Optional[str] = None) -> Path:
"""
Download the model file located at source to the models cache and return its Path.
:param source: A string representing a URL or repo_id.
:param source: A Url or a string that can be converted into one.
:param access_token: Optional access token to access restricted resources.
The model file will be downloaded into the system-wide model cache
(`models/.cache`) if it isn't already there. Note that the model cache

View File

@@ -8,7 +8,7 @@ from pydantic import BaseModel, Field, PrivateAttr, field_validator
from pydantic.networks import AnyHttpUrl
from typing_extensions import Annotated
from invokeai.app.services.download import DownloadJob, MultiFileDownloadJob
from invokeai.app.services.download import DownloadJob
from invokeai.backend.model_manager import AnyModelConfig, ModelRepoVariant
from invokeai.backend.model_manager.config import ModelSourceType
from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata
@@ -26,6 +26,13 @@ class InstallStatus(str, Enum):
CANCELLED = "cancelled" # terminated with an error message
class ModelInstallPart(BaseModel):
url: AnyHttpUrl
path: Path
bytes: int = 0
total_bytes: int = 0
class UnknownInstallJobException(Exception):
"""Raised when the status of an unknown job is requested."""
@@ -162,7 +169,6 @@ class ModelInstallJob(BaseModel):
)
# internal flags and transitory settings
_install_tmpdir: Optional[Path] = PrivateAttr(default=None)
_multifile_job: Optional[MultiFileDownloadJob] = PrivateAttr(default=None)
_exception: Optional[Exception] = PrivateAttr(default=None)
def set_error(self, e: Exception) -> None:

View File

@@ -5,22 +5,21 @@ import os
import re
import threading
import time
from hashlib import sha256
from pathlib import Path
from queue import Empty, Queue
from shutil import copyfile, copytree, move, rmtree
from tempfile import mkdtemp
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Type, Union
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
import torch
import yaml
from huggingface_hub import HfFolder
from pydantic.networks import AnyHttpUrl
from pydantic_core import Url
from requests import Session
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.download import DownloadQueueServiceBase, MultiFileDownloadJob
from invokeai.app.services.events.events_base import EventServiceBase
from invokeai.app.services.download import DownloadJob, DownloadQueueServiceBase, TqdmProgress
from invokeai.app.services.invoker import Invoker
from invokeai.app.services.model_install.model_install_base import ModelInstallServiceBase
from invokeai.app.services.model_records import DuplicateModelException, ModelRecordServiceBase
@@ -45,7 +44,6 @@ from invokeai.backend.model_manager.search import ModelSearch
from invokeai.backend.util import InvokeAILogger
from invokeai.backend.util.catch_sigint import catch_sigint
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.util import slugify
from .model_install_common import (
MODEL_SOURCE_TO_TYPE_MAP,
@@ -93,7 +91,7 @@ class ModelInstallService(ModelInstallServiceBase):
self._downloads_changed_event = threading.Event()
self._install_completed_event = threading.Event()
self._download_queue = download_queue
self._download_cache: Dict[int, ModelInstallJob] = {}
self._download_cache: Dict[AnyHttpUrl, ModelInstallJob] = {}
self._running = False
self._session = session
self._install_thread: Optional[threading.Thread] = None
@@ -212,12 +210,33 @@ class ModelInstallService(ModelInstallServiceBase):
access_token: Optional[str] = None,
inplace: Optional[bool] = False,
) -> ModelInstallJob:
"""Install a model using pattern matching to infer the type of source."""
source_obj = self._guess_source(source)
if isinstance(source_obj, LocalModelSource):
source_obj.inplace = inplace
elif isinstance(source_obj, HFModelSource) or isinstance(source_obj, URLModelSource):
source_obj.access_token = access_token
variants = "|".join(ModelRepoVariant.__members__.values())
hf_repoid_re = f"^([^/:]+/[^/:]+)(?::({variants})?(?::/?([^:]+))?)?$"
source_obj: Optional[StringLikeSource] = None
if Path(source).exists(): # A local file or directory
source_obj = LocalModelSource(path=Path(source), inplace=inplace)
elif match := re.match(hf_repoid_re, source):
source_obj = HFModelSource(
repo_id=match.group(1),
variant=match.group(2) if match.group(2) else None, # pass None rather than ''
subfolder=Path(match.group(3)) if match.group(3) else None,
access_token=access_token,
)
elif re.match(r"^https?://[^/]+", source):
# Pull the token from config if it exists and matches the URL
_token = access_token
if _token is None:
for pair in self.app_config.remote_api_tokens or []:
if re.search(pair.url_regex, source):
_token = pair.token
break
source_obj = URLModelSource(
url=AnyHttpUrl(source),
access_token=_token,
)
else:
raise ValueError(f"Unsupported model source: '{source}'")
return self.import_model(source_obj, config)
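The `hf_repoid_re` pattern above accepts an optional `:variant` and `:subfolder` suffix on a HuggingFace repo id. A hedged illustration of what the groups capture, using a trimmed variant list and made-up repo ids:

```python
import re

variants = "fp16|fp32|onnx"  # trimmed stand-in for "|".join(ModelRepoVariant.__members__.values())
hf_repoid_re = f"^([^/:]+/[^/:]+)(?::({variants})?(?::/?([^:]+))?)?$"

for source in (
    "stabilityai/sdxl-turbo",        # plain repo id
    "stabilityai/sdxl-turbo:fp16",   # repo id + variant
    "stabilityai/sdxl-turbo::vae",   # repo id + subfolder, variant omitted
):
    match = re.match(hf_repoid_re, source)
    print(source, "->", match.groups() if match else "no match")
# stabilityai/sdxl-turbo -> ('stabilityai/sdxl-turbo', None, None)
# stabilityai/sdxl-turbo:fp16 -> ('stabilityai/sdxl-turbo', 'fp16', None)
# stabilityai/sdxl-turbo::vae -> ('stabilityai/sdxl-turbo', None, 'vae')
```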
def import_model(self, source: ModelSource, config: Optional[Dict[str, Any]] = None) -> ModelInstallJob: # noqa D102
@@ -278,9 +297,8 @@ class ModelInstallService(ModelInstallServiceBase):
def cancel_job(self, job: ModelInstallJob) -> None:
"""Cancel the indicated job."""
job.cancel()
self._logger.warning(f"Cancelling {job.source}")
if dj := job._multifile_job:
self._download_queue.cancel_job(dj)
with self._lock:
self._cancel_download_parts(job)
def prune_jobs(self) -> None:
"""Prune all completed and errored jobs."""
@@ -328,7 +346,7 @@ class ModelInstallService(ModelInstallServiceBase):
legacy_config_path = stanza.get("config")
if legacy_config_path:
# In v3, these paths were relative to the root. Migrate them to be relative to the legacy_conf_dir.
legacy_config_path = self._app_config.root_path / legacy_config_path
legacy_config_path: Path = self._app_config.root_path / legacy_config_path
if legacy_config_path.is_relative_to(self._app_config.legacy_conf_path):
legacy_config_path = legacy_config_path.relative_to(self._app_config.legacy_conf_path)
config["config_path"] = str(legacy_config_path)
@@ -368,92 +386,38 @@ class ModelInstallService(ModelInstallServiceBase):
rmtree(model_path)
self.unregister(key)
@classmethod
def _download_cache_path(cls, source: Union[str, AnyHttpUrl], app_config: InvokeAIAppConfig) -> Path:
escaped_source = slugify(str(source))
return app_config.download_cache_path / escaped_source
def download_and_cache_model(
def download_and_cache(
self,
source: str | AnyHttpUrl,
source: Union[str, AnyHttpUrl],
access_token: Optional[str] = None,
timeout: int = 0,
) -> Path:
"""Download the model file located at source to the models cache and return its Path."""
model_path = self._download_cache_path(str(source), self._app_config)
model_hash = sha256(str(source).encode("utf-8")).hexdigest()[0:32]
model_path = self._app_config.convert_cache_path / model_hash
# We expect the cache directory to contain one and only one downloaded file or directory.
# We expect the cache directory to contain one and only one downloaded file.
# We don't know the file's name in advance, as it is set by the download
# content-disposition header.
if model_path.exists():
contents: List[Path] = list(model_path.iterdir())
contents = [x for x in model_path.iterdir() if x.is_file()]
if len(contents) > 0:
return contents[0]
model_path.mkdir(parents=True, exist_ok=True)
model_source = self._guess_source(str(source))
remote_files, _ = self._remote_files_from_source(model_source)
job = self._multifile_download(
job = self._download_queue.download(
source=AnyHttpUrl(str(source)),
dest=model_path,
remote_files=remote_files,
subfolder=model_source.subfolder if isinstance(model_source, HFModelSource) else None,
access_token=access_token,
on_progress=TqdmProgress().update,
)
files_string = "file" if len(remote_files) == 1 else "files"
self._logger.info(f"Queuing model download: {source} ({len(remote_files)} {files_string})")
self._download_queue.wait_for_job(job)
self._download_queue.wait_for_job(job, timeout)
if job.complete:
assert job.download_path is not None
return job.download_path
else:
raise Exception(job.error)
def _remote_files_from_source(
self, source: ModelSource
) -> Tuple[List[RemoteModelFile], Optional[AnyModelRepoMetadata]]:
metadata = None
if isinstance(source, HFModelSource):
metadata = HuggingFaceMetadataFetch(self._session).from_id(source.repo_id, source.variant)
assert isinstance(metadata, ModelMetadataWithFiles)
return metadata.download_urls(
variant=source.variant or self._guess_variant(),
subfolder=source.subfolder,
session=self._session,
), metadata
if isinstance(source, URLModelSource):
try:
fetcher = self.get_fetcher_from_url(str(source.url))
kwargs: dict[str, Any] = {"session": self._session}
metadata = fetcher(**kwargs).from_url(source.url)
assert isinstance(metadata, ModelMetadataWithFiles)
return metadata.download_urls(session=self._session), metadata
except ValueError:
pass
return [RemoteModelFile(url=source.url, path=Path("."), size=0)], None
raise Exception(f"No files associated with {source}")
def _guess_source(self, source: str) -> ModelSource:
"""Turn a source string into a ModelSource object."""
variants = "|".join(ModelRepoVariant.__members__.values())
hf_repoid_re = f"^([^/:]+/[^/:]+)(?::({variants})?(?::/?([^:]+))?)?$"
source_obj: Optional[StringLikeSource] = None
if Path(source).exists(): # A local file or directory
source_obj = LocalModelSource(path=Path(source))
elif match := re.match(hf_repoid_re, source):
source_obj = HFModelSource(
repo_id=match.group(1),
variant=ModelRepoVariant(match.group(2)) if match.group(2) else None, # pass None rather than ''
subfolder=Path(match.group(3)) if match.group(3) else None,
)
elif re.match(r"^https?://[^/]+", source):
source_obj = URLModelSource(
url=Url(source),
)
else:
raise ValueError(f"Unsupported model source: '{source}'")
return source_obj
# --------------------------------------------------------------------------------------------
# Internal functions that manage the installer threads
# --------------------------------------------------------------------------------------------
@@ -514,19 +478,16 @@ class ModelInstallService(ModelInstallServiceBase):
job.config_out = self.record_store.get_model(key)
self._signal_job_completed(job)
def _set_error(self, install_job: ModelInstallJob, excp: Exception) -> None:
multifile_download_job = install_job._multifile_job
if multifile_download_job and any(
x.content_type is not None and "text/html" in x.content_type for x in multifile_download_job.download_parts
):
install_job.set_error(
def _set_error(self, job: ModelInstallJob, excp: Exception) -> None:
if any(x.content_type is not None and "text/html" in x.content_type for x in job.download_parts):
job.set_error(
InvalidModelConfigException(
f"At least one file in {install_job.local_path} is an HTML page, not a model. This can happen when an access token is required to download."
f"At least one file in {job.local_path} is an HTML page, not a model. This can happen when an access token is required to download."
)
)
else:
install_job.set_error(excp)
self._signal_job_errored(install_job)
job.set_error(excp)
self._signal_job_errored(job)
# --------------------------------------------------------------------------------------------
# Internal functions that manage the models directory
@@ -552,6 +513,7 @@ class ModelInstallService(ModelInstallServiceBase):
This is typically only used during testing with a new DB or when using the memory DB, because those are the
only situations in which we may have orphaned models in the models directory.
"""
installed_model_paths = {
(self._app_config.models_path / x.path).resolve() for x in self.record_store.all_models()
}
@@ -563,13 +525,8 @@ class ModelInstallService(ModelInstallServiceBase):
if resolved_path in installed_model_paths:
return True
# Skip core models entirely - these aren't registered with the model manager.
for special_directory in [
self.app_config.models_path / "core",
self.app_config.convert_cache_dir,
self.app_config.download_cache_dir,
]:
if resolved_path.is_relative_to(special_directory):
return False
if str(resolved_path).startswith(str(self.app_config.models_path / "core")):
return False
try:
model_id = self.register_path(model_path)
self._logger.info(f"Registered {model_path.name} with id {model_id}")
@@ -684,15 +641,20 @@ class ModelInstallService(ModelInstallServiceBase):
inplace=source.inplace or False,
)
def _import_from_hf(
self,
source: HFModelSource,
config: Optional[Dict[str, Any]] = None,
) -> ModelInstallJob:
def _import_from_hf(self, source: HFModelSource, config: Optional[Dict[str, Any]]) -> ModelInstallJob:
# Add user's cached access token to HuggingFace requests
if source.access_token is None:
source.access_token = HfFolder.get_token()
remote_files, metadata = self._remote_files_from_source(source)
source.access_token = source.access_token or HfFolder.get_token()
if not source.access_token:
self._logger.info("No HuggingFace access token present; some models may not be downloadable.")
metadata = HuggingFaceMetadataFetch(self._session).from_id(source.repo_id, source.variant)
assert isinstance(metadata, ModelMetadataWithFiles)
remote_files = metadata.download_urls(
variant=source.variant or self._guess_variant(),
subfolder=source.subfolder,
session=self._session,
)
return self._import_remote_model(
source=source,
config=config,
@@ -700,12 +662,22 @@ class ModelInstallService(ModelInstallServiceBase):
metadata=metadata,
)
def _import_from_url(
self,
source: URLModelSource,
config: Optional[Dict[str, Any]],
) -> ModelInstallJob:
remote_files, metadata = self._remote_files_from_source(source)
def _import_from_url(self, source: URLModelSource, config: Optional[Dict[str, Any]]) -> ModelInstallJob:
# URLs from HuggingFace will be handled specially
metadata = None
fetcher = None
try:
fetcher = self.get_fetcher_from_url(str(source.url))
except ValueError:
pass
kwargs: dict[str, Any] = {"session": self._session}
if fetcher is not None:
metadata = fetcher(**kwargs).from_url(source.url)
self._logger.debug(f"metadata={metadata}")
if metadata and isinstance(metadata, ModelMetadataWithFiles):
remote_files = metadata.download_urls(session=self._session)
else:
remote_files = [RemoteModelFile(url=source.url, path=Path("."), size=0)]
return self._import_remote_model(
source=source,
config=config,
@@ -720,9 +692,12 @@ class ModelInstallService(ModelInstallServiceBase):
metadata: Optional[AnyModelRepoMetadata],
config: Optional[Dict[str, Any]],
) -> ModelInstallJob:
# TODO: Replace with tempfile.tmpdir() when multithreading is cleaned up.
# Currently the tmpdir isn't automatically removed at exit because it is
# being held in a daemon thread.
if len(remote_files) == 0:
raise ValueError(f"{source}: No downloadable files found")
destdir = Path(
tmpdir = Path(
mkdtemp(
dir=self._app_config.models_path,
prefix=TMPDIR_PREFIX,
@@ -733,28 +708,55 @@ class ModelInstallService(ModelInstallServiceBase):
source=source,
config_in=config or {},
source_metadata=metadata,
local_path=destdir, # local path may change once the download has started due to content-disposition handling
local_path=tmpdir, # local path may change once the download has started due to content-disposition handling
bytes=0,
total_bytes=0,
)
# remember the temporary directory for later removal
install_job._install_tmpdir = destdir
install_job.total_bytes = sum((x.size or 0) for x in remote_files)
# In the event that there is a subfolder specified in the source,
# we need to remove it from the destination path in order to avoid
# creating unwanted subfolders
if isinstance(source, HFModelSource) and source.subfolder:
root = Path(remote_files[0].path.parts[0])
subfolder = root / source.subfolder
else:
root = Path(".")
subfolder = Path(".")
multifile_job = self._multifile_download(
remote_files=remote_files,
dest=destdir,
subfolder=source.subfolder if isinstance(source, HFModelSource) else None,
access_token=source.access_token,
submit_job=False, # Important! Don't submit the job until we have set our _download_cache dict
)
self._download_cache[multifile_job.id] = install_job
install_job._multifile_job = multifile_job
# we remember the path up to the top of the tmpdir so that it may be
# removed safely at the end of the install process.
install_job._install_tmpdir = tmpdir
assert install_job.total_bytes is not None # to avoid type checking complaints in the loop below
files_string = "file" if len(remote_files) == 1 else "files"
self._logger.info(f"Queueing model install: {source} ({len(remote_files)} {files_string})")
files_string = "file" if len(remote_files) == 1 else "files"
self._logger.info(f"Queuing model install: {source} ({len(remote_files)} {files_string})")
self._logger.debug(f"remote_files={remote_files}")
self._download_queue.submit_multifile_download(multifile_job)
for model_file in remote_files:
url = model_file.url
path = root / model_file.path.relative_to(subfolder)
self._logger.debug(f"Downloading {url} => {path}")
install_job.total_bytes += model_file.size
assert hasattr(source, "access_token")
dest = tmpdir / path.parent
dest.mkdir(parents=True, exist_ok=True)
download_job = DownloadJob(
source=url,
dest=dest,
access_token=source.access_token,
)
self._download_cache[download_job.source] = install_job # matches a download job to an install job
install_job.download_parts.add(download_job)
# only start the jobs once install_job.download_parts is fully populated
for download_job in install_job.download_parts:
self._download_queue.submit_download_job(
download_job,
on_start=self._download_started_callback,
on_progress=self._download_progress_callback,
on_complete=self._download_complete_callback,
on_error=self._download_error_callback,
on_cancelled=self._download_cancelled_callback,
)
return install_job
def _stat_size(self, path: Path) -> int:
@@ -766,104 +768,87 @@ class ModelInstallService(ModelInstallServiceBase):
size += sum(self._stat_size(Path(root, x)) for x in files)
return size
def _multifile_download(
self,
remote_files: List[RemoteModelFile],
dest: Path,
subfolder: Optional[Path] = None,
access_token: Optional[str] = None,
submit_job: bool = True,
) -> MultiFileDownloadJob:
# HuggingFace repo subfolders are a little tricky. If the name of the model is "sdxl-turbo", and
# we are installing the "vae" subfolder, we do not want to create an additional folder level, such
# as "sdxl-turbo/vae", nor do we want to put the contents of the vae folder directly into "sdxl-turbo".
# So what we do is to synthesize a folder named "sdxl-turbo_vae" here.
if subfolder:
top = Path(remote_files[0].path.parts[0]) # e.g. "sdxl-turbo/"
path_to_remove = top / subfolder.parts[-1] # sdxl-turbo/vae/
path_to_add = Path(f"{top}_{subfolder}")
else:
path_to_remove = Path(".")
path_to_add = Path(".")
parts: List[RemoteModelFile] = []
for model_file in remote_files:
assert model_file.size is not None
parts.append(
RemoteModelFile(
url=model_file.url, # if a subfolder, then sdxl-turbo_vae/config.json
path=path_to_add / model_file.path.relative_to(path_to_remove),
)
)
return self._download_queue.multifile_download(
parts=parts,
dest=dest,
access_token=access_token,
submit_job=submit_job,
on_start=self._download_started_callback,
on_progress=self._download_progress_callback,
on_complete=self._download_complete_callback,
on_error=self._download_error_callback,
on_cancelled=self._download_cancelled_callback,
)
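A worked example (not from the diff) of the folder synthesis described in the comment at the top of `_multifile_download`, using made-up file paths for an "sdxl-turbo" repo with a "vae" subfolder:

```python
from pathlib import Path

remote_paths = [
    Path("sdxl-turbo/vae/config.json"),
    Path("sdxl-turbo/vae/diffusion_pytorch_model.safetensors"),
]
subfolder = Path("vae")

top = Path(remote_paths[0].parts[0])        # sdxl-turbo
path_to_remove = top / subfolder.parts[-1]  # sdxl-turbo/vae
path_to_add = Path(f"{top}_{subfolder}")    # sdxl-turbo_vae

for p in remote_paths:
    print(path_to_add / p.relative_to(path_to_remove))
# sdxl-turbo_vae/config.json
# sdxl-turbo_vae/diffusion_pytorch_model.safetensors
```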
# ------------------------------------------------------------------
# Callbacks are executed by the download queue in a separate thread
# ------------------------------------------------------------------
def _download_started_callback(self, download_job: MultiFileDownloadJob) -> None:
def _download_started_callback(self, download_job: DownloadJob) -> None:
self._logger.info(f"Model download started: {download_job.source}")
with self._lock:
if install_job := self._download_cache.get(download_job.id, None):
install_job.status = InstallStatus.DOWNLOADING
install_job = self._download_cache[download_job.source]
install_job.status = InstallStatus.DOWNLOADING
if install_job.local_path == install_job._install_tmpdir: # first time
assert download_job.download_path
install_job.local_path = download_job.download_path
install_job.download_parts = download_job.download_parts
install_job.bytes = sum(x.bytes for x in download_job.download_parts)
install_job.total_bytes = download_job.total_bytes
assert download_job.download_path
if install_job.local_path == install_job._install_tmpdir:
partial_path = download_job.download_path.relative_to(install_job._install_tmpdir)
dest_name = partial_path.parts[0]
install_job.local_path = install_job._install_tmpdir / dest_name
# Update the total bytes count for remote sources.
if not install_job.total_bytes:
install_job.total_bytes = sum(x.total_bytes for x in install_job.download_parts)
def _download_progress_callback(self, download_job: DownloadJob) -> None:
with self._lock:
install_job = self._download_cache[download_job.source]
if install_job.cancelled: # This catches the case in which the caller directly calls job.cancel()
self._cancel_download_parts(install_job)
else:
# update sizes
install_job.bytes = sum(x.bytes for x in install_job.download_parts)
self._signal_job_downloading(install_job)
def _download_progress_callback(self, download_job: MultiFileDownloadJob) -> None:
def _download_complete_callback(self, download_job: DownloadJob) -> None:
self._logger.info(f"Model download complete: {download_job.source}")
with self._lock:
if install_job := self._download_cache.get(download_job.id, None):
if install_job.cancelled: # This catches the case in which the caller directly calls job.cancel()
self._download_queue.cancel_job(download_job)
else:
# update sizes
install_job.bytes = sum(x.bytes for x in download_job.download_parts)
install_job.total_bytes = sum(x.total_bytes for x in download_job.download_parts)
self._signal_job_downloading(install_job)
install_job = self._download_cache[download_job.source]
def _download_complete_callback(self, download_job: MultiFileDownloadJob) -> None:
with self._lock:
if install_job := self._download_cache.pop(download_job.id, None):
# are there any more active jobs left in this task?
if install_job.downloading and all(x.complete for x in install_job.download_parts):
self._signal_job_downloads_done(install_job)
self._put_in_queue(install_job) # this starts the installation and registration
self._put_in_queue(install_job)
# Let other threads know that the number of downloads has changed
self._downloads_changed_event.set()
# Let other threads know that the number of downloads has changed
self._download_cache.pop(download_job.source, None)
self._downloads_changed_event.set()
def _download_error_callback(self, download_job: MultiFileDownloadJob, excp: Optional[Exception] = None) -> None:
def _download_error_callback(self, download_job: DownloadJob, excp: Optional[Exception] = None) -> None:
with self._lock:
if install_job := self._download_cache.pop(download_job.id, None):
assert excp is not None
install_job.set_error(excp)
self._download_queue.cancel_job(download_job)
install_job = self._download_cache.pop(download_job.source, None)
assert install_job is not None
assert excp is not None
install_job.set_error(excp)
self._logger.error(
f"Cancelling {install_job.source} due to an error while downloading {download_job.source}: {str(excp)}"
)
self._cancel_download_parts(install_job)
# Let other threads know that the number of downloads has changed
self._downloads_changed_event.set()
# Let other threads know that the number of downloads has changed
self._downloads_changed_event.set()
def _download_cancelled_callback(self, download_job: MultiFileDownloadJob) -> None:
def _download_cancelled_callback(self, download_job: DownloadJob) -> None:
with self._lock:
if install_job := self._download_cache.pop(download_job.id, None):
self._downloads_changed_event.set()
# if install job has already registered an error, then do not replace its status with cancelled
if not install_job.errored:
install_job.cancel()
install_job = self._download_cache.pop(download_job.source, None)
if not install_job:
return
self._downloads_changed_event.set()
self._logger.warning(f"Model download canceled: {download_job.source}")
# if install job has already registered an error, then do not replace its status with cancelled
if not install_job.errored:
install_job.cancel()
self._cancel_download_parts(install_job)
# Let other threads know that the number of downloads has changed
self._downloads_changed_event.set()
# Let other threads know that the number of downloads has changed
self._downloads_changed_event.set()
def _cancel_download_parts(self, install_job: ModelInstallJob) -> None:
# on multipart downloads, _cancel_download_parts() will get called repeatedly from the download callbacks
# do not lock here because it gets called within a locked context
for s in install_job.download_parts:
self._download_queue.cancel_job(s)
if all(x.in_terminal_state for x in install_job.download_parts):
# When all parts have reached their terminal state, we finalize the job to clean up the temporary directory and other resources
self._put_in_queue(install_job)
# ------------------------------------------------------------------------------------------------
# Internal methods that put events on the event bus
@@ -876,9 +861,6 @@ class ModelInstallService(ModelInstallServiceBase):
def _signal_job_downloading(self, job: ModelInstallJob) -> None:
if self._event_bus:
assert job._multifile_job is not None
assert job.bytes is not None
assert job.total_bytes is not None
self._event_bus.emit_model_install_download_progress(job)
def _signal_job_downloads_done(self, job: ModelInstallJob) -> None:
@@ -893,8 +875,6 @@ class ModelInstallService(ModelInstallServiceBase):
self._logger.info(f"Model install complete: {job.source}")
self._logger.debug(f"{job.local_path} registered key {job.config_out.key}")
if self._event_bus:
assert job.local_path is not None
assert job.config_out is not None
self._event_bus.emit_model_install_complete(job)
def _signal_job_errored(self, job: ModelInstallJob) -> None:
@@ -910,13 +890,7 @@ class ModelInstallService(ModelInstallServiceBase):
self._event_bus.emit_model_install_cancelled(job)
@staticmethod
def get_fetcher_from_url(url: str) -> Type[ModelMetadataFetchBase]:
"""
Return a metadata fetcher appropriate for provided url.
This used to be more useful, but the number of supported model
sources has been reduced to HuggingFace alone.
"""
def get_fetcher_from_url(url: str) -> ModelMetadataFetchBase:
if re.match(r"^https?://huggingface.co/[^/]+/[^/]+$", url.lower()):
return HuggingFaceMetadataFetch
raise ValueError(f"Unsupported model source: '{url}'")

View File

@@ -2,11 +2,10 @@
"""Base class for model loader."""
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Callable, Optional
from typing import Optional
from invokeai.backend.model_manager import AnyModel, AnyModelConfig, SubModelType
from invokeai.backend.model_manager.load import LoadedModel, LoadedModelWithoutConfig
from invokeai.backend.model_manager.load import LoadedModel
from invokeai.backend.model_manager.load.convert_cache import ModelConvertCacheBase
from invokeai.backend.model_manager.load.model_cache.model_cache_base import ModelCacheBase
@@ -32,26 +31,3 @@ class ModelLoadServiceBase(ABC):
@abstractmethod
def convert_cache(self) -> ModelConvertCacheBase:
"""Return the checkpoint convert cache used by this loader."""
@abstractmethod
def load_model_from_path(
self, model_path: Path, loader: Optional[Callable[[Path], AnyModel]] = None
) -> LoadedModelWithoutConfig:
"""
Load the model file or directory located at the indicated Path.
This will load an arbitrary model file into the RAM cache. If the optional loader
argument is provided, the loader will be invoked to load the model into
memory. Otherwise the method will call safetensors.torch.load_file() or
torch.load() as appropriate to the file suffix.
Be aware that this returns a LoadedModelWithoutConfig object, which is the same as
LoadedModel, but without the config attribute.
Args:
model_path: A pathlib.Path to a checkpoint-style models file
loader: A Callable that expects a Path and returns a Dict[str, Tensor]
Returns:
A LoadedModel object.
"""

View File

@@ -1,26 +1,18 @@
# Copyright (c) 2024 Lincoln D. Stein and the InvokeAI Team
"""Implementation of model loader service."""
from pathlib import Path
from typing import Callable, Optional, Type
from picklescan.scanner import scan_file_path
from safetensors.torch import load_file as safetensors_load_file
from torch import load as torch_load
from typing import Optional, Type
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.invoker import Invoker
from invokeai.backend.model_manager import AnyModel, AnyModelConfig, SubModelType
from invokeai.backend.model_manager.load import (
LoadedModel,
LoadedModelWithoutConfig,
ModelLoaderRegistry,
ModelLoaderRegistryBase,
)
from invokeai.backend.model_manager.load.convert_cache import ModelConvertCacheBase
from invokeai.backend.model_manager.load.model_cache.model_cache_base import ModelCacheBase
from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.logging import InvokeAILogger
from .model_load_base import ModelLoadServiceBase
@@ -83,41 +75,3 @@ class ModelLoadService(ModelLoadServiceBase):
self._invoker.services.events.emit_model_load_complete(model_config, submodel_type)
return loaded_model
def load_model_from_path(
self, model_path: Path, loader: Optional[Callable[[Path], AnyModel]] = None
) -> LoadedModelWithoutConfig:
cache_key = str(model_path)
ram_cache = self.ram_cache
try:
return LoadedModelWithoutConfig(_locker=ram_cache.get(key=cache_key))
except IndexError:
pass
def torch_load_file(checkpoint: Path) -> AnyModel:
scan_result = scan_file_path(checkpoint)
if scan_result.infected_files != 0:
raise Exception(f"The model at {checkpoint} is potentially infected by malware. Aborting load.")
result = torch_load(checkpoint, map_location="cpu")
return result
def diffusers_load_directory(directory: Path) -> AnyModel:
load_class = GenericDiffusersLoader(
app_config=self._app_config,
logger=self._logger,
ram_cache=self._ram_cache,
convert_cache=self.convert_cache,
).get_hf_load_class(directory)
return load_class.from_pretrained(model_path, torch_dtype=TorchDevice.choose_torch_dtype())
loader = loader or (
diffusers_load_directory
if model_path.is_dir()
else torch_load_file
if model_path.suffix.endswith((".ckpt", ".pt", ".pth", ".bin"))
else lambda path: safetensors_load_file(path, device="cpu")
)
assert loader is not None
raw_model = loader(model_path)
ram_cache.put(key=cache_key, model=raw_model)
return LoadedModelWithoutConfig(_locker=ram_cache.get(key=cache_key))

View File

@@ -12,13 +12,15 @@ from pydantic import BaseModel, Field
from invokeai.app.services.shared.pagination import PaginatedResults
from invokeai.app.util.model_exclude_null import BaseModelExcludeNull
from invokeai.backend.model_manager.config import (
from invokeai.backend.model_manager import (
AnyModelConfig,
BaseModelType,
ControlAdapterDefaultSettings,
MainModelDefaultSettings,
ModelFormat,
ModelType,
)
from invokeai.backend.model_manager.config import (
ControlAdapterDefaultSettings,
MainModelDefaultSettings,
ModelVariantType,
SchedulerPredictionType,
)

View File

@@ -2,19 +2,18 @@
import copy
import itertools
from typing import Any, Optional, TypeVar, Union, get_args, get_origin, get_type_hints
from typing import Annotated, Any, Optional, TypeVar, Union, get_args, get_origin, get_type_hints
import networkx as nx
from pydantic import (
BaseModel,
GetCoreSchemaHandler,
GetJsonSchemaHandler,
ValidationError,
field_validator,
)
from pydantic.fields import Field
from pydantic.json_schema import JsonSchemaValue
from pydantic_core import core_schema
from pydantic_core import CoreSchema
# Importing * is bad karma but needed here for node detection
from invokeai.app.invocations import * # noqa: F401 F403
@@ -278,58 +277,73 @@ class CollectInvocation(BaseInvocation):
return CollectInvocationOutput(collection=copy.copy(self.collection))
class AnyInvocation(BaseInvocation):
@classmethod
def __get_pydantic_core_schema__(cls, source_type: Any, handler: GetCoreSchemaHandler) -> core_schema.CoreSchema:
def validate_invocation(v: Any) -> "AnyInvocation":
return BaseInvocation.get_typeadapter().validate_python(v)
return core_schema.no_info_plain_validator_function(validate_invocation)
@classmethod
def __get_pydantic_json_schema__(
cls, core_schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue:
# Nodes are too powerful, we have to make our own OpenAPI schema manually
# No but really, because the schema is dynamic depending on loaded nodes, we need to generate it manually
oneOf: list[dict[str, str]] = []
names = [i.__name__ for i in BaseInvocation.get_invocations()]
for name in sorted(names):
oneOf.append({"$ref": f"#/components/schemas/{name}"})
return {"oneOf": oneOf}
class AnyInvocationOutput(BaseInvocationOutput):
@classmethod
def __get_pydantic_core_schema__(cls, source_type: Any, handler: GetCoreSchemaHandler):
def validate_invocation_output(v: Any) -> "AnyInvocationOutput":
return BaseInvocationOutput.get_typeadapter().validate_python(v)
return core_schema.no_info_plain_validator_function(validate_invocation_output)
@classmethod
def __get_pydantic_json_schema__(
cls, core_schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue:
# Nodes are too powerful, we have to make our own OpenAPI schema manually
# No but really, because the schema is dynamic depending on loaded nodes, we need to generate it manually
oneOf: list[dict[str, str]] = []
names = [i.__name__ for i in BaseInvocationOutput.get_outputs()]
for name in sorted(names):
oneOf.append({"$ref": f"#/components/schemas/{name}"})
return {"oneOf": oneOf}
class Graph(BaseModel):
id: str = Field(description="The id of this graph", default_factory=uuid_string)
# TODO: use a list (and never use dict in a BaseModel) because pydantic/fastapi hates me
nodes: dict[str, AnyInvocation] = Field(description="The nodes in this graph", default_factory=dict)
nodes: dict[str, BaseInvocation] = Field(description="The nodes in this graph", default_factory=dict)
edges: list[Edge] = Field(
description="The connections between nodes and their fields in this graph",
default_factory=list,
)
@field_validator("nodes", mode="plain")
@classmethod
def validate_nodes(cls, v: dict[str, Any]):
"""Validates the nodes in the graph by retrieving a union of all node types and validating each node."""
# Invocations register themselves as their python modules are executed. The union of all invocations is
# constructed at runtime. We use pydantic to validate `Graph.nodes` using that union.
#
# It's possible that when `graph.py` is executed, not all invocation-containing modules will have executed. If
# we construct the invocation union as `graph.py` is executed, we may miss some invocations. Those missing
# invocations will cause a graph to fail if they are used.
#
# We can get around this by validating the nodes in the graph using a "plain" validator, which overrides the
# pydantic validation entirely. This allows us to validate the nodes using the union of invocations at runtime.
#
# This same pattern is used in `GraphExecutionState`.
nodes: dict[str, BaseInvocation] = {}
typeadapter = BaseInvocation.get_typeadapter()
for node_id, node in v.items():
nodes[node_id] = typeadapter.validate_python(node)
return nodes
@classmethod
def __get_pydantic_json_schema__(cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler) -> JsonSchemaValue:
# We use a "plain" validator to validate the nodes in the graph. Pydantic is unable to create a JSON Schema for
# fields that use "plain" validators, so we have to hack around this. Also, we need to add all invocations to
# the generated schema as options for the `nodes` field.
#
# The workaround is to create a new BaseModel that has the same fields as `Graph` but without the validator and
# with the invocation union as the type for the `nodes` field. Pydantic then generates the JSON Schema as
# expected.
#
# You might be tempted to do something like this:
#
# ```py
# cloned_model = create_model(cls.__name__, __base__=cls, nodes=...)
# delattr(cloned_model, "validate_nodes")
# cloned_model.model_rebuild(force=True)
# json_schema = handler(cloned_model.__pydantic_core_schema__)
# ```
#
# Unfortunately, this does not work. Calling `handler` here results in infinite recursion as pydantic attempts
# to build the JSON Schema for the cloned model. Instead, we have to manually clone the model.
#
# This same pattern is used in `GraphExecutionState`.
class Graph(BaseModel):
id: Optional[str] = Field(default=None, description="The id of this graph")
nodes: dict[
str, Annotated[Union[tuple(BaseInvocation._invocation_classes)], Field(discriminator="type")]
] = Field(description="The nodes in this graph")
edges: list[Edge] = Field(description="The connections between nodes and their fields in this graph")
json_schema = handler(Graph.__pydantic_core_schema__)
json_schema = handler.resolve_ref_schema(json_schema)
return json_schema
def add_node(self, node: BaseInvocation) -> None:
"""Adds a node to a graph
@@ -760,7 +774,7 @@ class GraphExecutionState(BaseModel):
)
# The results of executed nodes
results: dict[str, AnyInvocationOutput] = Field(description="The results of node executions", default_factory=dict)
results: dict[str, BaseInvocationOutput] = Field(description="The results of node executions", default_factory=dict)
# Errors raised when executing nodes
errors: dict[str, str] = Field(description="Errors raised when executing nodes", default_factory=dict)
@@ -777,12 +791,52 @@ class GraphExecutionState(BaseModel):
default_factory=dict,
)
@field_validator("results", mode="plain")
@classmethod
def validate_results(cls, v: dict[str, BaseInvocationOutput]):
"""Validates the results in the GES by retrieving a union of all output types and validating each result."""
# See the comment in `Graph.validate_nodes` for an explanation of this logic.
results: dict[str, BaseInvocationOutput] = {}
typeadapter = BaseInvocationOutput.get_typeadapter()
for result_id, result in v.items():
results[result_id] = typeadapter.validate_python(result)
return results
@field_validator("graph")
def graph_is_valid(cls, v: Graph):
"""Validates that the graph is valid"""
v.validate_self()
return v
@classmethod
def __get_pydantic_json_schema__(cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler) -> JsonSchemaValue:
# See the comment in `Graph.__get_pydantic_json_schema__` for an explanation of this logic.
class GraphExecutionState(BaseModel):
"""Tracks the state of a graph execution"""
id: str = Field(description="The id of the execution state")
graph: Graph = Field(description="The graph being executed")
execution_graph: Graph = Field(description="The expanded graph of activated and executed nodes")
executed: set[str] = Field(description="The set of node ids that have been executed")
executed_history: list[str] = Field(
description="The list of node ids that have been executed, in order of execution"
)
results: dict[
str, Annotated[Union[tuple(BaseInvocationOutput._output_classes)], Field(discriminator="type")]
] = Field(description="The results of node executions")
errors: dict[str, str] = Field(description="Errors raised when executing nodes")
prepared_source_mapping: dict[str, str] = Field(
description="The map of prepared nodes to original graph nodes"
)
source_prepared_mapping: dict[str, set[str]] = Field(
description="The map of original graph nodes to prepared nodes"
)
json_schema = handler(GraphExecutionState.__pydantic_core_schema__)
json_schema = handler.resolve_ref_schema(json_schema)
return json_schema
def next(self) -> Optional[BaseInvocation]:
"""Gets the next node ready to execute."""

View File

@@ -3,7 +3,6 @@ from pathlib import Path
from typing import TYPE_CHECKING, Callable, Optional, Union
from PIL.Image import Image
from pydantic.networks import AnyHttpUrl
from torch import Tensor
from invokeai.app.invocations.constants import IMAGE_MODES
@@ -15,15 +14,8 @@ from invokeai.app.services.images.images_common import ImageDTO
from invokeai.app.services.invocation_services import InvocationServices
from invokeai.app.services.model_records.model_records_base import UnknownModelException
from invokeai.app.util.step_callback import stable_diffusion_step_callback
from invokeai.backend.model_manager.config import (
AnyModel,
AnyModelConfig,
BaseModelType,
ModelFormat,
ModelType,
SubModelType,
)
from invokeai.backend.model_manager.load.load_base import LoadedModel, LoadedModelWithoutConfig
from invokeai.backend.model_manager.config import AnyModelConfig, BaseModelType, ModelFormat, ModelType, SubModelType
from invokeai.backend.model_manager.load.load_base import LoadedModel
from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData
@@ -328,10 +320,8 @@ class ConditioningInterface(InvocationContextInterface):
class ModelsInterface(InvocationContextInterface):
"""Common API for loading, downloading and managing models."""
def exists(self, identifier: Union[str, "ModelIdentifierField"]) -> bool:
"""Check if a model exists.
"""Checks if a model exists.
Args:
identifier: The key or ModelField representing the model.
@@ -341,13 +331,13 @@ class ModelsInterface(InvocationContextInterface):
"""
if isinstance(identifier, str):
return self._services.model_manager.store.exists(identifier)
else:
return self._services.model_manager.store.exists(identifier.key)
return self._services.model_manager.store.exists(identifier.key)
def load(
self, identifier: Union[str, "ModelIdentifierField"], submodel_type: Optional[SubModelType] = None
) -> LoadedModel:
"""Load a model.
"""Loads a model.
Args:
identifier: The key or ModelField representing the model.
@@ -371,7 +361,7 @@ class ModelsInterface(InvocationContextInterface):
def load_by_attrs(
self, name: str, base: BaseModelType, type: ModelType, submodel_type: Optional[SubModelType] = None
) -> LoadedModel:
"""Load a model by its attributes.
"""Loads a model by its attributes.
Args:
name: Name of the model.
@@ -394,7 +384,7 @@ class ModelsInterface(InvocationContextInterface):
return self._services.model_manager.load.load_model(configs[0], submodel_type)
def get_config(self, identifier: Union[str, "ModelIdentifierField"]) -> AnyModelConfig:
"""Get a model's config.
"""Gets a model's config.
Args:
identifier: The key or ModelField representing the model.
@@ -404,11 +394,11 @@ class ModelsInterface(InvocationContextInterface):
"""
if isinstance(identifier, str):
return self._services.model_manager.store.get_model(identifier)
else:
return self._services.model_manager.store.get_model(identifier.key)
return self._services.model_manager.store.get_model(identifier.key)
def search_by_path(self, path: Path) -> list[AnyModelConfig]:
"""Search for models by path.
"""Searches for models by path.
Args:
path: The path to search for.
@@ -425,7 +415,7 @@ class ModelsInterface(InvocationContextInterface):
type: Optional[ModelType] = None,
format: Optional[ModelFormat] = None,
) -> list[AnyModelConfig]:
"""Search for models by attributes.
"""Searches for models by attributes.
Args:
name: The name to search for (exact match).
@@ -444,72 +434,6 @@ class ModelsInterface(InvocationContextInterface):
model_format=format,
)
def download_and_cache_model(
self,
source: str | AnyHttpUrl,
) -> Path:
"""
Download the model file located at source to the models cache and return its Path.
This can be used to install single-file models and other resources of arbitrary types
which should not get registered with the database. If the model is already
installed, the cached path will be returned. Otherwise it will be downloaded.
Args:
source: A URL that points to the model, or a huggingface repo_id.
Returns:
Path to the downloaded model
"""
return self._services.model_manager.install.download_and_cache_model(source=source)
def load_local_model(
self,
model_path: Path,
loader: Optional[Callable[[Path], AnyModel]] = None,
) -> LoadedModelWithoutConfig:
"""
Load the model file located at the indicated path
If a loader callable is provided, it will be invoked to load the model. Otherwise,
`safetensors.torch.load_file()` or `torch.load()` will be called to load the model.
Be aware that the LoadedModelWithoutConfig object has no `config` attribute
Args:
model_path: A model Path
loader: A Callable that expects a Path and returns a dict[str|int, Any]
Returns:
A LoadedModelWithoutConfig object.
"""
return self._services.model_manager.load.load_model_from_path(model_path=model_path, loader=loader)
def load_remote_model(
self,
source: str | AnyHttpUrl,
loader: Optional[Callable[[Path], AnyModel]] = None,
) -> LoadedModelWithoutConfig:
"""
Download, cache, and load the model file located at the indicated URL or repo_id.
If the model is already downloaded, it will be loaded from the cache.
If a loader callable is provided, it will be invoked to load the model. Otherwise,
`safetensors.torch.load_file()` or `torch.load()` will be called to load the model.
Be aware that the LoadedModelWithoutConfig object has no `config` attribute.
Args:
source: A URL or huggingface repo_id.
loader: A Callable that expects a Path and returns a dict[str|int, Any]
Returns:
A LoadedModelWithoutConfig object.
"""
model_path = self._services.model_manager.install.download_and_cache_model(source=str(source))
return self._services.model_manager.load.load_model_from_path(model_path=model_path, loader=loader)
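For context, a minimal usage sketch of the two methods documented above, as they would have been called from an invocation's `invoke()` method before this change; the URL, filename and loader are illustrative placeholders:
```python
# Hypothetical usage of download_and_cache_model() / load_remote_model() via the
# invocation context; not actual InvokeAI invocation code.
from pathlib import Path

import torch

def _example(context) -> None:
    # Download (or reuse from the cache) a single-file resource without registering it.
    weights_path: Path = context.models.download_and_cache_model(
        source="https://example.com/some_model.safetensors"
    )

    # Download, cache and load in one step, supplying a custom loader.
    loaded = context.models.load_remote_model(
        source="https://example.com/some_model.pt",
        loader=lambda p: torch.load(p, map_location="cpu"),
    )
    with loaded as model:
        ...  # run inference with `model`
```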
class ConfigInterface(InvocationContextInterface):
def get(self) -> InvokeAIAppConfig:

View File

@@ -13,7 +13,6 @@ from invokeai.app.services.shared.sqlite_migrator.migrations.migration_7 import
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_8 import build_migration_8
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_9 import build_migration_9
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_10 import build_migration_10
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_11 import build_migration_11
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_impl import SqliteMigrator
@@ -44,7 +43,6 @@ def init_db(config: InvokeAIAppConfig, logger: Logger, image_files: ImageFileSto
migrator.register_migration(build_migration_8(app_config=config))
migrator.register_migration(build_migration_9())
migrator.register_migration(build_migration_10())
migrator.register_migration(build_migration_11(app_config=config, logger=logger))
migrator.run_migrations()
return db

View File

@@ -1,75 +0,0 @@
import shutil
import sqlite3
from logging import Logger
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration
LEGACY_CORE_MODELS = [
# OpenPose
"any/annotators/dwpose/yolox_l.onnx",
"any/annotators/dwpose/dw-ll_ucoco_384.onnx",
# DepthAnything
"any/annotators/depth_anything/depth_anything_vitl14.pth",
"any/annotators/depth_anything/depth_anything_vitb14.pth",
"any/annotators/depth_anything/depth_anything_vits14.pth",
# Lama inpaint
"core/misc/lama/lama.pt",
# RealESRGAN upscale
"core/upscaling/realesrgan/RealESRGAN_x4plus.pth",
"core/upscaling/realesrgan/RealESRGAN_x4plus_anime_6B.pth",
"core/upscaling/realesrgan/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth",
"core/upscaling/realesrgan/RealESRGAN_x2plus.pth",
]
class Migration11Callback:
def __init__(self, app_config: InvokeAIAppConfig, logger: Logger) -> None:
self._app_config = app_config
self._logger = logger
def __call__(self, cursor: sqlite3.Cursor) -> None:
self._remove_convert_cache()
self._remove_downloaded_models()
self._remove_unused_core_models()
def _remove_convert_cache(self) -> None:
"""Rename models/.cache to models/.convert_cache."""
self._logger.info("Removing .cache directory. Converted models will now be cached in .convert_cache.")
legacy_convert_path = self._app_config.root_path / "models" / ".cache"
shutil.rmtree(legacy_convert_path, ignore_errors=True)
def _remove_downloaded_models(self) -> None:
"""Remove models from their old locations; they will re-download when needed."""
self._logger.info(
"Removing legacy just-in-time models. Downloaded models will now be cached in .download_cache."
)
for model_path in LEGACY_CORE_MODELS:
legacy_dest_path = self._app_config.models_path / model_path
legacy_dest_path.unlink(missing_ok=True)
def _remove_unused_core_models(self) -> None:
"""Remove unused core models and their directories."""
self._logger.info("Removing defunct core models.")
for dir in ["face_restoration", "misc", "upscaling"]:
path_to_remove = self._app_config.models_path / "core" / dir
shutil.rmtree(path_to_remove, ignore_errors=True)
shutil.rmtree(self._app_config.models_path / "any" / "annotators", ignore_errors=True)
def build_migration_11(app_config: InvokeAIAppConfig, logger: Logger) -> Migration:
"""
Build the migration from database version 10 to 11.
This migration does the following:
- Removes legacy just-in-time "core" models previously downloaded with download_with_progress_bar();
they will be re-downloaded into the new "models/.download_cache" directory when needed.
- Removes the legacy "models/.cache" conversion cache; converted models are now cached in "models/.convert_cache".
"""
migration_11 = Migration(
from_version=10,
to_version=11,
callback=Migration11Callback(app_config=app_config, logger=logger),
)
return migration_11

View File

@@ -1,116 +0,0 @@
from typing import Any, Callable, Optional
from fastapi import FastAPI
from fastapi.openapi.utils import get_openapi
from pydantic.json_schema import models_json_schema
from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, UIConfigBase
from invokeai.app.invocations.fields import InputFieldJSONSchemaExtra, OutputFieldJSONSchemaExtra
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.services.events.events_common import EventBase
from invokeai.app.services.session_processor.session_processor_common import ProgressImage
def move_defs_to_top_level(openapi_schema: dict[str, Any], component_schema: dict[str, Any]) -> None:
"""Moves a component schema's $defs to the top level of the openapi schema. Useful when generating a schema
for a single model that needs to be added back to the top level of the schema. Mutates openapi_schema and
component_schema."""
defs = component_schema.pop("$defs", {})
for schema_key, json_schema in defs.items():
if schema_key in openapi_schema["components"]["schemas"]:
continue
openapi_schema["components"]["schemas"][schema_key] = json_schema
def get_openapi_func(
app: FastAPI, post_transform: Optional[Callable[[dict[str, Any]], dict[str, Any]]] = None
) -> Callable[[], dict[str, Any]]:
"""Gets the OpenAPI schema generator function.
Args:
app (FastAPI): The FastAPI app to generate the schema for.
post_transform (Optional[Callable[[dict[str, Any]], dict[str, Any]]], optional): A function to apply to the
generated schema before returning it. Defaults to None.
Returns:
Callable[[], dict[str, Any]]: The OpenAPI schema generator function. When first called, the generated schema is
cached in `app.openapi_schema`. On subsequent calls, the cached schema is returned. This caching behaviour
matches FastAPI's default schema generation caching.
"""
def openapi() -> dict[str, Any]:
if app.openapi_schema:
return app.openapi_schema
openapi_schema = get_openapi(
title=app.title,
description="An API for invoking AI image operations",
version="1.0.0",
routes=app.routes,
separate_input_output_schemas=False, # https://fastapi.tiangolo.com/how-to/separate-openapi-schemas/
)
# We'll create a map of invocation type to output schema to make some types simpler on the client.
invocation_output_map_properties: dict[str, Any] = {}
invocation_output_map_required: list[str] = []
# We need to manually add all outputs to the schema - pydantic doesn't add them because they aren't used directly.
for output in BaseInvocationOutput.get_outputs():
json_schema = output.model_json_schema(mode="serialization", ref_template="#/components/schemas/{model}")
move_defs_to_top_level(openapi_schema, json_schema)
openapi_schema["components"]["schemas"][output.__name__] = json_schema
# Technically, invocations are added to the schema by pydantic, but we still need to manually set their output
# property, so we'll just do it all manually.
for invocation in BaseInvocation.get_invocations():
json_schema = invocation.model_json_schema(
mode="serialization", ref_template="#/components/schemas/{model}"
)
move_defs_to_top_level(openapi_schema, json_schema)
output_title = invocation.get_output_annotation().__name__
outputs_ref = {"$ref": f"#/components/schemas/{output_title}"}
json_schema["output"] = outputs_ref
openapi_schema["components"]["schemas"][invocation.__name__] = json_schema
# Add this invocation and its output to the output map
invocation_type = invocation.get_type()
invocation_output_map_properties[invocation_type] = json_schema["output"]
invocation_output_map_required.append(invocation_type)
# Add the output map to the schema
openapi_schema["components"]["schemas"]["InvocationOutputMap"] = {
"type": "object",
"properties": invocation_output_map_properties,
"required": invocation_output_map_required,
}
# Some models don't end up in the schemas as standalone definitions because they aren't used directly in the API.
# We need to add them manually here. WARNING: Pydantic can choke if you call `model.model_json_schema()` to get
# a schema. This has something to do with schema refs - not totally clear. For whatever reason, using
# `models_json_schema` seems to work fine.
additional_models = [
*EventBase.get_events(),
UIConfigBase,
InputFieldJSONSchemaExtra,
OutputFieldJSONSchemaExtra,
ModelIdentifierField,
ProgressImage,
]
additional_schemas = models_json_schema(
[(m, "serialization") for m in additional_models],
ref_template="#/components/schemas/{model}",
)
# additional_schemas[1] is a dict of $defs that we need to add to the top level of the schema
move_defs_to_top_level(openapi_schema, additional_schemas[1])
if post_transform is not None:
openapi_schema = post_transform(openapi_schema)
openapi_schema["components"]["schemas"] = dict(sorted(openapi_schema["components"]["schemas"].items()))
app.openapi_schema = openapi_schema
return app.openapi_schema
return openapi
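Presumably this generator is attached to the app using FastAPI's standard custom-OpenAPI hook, along these lines (the app instance shown is illustrative):
```python
# Sketch of wiring the generator above into the app; FastAPI's documented pattern
# for custom OpenAPI schemas is to replace app.openapi with a schema-returning callable.
from fastapi import FastAPI

app = FastAPI(title="InvokeAI")
app.openapi = get_openapi_func(app)  # later calls reuse the cached app.openapi_schema
```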

View File

@@ -0,0 +1,51 @@
from pathlib import Path
from urllib import request
from tqdm import tqdm
from invokeai.backend.util.logging import InvokeAILogger
class ProgressBar:
"""Simple progress bar for urllib.request.urlretrieve using tqdm."""
def __init__(self, model_name: str = "file"):
self.pbar = None
self.name = model_name
def __call__(self, block_num: int, block_size: int, total_size: int):
if not self.pbar:
self.pbar = tqdm(
desc=self.name,
initial=0,
unit="iB",
unit_scale=True,
unit_divisor=1000,
total=total_size,
)
self.pbar.update(block_size)
def download_with_progress_bar(name: str, url: str, dest_path: Path) -> bool:
"""Download a file from a URL to a destination path, with a progress bar.
If the file already exists, it will not be downloaded again.
Exceptions are not caught.
Args:
name (str): Name of the file being downloaded.
url (str): URL to download the file from.
dest_path (Path): Destination path to save the file to.
Returns:
bool: True if the file was downloaded, False if it already existed.
"""
if dest_path.exists():
return False # already downloaded
InvokeAILogger.get_logger().info(f"Downloading {name}...")
dest_path.parent.mkdir(parents=True, exist_ok=True)
request.urlretrieve(url, dest_path, ProgressBar(name))
return True
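A typical call, using one of the model URLs referenced elsewhere in this change (the destination path is illustrative):
```python
# Example invocation of the helper above; dest_path is an assumed location under
# the models directory.
from pathlib import Path

downloaded = download_with_progress_bar(
    name="yolox_l.onnx",
    url="https://huggingface.co/yzd-v/DWPose/resolve/main/yolox_l.onnx?download=true",
    dest_path=Path("models/any/annotators/dwpose/yolox_l.onnx"),
)
print("downloaded" if downloaded else "already present")
```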

View File

@@ -1,5 +1,5 @@
from pathlib import Path
from typing import Literal
import pathlib
from typing import Literal, Union
import cv2
import numpy as np
@@ -10,17 +10,28 @@ from PIL import Image
from torchvision.transforms import Compose
from invokeai.app.services.config.config_default import get_config
from invokeai.app.util.download_with_progress import download_with_progress_bar
from invokeai.backend.image_util.depth_anything.model.dpt import DPT_DINOv2
from invokeai.backend.image_util.depth_anything.utilities.util import NormalizeImage, PrepareForNet, Resize
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.logging import InvokeAILogger
config = get_config()
logger = InvokeAILogger.get_logger(config=config)
DEPTH_ANYTHING_MODELS = {
"large": "https://huggingface.co/spaces/LiheYoung/Depth-Anything/resolve/main/checkpoints/depth_anything_vitl14.pth?download=true",
"base": "https://huggingface.co/spaces/LiheYoung/Depth-Anything/resolve/main/checkpoints/depth_anything_vitb14.pth?download=true",
"small": "https://huggingface.co/spaces/LiheYoung/Depth-Anything/resolve/main/checkpoints/depth_anything_vits14.pth?download=true",
"large": {
"url": "https://huggingface.co/spaces/LiheYoung/Depth-Anything/resolve/main/checkpoints/depth_anything_vitl14.pth?download=true",
"local": "any/annotators/depth_anything/depth_anything_vitl14.pth",
},
"base": {
"url": "https://huggingface.co/spaces/LiheYoung/Depth-Anything/resolve/main/checkpoints/depth_anything_vitb14.pth?download=true",
"local": "any/annotators/depth_anything/depth_anything_vitb14.pth",
},
"small": {
"url": "https://huggingface.co/spaces/LiheYoung/Depth-Anything/resolve/main/checkpoints/depth_anything_vits14.pth?download=true",
"local": "any/annotators/depth_anything/depth_anything_vits14.pth",
},
}
@@ -42,27 +53,36 @@ transform = Compose(
class DepthAnythingDetector:
def __init__(self, model: DPT_DINOv2, device: torch.device) -> None:
self.model = model
self.device = device
def __init__(self) -> None:
self.model = None
self.model_size: Union[Literal["large", "base", "small"], None] = None
self.device = TorchDevice.choose_torch_device()
@staticmethod
def load_model(
model_path: Path, device: torch.device, model_size: Literal["large", "base", "small"] = "small"
) -> DPT_DINOv2:
match model_size:
case "small":
model = DPT_DINOv2(encoder="vits", features=64, out_channels=[48, 96, 192, 384])
case "base":
model = DPT_DINOv2(encoder="vitb", features=128, out_channels=[96, 192, 384, 768])
case "large":
model = DPT_DINOv2(encoder="vitl", features=256, out_channels=[256, 512, 1024, 1024])
def load_model(self, model_size: Literal["large", "base", "small"] = "small"):
DEPTH_ANYTHING_MODEL_PATH = config.models_path / DEPTH_ANYTHING_MODELS[model_size]["local"]
download_with_progress_bar(
pathlib.Path(DEPTH_ANYTHING_MODELS[model_size]["url"]).name,
DEPTH_ANYTHING_MODELS[model_size]["url"],
DEPTH_ANYTHING_MODEL_PATH,
)
model.load_state_dict(torch.load(model_path.as_posix(), map_location="cpu"))
model.eval()
if not self.model or model_size != self.model_size:
del self.model
self.model_size = model_size
model.to(device)
return model
match self.model_size:
case "small":
self.model = DPT_DINOv2(encoder="vits", features=64, out_channels=[48, 96, 192, 384])
case "base":
self.model = DPT_DINOv2(encoder="vitb", features=128, out_channels=[96, 192, 384, 768])
case "large":
self.model = DPT_DINOv2(encoder="vitl", features=256, out_channels=[256, 512, 1024, 1024])
self.model.load_state_dict(torch.load(DEPTH_ANYTHING_MODEL_PATH.as_posix(), map_location="cpu"))
self.model.eval()
self.model.to(self.device)
return self.model
def __call__(self, image: Image.Image, resolution: int = 512) -> Image.Image:
if not self.model:

View File

@@ -1,53 +1,30 @@
from pathlib import Path
from typing import Dict
import numpy as np
import torch
from controlnet_aux.util import resize_image
from PIL import Image
from invokeai.backend.image_util.dw_openpose.utils import NDArrayInt, draw_bodypose, draw_facepose, draw_handpose
from invokeai.backend.image_util.dw_openpose.utils import draw_bodypose, draw_facepose, draw_handpose
from invokeai.backend.image_util.dw_openpose.wholebody import Wholebody
DWPOSE_MODELS = {
"yolox_l.onnx": "https://huggingface.co/yzd-v/DWPose/resolve/main/yolox_l.onnx?download=true",
"dw-ll_ucoco_384.onnx": "https://huggingface.co/yzd-v/DWPose/resolve/main/dw-ll_ucoco_384.onnx?download=true",
}
def draw_pose(
pose: Dict[str, NDArrayInt | Dict[str, NDArrayInt]],
H: int,
W: int,
draw_face: bool = True,
draw_body: bool = True,
draw_hands: bool = True,
resolution: int = 512,
) -> Image.Image:
def draw_pose(pose, H, W, draw_face=True, draw_body=True, draw_hands=True, resolution=512):
bodies = pose["bodies"]
faces = pose["faces"]
hands = pose["hands"]
assert isinstance(bodies, dict)
candidate = bodies["candidate"]
assert isinstance(bodies, dict)
subset = bodies["subset"]
canvas = np.zeros(shape=(H, W, 3), dtype=np.uint8)
if draw_body:
canvas = draw_bodypose(canvas, candidate, subset)
if draw_hands:
assert isinstance(hands, np.ndarray)
canvas = draw_handpose(canvas, hands)
if draw_face:
assert isinstance(hands, np.ndarray)
canvas = draw_facepose(canvas, faces) # type: ignore
canvas = draw_facepose(canvas, faces)
dwpose_image: Image.Image = resize_image(
dwpose_image = resize_image(
canvas,
resolution,
)
@@ -62,16 +39,11 @@ class DWOpenposeDetector:
Credits: https://github.com/IDEA-Research/DWPose
"""
def __init__(self, onnx_det: Path, onnx_pose: Path) -> None:
self.pose_estimation = Wholebody(onnx_det=onnx_det, onnx_pose=onnx_pose)
def __init__(self) -> None:
self.pose_estimation = Wholebody()
def __call__(
self,
image: Image.Image,
draw_face: bool = False,
draw_body: bool = True,
draw_hands: bool = False,
resolution: int = 512,
self, image: Image.Image, draw_face=False, draw_body=True, draw_hands=False, resolution=512
) -> Image.Image:
np_image = np.array(image)
H, W, C = np_image.shape
@@ -107,6 +79,3 @@ class DWOpenposeDetector:
return draw_pose(
pose, H, W, draw_face=draw_face, draw_hands=draw_hands, draw_body=draw_body, resolution=resolution
)
__all__ = ["DWPOSE_MODELS", "DWOpenposeDetector"]

View File

@@ -5,13 +5,11 @@ import math
import cv2
import matplotlib
import numpy as np
import numpy.typing as npt
eps = 0.01
NDArrayInt = npt.NDArray[np.uint8]
def draw_bodypose(canvas: NDArrayInt, candidate: NDArrayInt, subset: NDArrayInt) -> NDArrayInt:
def draw_bodypose(canvas, candidate, subset):
H, W, C = canvas.shape
candidate = np.array(candidate)
subset = np.array(subset)
@@ -90,7 +88,7 @@ def draw_bodypose(canvas: NDArrayInt, candidate: NDArrayInt, subset: NDArrayInt)
return canvas
def draw_handpose(canvas: NDArrayInt, all_hand_peaks: NDArrayInt) -> NDArrayInt:
def draw_handpose(canvas, all_hand_peaks):
H, W, C = canvas.shape
edges = [
@@ -144,7 +142,7 @@ def draw_handpose(canvas: NDArrayInt, all_hand_peaks: NDArrayInt) -> NDArrayInt:
return canvas
def draw_facepose(canvas: NDArrayInt, all_lmks: NDArrayInt) -> NDArrayInt:
def draw_facepose(canvas, all_lmks):
H, W, C = canvas.shape
for lmks in all_lmks:
lmks = np.array(lmks)

View File

@@ -2,26 +2,47 @@
# Modified pathing to suit Invoke
from pathlib import Path
import numpy as np
import onnxruntime as ort
from invokeai.app.services.config.config_default import get_config
from invokeai.app.util.download_with_progress import download_with_progress_bar
from invokeai.backend.util.devices import TorchDevice
from .onnxdet import inference_detector
from .onnxpose import inference_pose
DWPOSE_MODELS = {
"yolox_l.onnx": {
"local": "any/annotators/dwpose/yolox_l.onnx",
"url": "https://huggingface.co/yzd-v/DWPose/resolve/main/yolox_l.onnx?download=true",
},
"dw-ll_ucoco_384.onnx": {
"local": "any/annotators/dwpose/dw-ll_ucoco_384.onnx",
"url": "https://huggingface.co/yzd-v/DWPose/resolve/main/dw-ll_ucoco_384.onnx?download=true",
},
}
config = get_config()
class Wholebody:
def __init__(self, onnx_det: Path, onnx_pose: Path):
def __init__(self):
device = TorchDevice.choose_torch_device()
providers = ["CUDAExecutionProvider"] if device.type == "cuda" else ["CPUExecutionProvider"]
DET_MODEL_PATH = config.models_path / DWPOSE_MODELS["yolox_l.onnx"]["local"]
download_with_progress_bar("yolox_l.onnx", DWPOSE_MODELS["yolox_l.onnx"]["url"], DET_MODEL_PATH)
POSE_MODEL_PATH = config.models_path / DWPOSE_MODELS["dw-ll_ucoco_384.onnx"]["local"]
download_with_progress_bar(
"dw-ll_ucoco_384.onnx", DWPOSE_MODELS["dw-ll_ucoco_384.onnx"]["url"], POSE_MODEL_PATH
)
onnx_det = DET_MODEL_PATH
onnx_pose = POSE_MODEL_PATH
self.session_det = ort.InferenceSession(path_or_bytes=onnx_det, providers=providers)
self.session_pose = ort.InferenceSession(path_or_bytes=onnx_pose, providers=providers)

View File

@@ -1,4 +1,4 @@
from pathlib import Path
import gc
from typing import Any
import numpy as np
@@ -6,7 +6,9 @@ import torch
from PIL import Image
import invokeai.backend.util.logging as logger
from invokeai.backend.model_manager.config import AnyModel
from invokeai.app.services.config.config_default import get_config
from invokeai.app.util.download_with_progress import download_with_progress_bar
from invokeai.backend.util.devices import TorchDevice
def norm_img(np_img):
@@ -17,11 +19,28 @@ def norm_img(np_img):
return np_img
class LaMA:
def __init__(self, model: AnyModel):
self._model = model
def load_jit_model(url_or_path, device):
model_path = url_or_path
logger.info(f"Loading model from: {model_path}")
model = torch.jit.load(model_path, map_location="cpu").to(device)
model.eval()
return model
class LaMA:
def __call__(self, input_image: Image.Image, *args: Any, **kwds: Any) -> Any:
device = TorchDevice.choose_torch_device()
model_location = get_config().models_path / "core/misc/lama/lama.pt"
if not model_location.exists():
download_with_progress_bar(
name="LaMa Inpainting Model",
url="https://github.com/Sanster/models/releases/download/add_big_lama/big-lama.pt",
dest_path=model_location,
)
model = load_jit_model(model_location, device)
image = np.asarray(input_image.convert("RGB"))
image = norm_img(image)
@@ -29,25 +48,20 @@ class LaMA:
mask = np.asarray(mask)
mask = np.invert(mask)
mask = norm_img(mask)
mask = (mask > 0) * 1
device = next(self._model.buffers()).device
mask = (mask > 0) * 1
image = torch.from_numpy(image).unsqueeze(0).to(device)
mask = torch.from_numpy(mask).unsqueeze(0).to(device)
with torch.inference_mode():
infilled_image = self._model(image, mask)
infilled_image = model(image, mask)
infilled_image = infilled_image[0].permute(1, 2, 0).detach().cpu().numpy()
infilled_image = np.clip(infilled_image * 255, 0, 255).astype("uint8")
infilled_image = Image.fromarray(infilled_image)
return infilled_image
del model
gc.collect()
torch.cuda.empty_cache()
@staticmethod
def load_jit_model(url_or_path: str | Path, device: torch.device | str = "cpu") -> torch.nn.Module:
model_path = url_or_path
logger.info(f"Loading model from: {model_path}")
model: torch.nn.Module = torch.jit.load(model_path, map_location="cpu").to(device) # type: ignore
model.eval()
return model
return infilled_image

View File

@@ -1,5 +1,6 @@
import math
from enum import Enum
from pathlib import Path
from typing import Any, Optional
import cv2
@@ -10,7 +11,6 @@ from cv2.typing import MatLike
from tqdm import tqdm
from invokeai.backend.image_util.basicsr.rrdbnet_arch import RRDBNet
from invokeai.backend.model_manager.config import AnyModel
from invokeai.backend.util.devices import TorchDevice
"""
@@ -52,7 +52,7 @@ class RealESRGAN:
def __init__(
self,
scale: int,
loadnet: AnyModel,
model_path: Path,
model: RRDBNet,
tile: int = 0,
tile_pad: int = 10,
@@ -67,6 +67,8 @@ class RealESRGAN:
self.half = half
self.device = TorchDevice.choose_torch_device()
loadnet = torch.load(model_path, map_location=torch.device("cpu"))
# prefer to use params_ema
if "params_ema" in loadnet:
keyname = "params_ema"

View File

@@ -36,7 +36,7 @@ from ..raw_model import RawModel
# ModelMixin is the base class for all diffusers and transformers models
# RawModel is the InvokeAI wrapper class for ip_adapters, loras, textual_inversion and onnx runtime
AnyModel = Union[ModelMixin, RawModel, torch.nn.Module, Dict[str, torch.Tensor]]
AnyModel = Union[ModelMixin, RawModel, torch.nn.Module]
class InvalidModelConfigException(Exception):
@@ -115,7 +115,7 @@ class SchedulerPredictionType(str, Enum):
class ModelRepoVariant(str, Enum):
"""Various hugging face variants on the diffusers format."""
Default = "" # model files without "fp16" or other qualifier
Default = "" # model files without "fp16" or other qualifier - empty str
FP16 = "fp16"
FP32 = "fp32"
ONNX = "onnx"

View File

@@ -30,8 +30,12 @@ def convert_ldm_vae_to_diffusers(
converted_vae_checkpoint = convert_ldm_vae_checkpoint(checkpoint, vae_config)
vae = AutoencoderKL(**vae_config)
vae.load_state_dict(converted_vae_checkpoint)
vae.to(precision)
with torch.no_grad():
vae.load_state_dict(converted_vae_checkpoint)
del converted_vae_checkpoint # Free memory
import gc
gc.collect()
vae.to(precision)
if dump_path:
vae.save_pretrained(dump_path, safe_serialization=True)
@@ -52,7 +56,11 @@ def convert_ckpt_to_diffusers(
model to be written.
"""
pipe = download_from_original_stable_diffusion_ckpt(Path(checkpoint_path).as_posix(), **kwargs)
pipe = pipe.to(precision)
with torch.no_grad():
del kwargs # Free memory
import gc
gc.collect()
pipe = pipe.to(precision)
# TO DO: save correct repo variant
if dump_path:
@@ -75,7 +83,11 @@ def convert_controlnet_to_diffusers(
model to be written.
"""
pipe = download_controlnet_from_original_ckpt(checkpoint_path.as_posix(), **kwargs)
pipe = pipe.to(precision)
with torch.no_grad():
del kwargs # Free memory
import gc
gc.collect()
pipe = pipe.to(precision)
# TO DO: save correct repo variant
if dump_path:

View File

@@ -7,7 +7,7 @@ from importlib import import_module
from pathlib import Path
from .convert_cache.convert_cache_default import ModelConvertCache
from .load_base import LoadedModel, LoadedModelWithoutConfig, ModelLoaderBase
from .load_base import LoadedModel, ModelLoaderBase
from .load_default import ModelLoader
from .model_cache.model_cache_default import ModelCache
from .model_loader_registry import ModelLoaderRegistry, ModelLoaderRegistryBase
@@ -19,7 +19,6 @@ for module in loaders:
__all__ = [
"LoadedModel",
"LoadedModelWithoutConfig",
"ModelCache",
"ModelConvertCache",
"ModelLoaderBase",

View File

@@ -7,7 +7,6 @@ from pathlib import Path
from invokeai.backend.util import GIG, directory_size
from invokeai.backend.util.logging import InvokeAILogger
from invokeai.backend.util.util import safe_filename
from .convert_cache_base import ModelConvertCacheBase
@@ -36,7 +35,6 @@ class ModelConvertCache(ModelConvertCacheBase):
def cache_path(self, key: str) -> Path:
"""Return the path for a model with the indicated key."""
key = safe_filename(self._cache_path, key)
return self._cache_path / key
def make_room(self, size: float) -> None:

View File

@@ -4,13 +4,10 @@ Base class for model loading in InvokeAI.
"""
from abc import ABC, abstractmethod
from contextlib import contextmanager
from dataclasses import dataclass
from logging import Logger
from pathlib import Path
from typing import Any, Dict, Generator, Optional, Tuple
import torch
from typing import Any, Optional
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.model_manager.config import (
@@ -23,44 +20,10 @@ from invokeai.backend.model_manager.load.model_cache.model_cache_base import Mod
@dataclass
class LoadedModelWithoutConfig:
"""
Context manager object that mediates transfer from RAM<->VRAM.
This is a context manager object that has two distinct APIs:
1. Older API (deprecated):
Use the LoadedModel object directly as a context manager.
It will move the model into VRAM (on CUDA devices), and
return the model in a form suitable for passing to torch.
Example:
```
loaded_model = loader.get_model_by_key('f13dd932', SubModelType('vae'))
with loaded_model as vae:
image = vae.decode(latents)[0]
```
2. Newer API (recommended):
Call the LoadedModel's `model_on_device()` method in a
context. It returns a tuple consisting of a copy of
the model's state dict in CPU RAM followed by a copy
of the model in VRAM. The state dict is provided to allow
LoRAs and other model patchers to return the model to
its unpatched state without expensive copy and restore
operations.
Example:
```
loaded_model = loader.get_model_by_key('f13dd932', SubModelType('vae'))
with loaded_model.model_on_device() as (state_dict, vae):
image = vae.decode(latents)[0]
```
The state_dict should be treated as a read-only object and
never modified. Also be aware that some loadable models do
not have a state_dict, in which case this value will be None.
"""
class LoadedModel:
"""Context manager object that mediates transfer from RAM<->VRAM."""
config: AnyModelConfig
_locker: ModelLockerBase
def __enter__(self) -> AnyModel:
@@ -72,29 +35,12 @@ class LoadedModelWithoutConfig:
"""Context exit."""
self._locker.unlock()
@contextmanager
def model_on_device(self) -> Generator[Tuple[Optional[Dict[str, torch.Tensor]], AnyModel], None, None]:
"""Return a tuple consisting of the model's state dict (if it exists) and the locked model on execution device."""
locked_model = self._locker.lock()
try:
state_dict = self._locker.get_state_dict()
yield (state_dict, locked_model)
finally:
self._locker.unlock()
@property
def model(self) -> AnyModel:
"""Return the model without locking it."""
return self._locker.model
@dataclass
class LoadedModel(LoadedModelWithoutConfig):
"""Context manager object that mediates transfer from RAM<->VRAM."""
config: Optional[AnyModelConfig] = None
# TODO(MM2):
# Some "intermediary" subclasses in the ModelLoaderBase class hierarchy define methods that their subclasses don't
# know about. I think the problem may be related to this class being an ABC.

View File

@@ -16,7 +16,7 @@ from invokeai.backend.model_manager.config import DiffusersConfigBase, ModelType
from invokeai.backend.model_manager.load.convert_cache import ModelConvertCacheBase
from invokeai.backend.model_manager.load.load_base import LoadedModel, ModelLoaderBase
from invokeai.backend.model_manager.load.model_cache.model_cache_base import ModelCacheBase, ModelLockerBase
from invokeai.backend.model_manager.load.model_util import calc_model_size_by_fs
from invokeai.backend.model_manager.load.model_util import calc_model_size_by_data, calc_model_size_by_fs
from invokeai.backend.model_manager.load.optimizations import skip_torch_weight_init
from invokeai.backend.util.devices import TorchDevice
@@ -84,7 +84,7 @@ class ModelLoader(ModelLoaderBase):
except IndexError:
pass
cache_path: Path = self._convert_cache.cache_path(str(model_path))
cache_path: Path = self._convert_cache.cache_path(config.key)
if self._needs_conversion(config, model_path, cache_path):
loaded_model = self._do_convert(config, model_path, cache_path, submodel_type)
else:
@@ -95,6 +95,7 @@ class ModelLoader(ModelLoaderBase):
config.key,
submodel_type=submodel_type,
model=loaded_model,
size=calc_model_size_by_data(loaded_model),
)
return self._ram_cache.get(
@@ -125,7 +126,9 @@ class ModelLoader(ModelLoaderBase):
if subtype == submodel_type:
continue
if submodel := getattr(pipeline, subtype.value, None):
self._ram_cache.put(config.key, submodel_type=subtype, model=submodel)
self._ram_cache.put(
config.key, submodel_type=subtype, model=submodel, size=calc_model_size_by_data(submodel)
)
return getattr(pipeline, submodel_type.value) if submodel_type else pipeline
def _needs_conversion(self, config: AnyModelConfig, model_path: Path, dest_path: Path) -> bool:

View File

@@ -30,11 +30,6 @@ class ModelLockerBase(ABC):
"""Unlock the contained model, and remove it from VRAM."""
pass
@abstractmethod
def get_state_dict(self) -> Optional[Dict[str, torch.Tensor]]:
"""Return the state dict (if any) for the cached model."""
pass
@property
@abstractmethod
def model(self) -> AnyModel:
@@ -61,11 +56,6 @@ class CacheRecord(Generic[T]):
and then injected into the model. When the model is finished, the VRAM
copy of the state dict is deleted, and the RAM version is reinjected
into the model.
The state_dict should be treated as a read-only attribute. Do not attempt
to patch or otherwise modify it. Instead, patch the copy of the state_dict
after it is loaded into the execution device (e.g. CUDA) using the `LoadedModel`
context manager call `model_on_device()`.
"""
key: str
@@ -169,6 +159,7 @@ class ModelCacheBase(ABC, Generic[T]):
self,
key: str,
model: T,
size: int,
submodel_type: Optional[SubModelType] = None,
) -> None:
"""Store model under key and optional submodel_type."""

View File

@@ -29,7 +29,6 @@ import torch
from invokeai.backend.model_manager import AnyModel, SubModelType
from invokeai.backend.model_manager.load.memory_snapshot import MemorySnapshot, get_pretty_snapshot_diff
from invokeai.backend.model_manager.load.model_util import calc_model_size_by_data
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.logging import InvokeAILogger
@@ -154,13 +153,13 @@ class ModelCache(ModelCacheBase[AnyModel]):
self,
key: str,
model: AnyModel,
size: int,
submodel_type: Optional[SubModelType] = None,
) -> None:
"""Store model under key and optional submodel_type."""
key = self._make_cache_key(key, submodel_type)
if key in self._cached_models:
return
size = calc_model_size_by_data(model)
self.make_room(size)
state_dict = model.state_dict() if isinstance(model, torch.nn.Module) else None
@@ -253,7 +252,12 @@ class ModelCache(ModelCacheBase[AnyModel]):
May raise a torch.cuda.OutOfMemoryError
"""
# These attributes are not in the base ModelMixin class but in various derived classes.
# Some models don't have these attributes, in which case they run in RAM/CPU.
self.logger.debug(f"Called to move {cache_entry.key} to {target_device}")
if not (hasattr(cache_entry.model, "device") and hasattr(cache_entry.model, "to")):
return
source_device = cache_entry.device
# Note: We compare device types only so that 'cuda' == 'cuda:0'.
@@ -261,10 +265,6 @@ class ModelCache(ModelCacheBase[AnyModel]):
if torch.device(source_device).type == torch.device(target_device).type:
return
# Some models don't have a `to` method, in which case they run in RAM/CPU.
if not hasattr(cache_entry.model, "to"):
return
# This roundabout method for moving the model around is done to avoid
# the cost of moving the model from RAM to VRAM and then back from VRAM to RAM.
# When moving to VRAM, we copy (not move) each element of the state dict from

View File

@@ -2,8 +2,6 @@
Base class and implementation of a class that moves models in and out of VRAM.
"""
from typing import Dict, Optional
import torch
from invokeai.backend.model_manager import AnyModel
@@ -29,18 +27,20 @@ class ModelLocker(ModelLockerBase):
"""Return the model without moving it around."""
return self._cache_entry.model
def get_state_dict(self) -> Optional[Dict[str, torch.Tensor]]:
"""Return the state dict (if any) for the cached model."""
return self._cache_entry.state_dict
def lock(self) -> AnyModel:
"""Move the model into the execution device (GPU) and lock it."""
if not hasattr(self.model, "to"):
return self.model
# NOTE that the model has to have the to() method in order for this code to move it into GPU!
self._cache_entry.lock()
try:
if self._cache.lazy_offloading:
self._cache.offload_unlocked_models(self._cache_entry.size)
self._cache.move_model_to_device(self._cache_entry, self._cache.execution_device)
self._cache_entry.loaded = True
self._cache.logger.debug(f"Locking {self._cache_entry.key} in {self._cache.execution_device}")
self._cache.print_cuda_stats()
except torch.cuda.OutOfMemoryError:
@@ -55,7 +55,10 @@ class ModelLocker(ModelLockerBase):
def unlock(self) -> None:
"""Call upon exit from context."""
if not hasattr(self.model, "to"):
return
self._cache_entry.unlock()
if not self._cache.lazy_offloading:
self._cache.offload_unlocked_models(0)
self._cache.offload_unlocked_models(self._cache_entry.size)
self._cache.print_cuda_stats()

View File

@@ -65,11 +65,14 @@ class GenericDiffusersLoader(ModelLoader):
else:
try:
config = self._load_diffusers_config(model_path, config_name="config.json")
if class_name := config.get("_class_name"):
class_name = config.get("_class_name", None)
if class_name:
result = self._hf_definition_to_type(module="diffusers", class_name=class_name)
elif class_name := config.get("architectures"):
if config.get("model_type", None) == "clip_vision_model":
class_name = config.get("architectures")
assert class_name is not None
result = self._hf_definition_to_type(module="transformers", class_name=class_name[0])
else:
if not class_name:
raise InvalidModelConfigException("Unable to decipher Load Class based on given config.json")
except KeyError as e:
raise InvalidModelConfigException("An expected config.json file is missing from this model.") from e

View File

@@ -83,7 +83,7 @@ class HuggingFaceMetadataFetch(ModelMetadataFetchBase):
assert s.size is not None
files.append(
RemoteModelFile(
url=hf_hub_url(id, s.rfilename, revision=variant or "main"),
url=hf_hub_url(id, s.rfilename, revision=variant),
path=Path(name, s.rfilename),
size=s.size,
sha256=s.lfs.get("sha256") if s.lfs else None,

View File

@@ -37,12 +37,9 @@ class RemoteModelFile(BaseModel):
url: AnyHttpUrl = Field(description="The url to download this model file")
path: Path = Field(description="The path to the file, relative to the model root")
size: Optional[int] = Field(description="The size of this file, in bytes", default=0)
size: int = Field(description="The size of this file, in bytes")
sha256: Optional[str] = Field(description="SHA256 hash of this model (not always available)", default=None)
def __hash__(self) -> int:
return hash(str(self))
class ModelMetadataBase(BaseModel):
"""Base class for model metadata information."""

View File

@@ -5,7 +5,7 @@ from __future__ import annotations
import pickle
from contextlib import contextmanager
from typing import Any, Dict, Generator, Iterator, List, Optional, Tuple, Union
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
import numpy as np
import torch
@@ -66,14 +66,8 @@ class ModelPatcher:
cls,
unet: UNet2DConditionModel,
loras: Iterator[Tuple[LoRAModelRaw, float]],
model_state_dict: Optional[Dict[str, torch.Tensor]] = None,
) -> None:
with cls.apply_lora(
unet,
loras=loras,
prefix="lora_unet_",
model_state_dict=model_state_dict,
):
with cls.apply_lora(unet, loras, "lora_unet_"):
yield
@classmethod
@@ -82,9 +76,28 @@ class ModelPatcher:
cls,
text_encoder: CLIPTextModel,
loras: Iterator[Tuple[LoRAModelRaw, float]],
model_state_dict: Optional[Dict[str, torch.Tensor]] = None,
) -> None:
with cls.apply_lora(text_encoder, loras=loras, prefix="lora_te_", model_state_dict=model_state_dict):
with cls.apply_lora(text_encoder, loras, "lora_te_"):
yield
@classmethod
@contextmanager
def apply_sdxl_lora_text_encoder(
cls,
text_encoder: CLIPTextModel,
loras: List[Tuple[LoRAModelRaw, float]],
) -> None:
with cls.apply_lora(text_encoder, loras, "lora_te1_"):
yield
@classmethod
@contextmanager
def apply_sdxl_lora_text_encoder2(
cls,
text_encoder: CLIPTextModel,
loras: List[Tuple[LoRAModelRaw, float]],
) -> None:
with cls.apply_lora(text_encoder, loras, "lora_te2_"):
yield
@classmethod
@@ -94,16 +107,7 @@ class ModelPatcher:
model: AnyModel,
loras: Iterator[Tuple[LoRAModelRaw, float]],
prefix: str,
model_state_dict: Optional[Dict[str, torch.Tensor]] = None,
) -> Generator[Any, None, None]:
"""
Apply one or more LoRAs to a model.
:param model: The model to patch.
:param loras: An iterator that returns the LoRA to patch in and its patch weight.
:param prefix: A string prefix that precedes keys used in the LoRAs weight layers.
:model_state_dict: Read-only copy of the model's state dict in CPU, for unpatching purposes.
"""
) -> None:
original_weights = {}
try:
with torch.no_grad():
@@ -129,10 +133,7 @@ class ModelPatcher:
dtype = module.weight.dtype
if module_key not in original_weights:
if model_state_dict is not None: # we were provided with the CPU copy of the state dict
original_weights[module_key] = model_state_dict[module_key + ".weight"]
else:
original_weights[module_key] = module.weight.detach().to(device="cpu", copy=True)
original_weights[module_key] = module.weight.detach().to(device="cpu", copy=True)
layer_scale = layer.alpha / layer.rank if (layer.alpha and layer.rank) else 1.0

View File

@@ -1,8 +1,6 @@
import base64
import io
import os
import re
import unicodedata
import warnings
from pathlib import Path
@@ -14,33 +12,6 @@ from transformers import logging as transformers_logging
GIG = 1073741824
def slugify(value: str, allow_unicode: bool = False) -> str:
"""
Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated
dashes to single dashes. Remove characters that aren't alphanumerics,
underscores, or hyphens. Replace slashes with underscores.
Convert to lowercase. Also strip leading and
trailing whitespace, dashes, and underscores.
Adapted from Django: https://github.com/django/django/blob/main/django/utils/text.py
"""
value = str(value)
if allow_unicode:
value = unicodedata.normalize("NFKC", value)
else:
value = unicodedata.normalize("NFKD", value).encode("ascii", "ignore").decode("ascii")
value = re.sub(r"[/]", "_", value.lower())
value = re.sub(r"[^.\w\s-]", "", value.lower())
return re.sub(r"[-\s]+", "-", value).strip("-_")
def safe_filename(directory: Path, value: str) -> str:
"""Make a string safe to use as a filename."""
escaped_string = slugify(value)
max_name_length = os.pathconf(directory, "PC_NAME_MAX") if hasattr(os, "pathconf") else 256
return escaped_string[len(escaped_string) - max_name_length :]
def directory_size(directory: Path) -> int:
"""
Return the aggregate size of all files in a directory (bytes).

View File

@@ -1021,8 +1021,7 @@
"float": "Kommazahlen",
"enum": "Aufzählung",
"fullyContainNodes": "Vollständig ausgewählte Nodes auswählen",
"editMode": "Im Workflow-Editor bearbeiten",
"resetToDefaultValue": "Auf Standardwert zurücksetzen"
"editMode": "Im Workflow-Editor bearbeiten"
},
"hrf": {
"enableHrf": "Korrektur für hohe Auflösungen",

View File

@@ -148,8 +148,6 @@
"viewingDesc": "Review images in a large gallery view",
"editing": "Editing",
"editingDesc": "Edit on the Control Layers canvas",
"comparing": "Comparing",
"comparingDesc": "Comparing two images",
"enabled": "Enabled",
"disabled": "Disabled"
},
@@ -377,23 +375,7 @@
"bulkDownloadRequestFailed": "Problem Preparing Download",
"bulkDownloadFailed": "Download Failed",
"problemDeletingImages": "Problem Deleting Images",
"problemDeletingImagesDesc": "One or more images could not be deleted",
"viewerImage": "Viewer Image",
"compareImage": "Compare Image",
"openInViewer": "Open in Viewer",
"selectForCompare": "Select for Compare",
"selectAnImageToCompare": "Select an Image to Compare",
"slider": "Slider",
"sideBySide": "Side-by-Side",
"hover": "Hover",
"swapImages": "Swap Images",
"compareOptions": "Comparison Options",
"stretchToFit": "Stretch to Fit",
"exitCompare": "Exit Compare",
"compareHelp1": "Hold <Kbd>Alt</Kbd> while clicking a gallery image or using the arrow keys to change the compare image.",
"compareHelp2": "Press <Kbd>M</Kbd> to cycle through comparison modes.",
"compareHelp3": "Press <Kbd>C</Kbd> to swap the compared images.",
"compareHelp4": "Press <Kbd>Z</Kbd> or <Kbd>Esc</Kbd> to exit."
"problemDeletingImagesDesc": "One or more images could not be deleted"
},
"hotkeys": {
"searchHotkeys": "Search Hotkeys",

View File

@@ -6,7 +6,7 @@
"settingsLabel": "Ajustes",
"img2img": "Imagen a Imagen",
"unifiedCanvas": "Lienzo Unificado",
"nodes": "Flujos de trabajo",
"nodes": "Editor del flujo de trabajo",
"upload": "Subir imagen",
"load": "Cargar",
"statusDisconnected": "Desconectado",
@@ -14,7 +14,7 @@
"discordLabel": "Discord",
"back": "Atrás",
"loading": "Cargando",
"postprocessing": "Postprocesado",
"postprocessing": "Tratamiento posterior",
"txt2img": "De texto a imagen",
"accept": "Aceptar",
"cancel": "Cancelar",
@@ -42,42 +42,7 @@
"copy": "Copiar",
"beta": "Beta",
"on": "En",
"aboutDesc": "¿Utilizas Invoke para trabajar? Mira aquí:",
"installed": "Instalado",
"green": "Verde",
"editor": "Editor",
"orderBy": "Ordenar por",
"file": "Archivo",
"goTo": "Ir a",
"imageFailedToLoad": "No se puede cargar la imagen",
"saveAs": "Guardar Como",
"somethingWentWrong": "Algo salió mal",
"nextPage": "Página Siguiente",
"selected": "Seleccionado",
"tab": "Tabulador",
"positivePrompt": "Prompt Positivo",
"negativePrompt": "Prompt Negativo",
"error": "Error",
"format": "formato",
"unknown": "Desconocido",
"input": "Entrada",
"nodeEditor": "Editor de nodos",
"template": "Plantilla",
"prevPage": "Página Anterior",
"red": "Rojo",
"alpha": "Transparencia",
"outputs": "Salidas",
"editing": "Editando",
"learnMore": "Aprende más",
"enabled": "Activado",
"disabled": "Desactivado",
"folder": "Carpeta",
"updated": "Actualizado",
"created": "Creado",
"save": "Guardar",
"unknownError": "Error Desconocido",
"blue": "Azul",
"viewingDesc": "Revisar imágenes en una vista de galería grande"
"aboutDesc": "¿Utilizas Invoke para trabajar? Mira aquí:"
},
"gallery": {
"galleryImageSize": "Tamaño de la imagen",
@@ -502,8 +467,7 @@
"about": "Acerca de",
"createIssue": "Crear un problema",
"resetUI": "Interfaz de usuario $t(accessibility.reset)",
"mode": "Modo",
"submitSupportTicket": "Enviar Ticket de Soporte"
"mode": "Modo"
},
"nodes": {
"zoomInNodes": "Acercar",
@@ -579,17 +543,5 @@
"layers_one": "Capa",
"layers_many": "Capas",
"layers_other": "Capas"
},
"controlnet": {
"crop": "Cortar",
"delete": "Eliminar",
"depthAnythingDescription": "Generación de mapa de profundidad usando la técnica de Depth Anything",
"duplicate": "Duplicar",
"colorMapDescription": "Genera un mapa de color desde la imagen",
"depthMidasDescription": "Crea un mapa de profundidad con Midas",
"balanced": "Equilibrado",
"beginEndStepPercent": "Inicio / Final Porcentaje de pasos",
"detectResolution": "Detectar resolución",
"beginEndStepPercentShort": "Inicio / Final %"
}
}

View File

@@ -45,7 +45,7 @@
"outputs": "Risultati",
"data": "Dati",
"somethingWentWrong": "Qualcosa è andato storto",
"copyError": "Errore $t(gallery.copy)",
"copyError": "$t(gallery.copy) Errore",
"input": "Ingresso",
"notInstalled": "Non $t(common.installed)",
"unknownError": "Errore sconosciuto",
@@ -85,11 +85,7 @@
"viewing": "Visualizza",
"viewingDesc": "Rivedi le immagini in un'ampia vista della galleria",
"editing": "Modifica",
"editingDesc": "Modifica nell'area Livelli di controllo",
"enabled": "Abilitato",
"disabled": "Disabilitato",
"comparingDesc": "Confronta due immagini",
"comparing": "Confronta"
"editingDesc": "Modifica nell'area Livelli di controllo"
},
"gallery": {
"galleryImageSize": "Dimensione dell'immagine",
@@ -126,30 +122,14 @@
"bulkDownloadRequestedDesc": "La tua richiesta di download è in preparazione. L'operazione potrebbe richiedere alcuni istanti.",
"bulkDownloadRequestFailed": "Problema durante la preparazione del download",
"bulkDownloadFailed": "Scaricamento fallito",
"alwaysShowImageSizeBadge": "Mostra sempre le dimensioni dell'immagine",
"openInViewer": "Apri nel visualizzatore",
"selectForCompare": "Seleziona per il confronto",
"selectAnImageToCompare": "Seleziona un'immagine da confrontare",
"slider": "Cursore",
"sideBySide": "Fianco a Fianco",
"compareImage": "Immagine di confronto",
"viewerImage": "Immagine visualizzata",
"hover": "Al passaggio del mouse",
"swapImages": "Scambia le immagini",
"compareOptions": "Opzioni di confronto",
"stretchToFit": "Scala per adattare",
"exitCompare": "Esci dal confronto",
"compareHelp1": "Tieni premuto <Kbd>Alt</Kbd> mentre fai clic su un'immagine della galleria o usi i tasti freccia per cambiare l'immagine di confronto.",
"compareHelp2": "Premi <Kbd>M</Kbd> per scorrere le modalità di confronto.",
"compareHelp3": "Premi <Kbd>C</Kbd> per scambiare le immagini confrontate.",
"compareHelp4": "Premi <Kbd>Z</Kbd> o <Kbd>Esc</Kbd> per uscire."
"alwaysShowImageSizeBadge": "Mostra sempre le dimensioni dell'immagine"
},
"hotkeys": {
"keyboardShortcuts": "Tasti di scelta rapida",
"appHotkeys": "Applicazione",
"generalHotkeys": "Generale",
"galleryHotkeys": "Galleria",
"unifiedCanvasHotkeys": "Tela",
"unifiedCanvasHotkeys": "Tela Unificata",
"invoke": {
"title": "Invoke",
"desc": "Genera un'immagine"
@@ -167,8 +147,8 @@
"desc": "Apre e chiude il pannello delle opzioni"
},
"pinOptions": {
"title": "Fissa le opzioni",
"desc": "Fissa il pannello delle opzioni"
"title": "Appunta le opzioni",
"desc": "Blocca il pannello delle opzioni"
},
"toggleGallery": {
"title": "Attiva/disattiva galleria",
@@ -352,14 +332,14 @@
"title": "Annulla e cancella"
},
"resetOptionsAndGallery": {
"title": "Ripristina le opzioni e la galleria",
"desc": "Reimposta i pannelli delle opzioni e della galleria"
"title": "Ripristina Opzioni e Galleria",
"desc": "Reimposta le opzioni e i pannelli della galleria"
},
"searchHotkeys": "Cerca tasti di scelta rapida",
"noHotkeysFound": "Nessun tasto di scelta rapida trovato",
"toggleOptionsAndGallery": {
"desc": "Apre e chiude le opzioni e i pannelli della galleria",
"title": "Attiva/disattiva le opzioni e la galleria"
"title": "Attiva/disattiva le Opzioni e la Galleria"
},
"clearSearch": "Cancella ricerca",
"remixImage": {
@@ -368,7 +348,7 @@
},
"toggleViewer": {
"title": "Attiva/disattiva il visualizzatore di immagini",
"desc": "Passa dal visualizzatore immagini all'area di lavoro per la scheda corrente."
"desc": "Passa dal Visualizzatore immagini all'area di lavoro per la scheda corrente."
}
},
"modelManager": {
@@ -398,7 +378,7 @@
"convertToDiffusers": "Converti in Diffusori",
"convertToDiffusersHelpText2": "Questo processo sostituirà la voce in Gestione Modelli con la versione Diffusori dello stesso modello.",
"convertToDiffusersHelpText4": "Questo è un processo una tantum. Potrebbero essere necessari circa 30-60 secondi a seconda delle specifiche del tuo computer.",
"convertToDiffusersHelpText5": "Assicurati di avere spazio su disco sufficiente. I modelli generalmente variano tra 2 GB e 7 GB in dimensione.",
"convertToDiffusersHelpText5": "Assicurati di avere spazio su disco sufficiente. I modelli generalmente variano tra 2 GB e 7 GB di dimensioni.",
"convertToDiffusersHelpText6": "Vuoi convertire questo modello?",
"modelConverted": "Modello convertito",
"alpha": "Alpha",
@@ -548,7 +528,7 @@
"layer": {
"initialImageNoImageSelected": "Nessuna immagine iniziale selezionata",
"t2iAdapterIncompatibleDimensions": "L'adattatore T2I richiede che la dimensione dell'immagine sia un multiplo di {{multiple}}",
"controlAdapterNoModelSelected": "Nessun modello di adattatore di controllo selezionato",
"controlAdapterNoModelSelected": "Nessun modello di Adattatore di Controllo selezionato",
"controlAdapterIncompatibleBaseModel": "Il modello base dell'adattatore di controllo non è compatibile",
"controlAdapterNoImageSelected": "Nessuna immagine dell'adattatore di controllo selezionata",
"controlAdapterImageNotProcessed": "Immagine dell'adattatore di controllo non elaborata",
@@ -626,25 +606,25 @@
"canvasMerged": "Tela unita",
"sentToImageToImage": "Inviato a Generazione da immagine",
"sentToUnifiedCanvas": "Inviato alla Tela",
"parametersNotSet": "Parametri non richiamati",
"parametersNotSet": "Parametri non impostati",
"metadataLoadFailed": "Impossibile caricare i metadati",
"serverError": "Errore del Server",
"connected": "Connesso al server",
"connected": "Connesso al Server",
"canceled": "Elaborazione annullata",
"uploadFailedInvalidUploadDesc": "Deve essere una singola immagine PNG o JPEG",
"parameterSet": "Parametro richiamato",
"parameterNotSet": "Parametro non richiamato",
"parameterSet": "{{parameter}} impostato",
"parameterNotSet": "{{parameter}} non impostato",
"problemCopyingImage": "Impossibile copiare l'immagine",
"baseModelChangedCleared_one": "Cancellato o disabilitato {{count}} sottomodello incompatibile",
"baseModelChangedCleared_many": "Cancellati o disabilitati {{count}} sottomodelli incompatibili",
"baseModelChangedCleared_other": "Cancellati o disabilitati {{count}} sottomodelli incompatibili",
"baseModelChangedCleared_one": "Il modello base è stato modificato, cancellato o disabilitato {{count}} sotto-modello incompatibile",
"baseModelChangedCleared_many": "Il modello base è stato modificato, cancellato o disabilitato {{count}} sotto-modelli incompatibili",
"baseModelChangedCleared_other": "Il modello base è stato modificato, cancellato o disabilitato {{count}} sotto-modelli incompatibili",
"imageSavingFailed": "Salvataggio dell'immagine non riuscito",
"canvasSentControlnetAssets": "Tela inviata a ControlNet & Risorse",
"problemCopyingCanvasDesc": "Impossibile copiare la tela",
"loadedWithWarnings": "Flusso di lavoro caricato con avvisi",
"canvasCopiedClipboard": "Tela copiata negli appunti",
"maskSavedAssets": "Maschera salvata nelle risorse",
"problemDownloadingCanvas": "Problema durante lo scarico della tela",
"problemDownloadingCanvas": "Problema durante il download della tela",
"problemMergingCanvas": "Problema nell'unione delle tele",
"imageUploaded": "Immagine caricata",
"addedToBoard": "Aggiunto alla bacheca",
@@ -678,17 +658,7 @@
"problemDownloadingImage": "Impossibile scaricare l'immagine",
"prunedQueue": "Coda ripulita",
"modelImportCanceled": "Importazione del modello annullata",
"parameters": "Parametri",
"parameterSetDesc": "{{parameter}} richiamato",
"parameterNotSetDesc": "Impossibile richiamare {{parameter}}",
"parameterNotSetDescWithMessage": "Impossibile richiamare {{parameter}}: {{message}}",
"parametersSet": "Parametri richiamati",
"errorCopied": "Errore copiato",
"outOfMemoryError": "Errore di memoria esaurita",
"baseModelChanged": "Modello base modificato",
"sessionRef": "Sessione: {{sessionId}}",
"somethingWentWrong": "Qualcosa è andato storto",
"outOfMemoryErrorDesc": "Le impostazioni della generazione attuale superano la capacità del sistema. Modifica le impostazioni e riprova."
"parameters": "Parametri"
},
"tooltip": {
"feature": {
@@ -704,7 +674,7 @@
"layer": "Livello",
"base": "Base",
"mask": "Maschera",
"maskingOptions": "Opzioni maschera",
"maskingOptions": "Opzioni di mascheramento",
"enableMask": "Abilita maschera",
"preserveMaskedArea": "Mantieni area mascherata",
"clearMask": "Cancella maschera (Shift+C)",
@@ -775,8 +745,7 @@
"mode": "Modalità",
"resetUI": "$t(accessibility.reset) l'Interfaccia Utente",
"createIssue": "Segnala un problema",
"about": "Informazioni",
"submitSupportTicket": "Invia ticket di supporto"
"about": "Informazioni"
},
"nodes": {
"zoomOutNodes": "Rimpicciolire",
@@ -821,7 +790,7 @@
"workflowNotes": "Note",
"versionUnknown": " Versione sconosciuta",
"unableToValidateWorkflow": "Impossibile convalidare il flusso di lavoro",
"updateApp": "Aggiorna Applicazione",
"updateApp": "Aggiorna App",
"unableToLoadWorkflow": "Impossibile caricare il flusso di lavoro",
"updateNode": "Aggiorna nodo",
"version": "Versione",
@@ -913,14 +882,11 @@
"missingNode": "Nodo di invocazione mancante",
"missingInvocationTemplate": "Modello di invocazione mancante",
"missingFieldTemplate": "Modello di campo mancante",
"singleFieldType": "{{name}} (Singola)",
"imageAccessError": "Impossibile trovare l'immagine {{image_name}}, ripristino delle impostazioni predefinite",
"boardAccessError": "Impossibile trovare la bacheca {{board_id}}, ripristino ai valori predefiniti",
"modelAccessError": "Impossibile trovare il modello {{key}}, ripristino ai valori predefiniti"
"singleFieldType": "{{name}} (Singola)"
},
"boards": {
"autoAddBoard": "Aggiungi automaticamente bacheca",
"menuItemAutoAdd": "Aggiungi automaticamente a questa bacheca",
"menuItemAutoAdd": "Aggiungi automaticamente a questa Bacheca",
"cancel": "Annulla",
"addBoard": "Aggiungi Bacheca",
"bottomMessage": "L'eliminazione di questa bacheca e delle sue immagini ripristinerà tutte le funzionalità che le stanno attualmente utilizzando.",
@@ -932,7 +898,7 @@
"myBoard": "Bacheca",
"searchBoard": "Cerca bacheche ...",
"noMatching": "Nessuna bacheca corrispondente",
"selectBoard": "Seleziona una bacheca",
"selectBoard": "Seleziona una Bacheca",
"uncategorized": "Non categorizzato",
"downloadBoard": "Scarica la bacheca",
"deleteBoardOnly": "solo la Bacheca",
@@ -953,7 +919,7 @@
"control": "Controllo",
"crop": "Ritaglia",
"depthMidas": "Profondità (Midas)",
"detectResolution": "Rileva la risoluzione",
"detectResolution": "Rileva risoluzione",
"controlMode": "Modalità di controllo",
"cannyDescription": "Canny rilevamento bordi",
"depthZoe": "Profondità (Zoe)",
@@ -964,7 +930,7 @@
"showAdvanced": "Mostra opzioni Avanzate",
"bgth": "Soglia rimozione sfondo",
"importImageFromCanvas": "Importa immagine dalla Tela",
"lineartDescription": "Converte l'immagine in linea",
"lineartDescription": "Converte l'immagine in lineart",
"importMaskFromCanvas": "Importa maschera dalla Tela",
"hideAdvanced": "Nascondi opzioni avanzate",
"resetControlImage": "Reimposta immagine di controllo",
@@ -980,7 +946,7 @@
"pidiDescription": "Elaborazione immagini PIDI",
"fill": "Riempie",
"colorMapDescription": "Genera una mappa dei colori dall'immagine",
"lineartAnimeDescription": "Elaborazione linea in stile anime",
"lineartAnimeDescription": "Elaborazione lineart in stile anime",
"imageResolution": "Risoluzione dell'immagine",
"colorMap": "Colore",
"lowThreshold": "Soglia inferiore",


@@ -87,11 +87,7 @@
"viewing": "Просмотр",
"editing": "Редактирование",
"viewingDesc": "Просмотр изображений в режиме большой галереи",
"editingDesc": "Редактировать на холсте слоёв управления",
"enabled": "Включено",
"disabled": "Отключено",
"comparingDesc": "Сравнение двух изображений",
"comparing": "Сравнение"
"editingDesc": "Редактировать на холсте слоёв управления"
},
"gallery": {
"galleryImageSize": "Размер изображений",
@@ -128,23 +124,7 @@
"bulkDownloadRequested": "Подготовка к скачиванию",
"bulkDownloadRequestedDesc": "Ваш запрос на скачивание готовится. Это может занять несколько минут.",
"bulkDownloadRequestFailed": "Возникла проблема при подготовке скачивания",
"alwaysShowImageSizeBadge": "Всегда показывать значок размера изображения",
"openInViewer": "Открыть в просмотрщике",
"selectForCompare": "Выбрать для сравнения",
"hover": "Наведение",
"swapImages": "Поменять местами",
"stretchToFit": "Растягивание до нужного размера",
"exitCompare": "Выйти из сравнения",
"compareHelp4": "Нажмите <Kbd>Z</Kbd> или <Kbd>Esc</Kbd> для выхода.",
"compareImage": "Сравнить изображение",
"viewerImage": "Изображение просмотрщика",
"selectAnImageToCompare": "Выберите изображение для сравнения",
"slider": "Слайдер",
"sideBySide": "Бок о бок",
"compareOptions": "Варианты сравнения",
"compareHelp1": "Удерживайте <Kbd>Alt</Kbd> при нажатии на изображение в галерее или при помощи клавиш со стрелками, чтобы изменить сравниваемое изображение.",
"compareHelp2": "Нажмите <Kbd>M</Kbd>, чтобы переключиться между режимами сравнения.",
"compareHelp3": "Нажмите <Kbd>C</Kbd>, чтобы поменять местами сравниваемые изображения."
"alwaysShowImageSizeBadge": "Всегда показывать значок размера изображения"
},
"hotkeys": {
"keyboardShortcuts": "Горячие клавиши",
@@ -548,20 +528,7 @@
"missingFieldTemplate": "Отсутствует шаблон поля",
"addingImagesTo": "Добавление изображений в",
"invoke": "Создать",
"imageNotProcessedForControlAdapter": "Изображение адаптера контроля №{{number}} не обрабатывается",
"layer": {
"controlAdapterImageNotProcessed": "Изображение адаптера контроля не обработано",
"ipAdapterNoModelSelected": "IP адаптер не выбран",
"controlAdapterNoModelSelected": "не выбрана модель адаптера контроля",
"controlAdapterIncompatibleBaseModel": "несовместимая базовая модель адаптера контроля",
"controlAdapterNoImageSelected": "не выбрано изображение контрольного адаптера",
"initialImageNoImageSelected": "начальное изображение не выбрано",
"rgNoRegion": "регион не выбран",
"rgNoPromptsOrIPAdapters": "нет текстовых запросов или IP-адаптеров",
"ipAdapterIncompatibleBaseModel": "несовместимая базовая модель IP-адаптера",
"t2iAdapterIncompatibleDimensions": "Адаптер T2I требует, чтобы размеры изображения были кратны {{multiple}}",
"ipAdapterNoImageSelected": "изображение IP-адаптера не выбрано"
}
"imageNotProcessedForControlAdapter": "Изображение адаптера контроля №{{number}} не обрабатывается"
},
"isAllowedToUpscale": {
"useX2Model": "Изображение слишком велико для увеличения с помощью модели x4. Используйте модель x2",
@@ -639,12 +606,12 @@
"connected": "Подключено к серверу",
"canceled": "Обработка отменена",
"uploadFailedInvalidUploadDesc": "Должно быть одно изображение в формате PNG или JPEG",
"parameterNotSet": "Параметр не задан",
"parameterSet": "Параметр задан",
"parameterNotSet": "Параметр {{parameter}} не задан",
"parameterSet": "Параметр {{parameter}} задан",
"problemCopyingImage": "Не удается скопировать изображение",
"baseModelChangedCleared_one": "Очищена или отключена {{count}} несовместимая подмодель",
"baseModelChangedCleared_few": "Очищены или отключены {{count}} несовместимые подмодели",
"baseModelChangedCleared_many": "Очищены или отключены {{count}} несовместимых подмоделей",
"baseModelChangedCleared_one": "Базовая модель изменила, очистила или отключила {{count}} несовместимую подмодель",
"baseModelChangedCleared_few": "Базовая модель изменила, очистила или отключила {{count}} несовместимые подмодели",
"baseModelChangedCleared_many": "Базовая модель изменила, очистила или отключила {{count}} несовместимых подмоделей",
"imageSavingFailed": "Не удалось сохранить изображение",
"canvasSentControlnetAssets": "Холст отправлен в ControlNet и ресурсы",
"problemCopyingCanvasDesc": "Невозможно экспортировать базовый слой",
@@ -685,17 +652,7 @@
"resetInitialImage": "Сбросить начальное изображение",
"prunedQueue": "Урезанная очередь",
"modelImportCanceled": "Импорт модели отменен",
"parameters": "Параметры",
"parameterSetDesc": "Задан {{parameter}}",
"parameterNotSetDesc": "Невозможно задать {{parameter}}",
"baseModelChanged": "Базовая модель сменена",
"parameterNotSetDescWithMessage": "Не удалось задать {{parameter}}: {{message}}",
"parametersSet": "Параметры заданы",
"errorCopied": "Ошибка скопирована",
"sessionRef": "Сессия: {{sessionId}}",
"outOfMemoryError": "Ошибка нехватки памяти",
"outOfMemoryErrorDesc": "Ваши текущие настройки генерации превышают возможности системы. Пожалуйста, измените настройки и повторите попытку.",
"somethingWentWrong": "Что-то пошло не так"
"parameters": "Параметры"
},
"tooltip": {
"feature": {
@@ -782,8 +739,7 @@
"loadMore": "Загрузить больше",
"resetUI": "$t(accessibility.reset) интерфейс",
"createIssue": "Сообщить о проблеме",
"about": "Об этом",
"submitSupportTicket": "Отправить тикет в службу поддержки"
"about": "Об этом"
},
"nodes": {
"zoomInNodes": "Увеличьте масштаб",
@@ -876,7 +832,7 @@
"workflowName": "Название",
"collection": "Коллекция",
"unknownErrorValidatingWorkflow": "Неизвестная ошибка при проверке рабочего процесса",
"collectionFieldType": "{{name}} (Коллекция)",
"collectionFieldType": "Коллекция {{name}}",
"workflowNotes": "Примечания",
"string": "Строка",
"unknownNodeType": "Неизвестный тип узла",
@@ -892,7 +848,7 @@
"targetNodeDoesNotExist": "Недопустимое ребро: целевой/входной узел {{node}} не существует",
"mismatchedVersion": "Недопустимый узел: узел {{node}} типа {{type}} имеет несоответствующую версию (попробовать обновить?)",
"unknownFieldType": "$t(nodes.unknownField) тип: {{type}}",
"collectionOrScalarFieldType": "{{name}} (Один или коллекция)",
"collectionOrScalarFieldType": "Коллекция | Скаляр {{name}}",
"betaDesc": "Этот вызов находится в бета-версии. Пока он не станет стабильным, в нем могут происходить изменения при обновлении приложений. Мы планируем поддерживать этот вызов в течение длительного времени.",
"nodeVersion": "Версия узла",
"loadingNodes": "Загрузка узлов...",
@@ -914,16 +870,7 @@
"noFieldsViewMode": "В этом рабочем процессе нет выбранных полей для отображения. Просмотрите полный рабочий процесс для настройки значений.",
"graph": "График",
"showEdgeLabels": "Показать метки на ребрах",
"showEdgeLabelsHelp": "Показать метки на ребрах, указывающие на соединенные узлы",
"cannotMixAndMatchCollectionItemTypes": "Невозможно смешивать и сопоставлять типы элементов коллекции",
"missingNode": "Отсутствует узел вызова",
"missingInvocationTemplate": "Отсутствует шаблон вызова",
"missingFieldTemplate": "Отсутствующий шаблон поля",
"singleFieldType": "{{name}} (Один)",
"noGraph": "Нет графика",
"imageAccessError": "Невозможно найти изображение {{image_name}}, сбрасываем на значение по умолчанию",
"boardAccessError": "Невозможно найти доску {{board_id}}, сбрасываем на значение по умолчанию",
"modelAccessError": "Невозможно найти модель {{key}}, сброс на модель по умолчанию"
"showEdgeLabelsHelp": "Показать метки на ребрах, указывающие на соединенные узлы"
},
"controlnet": {
"amult": "a_mult",
@@ -1494,16 +1441,7 @@
"clearQueueAlertDialog2": "Вы уверены, что хотите очистить очередь?",
"item": "Элемент",
"graphFailedToQueue": "Не удалось поставить график в очередь",
"openQueue": "Открыть очередь",
"prompts_one": "Запрос",
"prompts_few": "Запроса",
"prompts_many": "Запросов",
"iterations_one": "Итерация",
"iterations_few": "Итерации",
"iterations_many": "Итераций",
"generations_one": "Генерация",
"generations_few": "Генерации",
"generations_many": "Генераций"
"openQueue": "Открыть очередь"
},
"sdxl": {
"refinerStart": "Запуск доработчика",


@@ -1,6 +1,6 @@
{
"common": {
"nodes": "工作流程",
"nodes": "節點",
"img2img": "圖片轉圖片",
"statusDisconnected": "已中斷連線",
"back": "返回",
@@ -11,239 +11,17 @@
"reportBugLabel": "回報錯誤",
"githubLabel": "GitHub",
"hotkeysLabel": "快捷鍵",
"languagePickerLabel": "語言",
"languagePickerLabel": "切換語言",
"unifiedCanvas": "統一畫布",
"cancel": "取消",
"txt2img": "文字轉圖片",
"controlNet": "ControlNet",
"advanced": "進階",
"folder": "資料夾",
"installed": "已安裝",
"accept": "接受",
"goTo": "前往",
"input": "輸入",
"random": "隨機",
"selected": "已選擇",
"communityLabel": "社群",
"loading": "載入中",
"delete": "刪除",
"copy": "複製",
"error": "錯誤",
"file": "檔案",
"format": "格式",
"imageFailedToLoad": "無法載入圖片"
"txt2img": "文字轉圖片"
},
"accessibility": {
"invokeProgressBar": "Invoke 進度條",
"uploadImage": "上傳圖片",
"reset": "重",
"reset": "重",
"nextImage": "下一張圖片",
"previousImage": "上一張圖片",
"menu": "選單",
"loadMore": "載入更多",
"about": "關於",
"createIssue": "建立問題",
"resetUI": "$t(accessibility.reset) 介面",
"submitSupportTicket": "提交支援工單",
"mode": "模式"
},
"boards": {
"loading": "載入中…",
"movingImagesToBoard_other": "正在移動 {{count}} 張圖片至板上:",
"move": "移動",
"uncategorized": "未分類",
"cancel": "取消"
},
"metadata": {
"workflow": "工作流程",
"steps": "步數",
"model": "模型",
"seed": "種子",
"vae": "VAE",
"seamless": "無縫",
"metadata": "元數據",
"width": "寬度",
"height": "高度"
},
"accordions": {
"control": {
"title": "控制"
},
"compositing": {
"title": "合成"
},
"advanced": {
"title": "進階",
"options": "$t(accordions.advanced.title) 選項"
}
},
"hotkeys": {
"nodesHotkeys": "節點",
"cancel": {
"title": "取消"
},
"generalHotkeys": "一般",
"keyboardShortcuts": "快捷鍵",
"appHotkeys": "應用程式"
},
"modelManager": {
"advanced": "進階",
"allModels": "全部模型",
"variant": "變體",
"config": "配置",
"model": "模型",
"selected": "已選擇",
"huggingFace": "HuggingFace",
"install": "安裝",
"metadata": "元數據",
"delete": "刪除",
"description": "描述",
"cancel": "取消",
"convert": "轉換",
"manual": "手動",
"none": "無",
"name": "名稱",
"load": "載入",
"height": "高度",
"width": "寬度",
"search": "搜尋",
"vae": "VAE",
"settings": "設定"
},
"controlnet": {
"mlsd": "M-LSD",
"canny": "Canny",
"duplicate": "重複",
"none": "無",
"pidi": "PIDI",
"h": "H",
"balanced": "平衡",
"crop": "裁切",
"processor": "處理器",
"control": "控制",
"f": "F",
"lineart": "線條藝術",
"w": "W",
"hed": "HED",
"delete": "刪除"
},
"queue": {
"queue": "佇列",
"canceled": "已取消",
"failed": "已失敗",
"completed": "已完成",
"cancel": "取消",
"session": "工作階段",
"batch": "批量",
"item": "項目",
"completedIn": "完成於",
"notReady": "無法排隊"
},
"parameters": {
"cancel": {
"cancel": "取消"
},
"height": "高度",
"type": "類型",
"symmetry": "對稱性",
"images": "圖片",
"width": "寬度",
"coherenceMode": "模式",
"seed": "種子",
"general": "一般",
"strength": "強度",
"steps": "步數",
"info": "資訊"
},
"settings": {
"beta": "Beta",
"developer": "開發者",
"general": "一般",
"models": "模型"
},
"popovers": {
"paramModel": {
"heading": "模型"
},
"compositingCoherenceMode": {
"heading": "模式"
},
"paramSteps": {
"heading": "步數"
},
"controlNetProcessor": {
"heading": "處理器"
},
"paramVAE": {
"heading": "VAE"
},
"paramHeight": {
"heading": "高度"
},
"paramSeed": {
"heading": "種子"
},
"paramWidth": {
"heading": "寬度"
},
"refinerSteps": {
"heading": "步數"
}
},
"unifiedCanvas": {
"undo": "復原",
"mask": "遮罩",
"eraser": "橡皮擦",
"antialiasing": "抗鋸齒",
"redo": "重做",
"layer": "圖層",
"accept": "接受",
"brush": "刷子",
"move": "移動",
"brushSize": "大小"
},
"nodes": {
"workflowName": "名稱",
"notes": "註釋",
"workflowVersion": "版本",
"workflowNotes": "註釋",
"executionStateError": "錯誤",
"unableToUpdateNodes_other": "無法更新 {{count}} 個節點",
"integer": "整數",
"workflow": "工作流程",
"enum": "枚舉",
"edit": "編輯",
"string": "字串",
"workflowTags": "標籤",
"node": "節點",
"boolean": "布林值",
"workflowAuthor": "作者",
"version": "版本",
"executionStateCompleted": "已完成",
"edge": "邊緣",
"versionUnknown": " 版本未知"
},
"sdxl": {
"steps": "步數",
"loading": "載入中…",
"refiner": "精煉器"
},
"gallery": {
"copy": "複製",
"download": "下載",
"loading": "載入中"
},
"ui": {
"tabs": {
"models": "模型",
"queueTab": "$t(ui.tabs.queue) $t(common.tab)",
"queue": "佇列"
}
},
"models": {
"loading": "載入中"
},
"workflows": {
"name": "名稱"
"menu": "選單"
}
}


@@ -19,13 +19,6 @@ function ThemeLocaleProvider({ children }: ThemeLocaleProviderProps) {
return extendTheme({
..._theme,
direction,
shadows: {
..._theme.shadows,
selectedForCompare:
'0px 0px 0px 1px var(--invoke-colors-base-900), 0px 0px 0px 4px var(--invoke-colors-green-400)',
hoverSelectedForCompare:
'0px 0px 0px 1px var(--invoke-colors-base-900), 0px 0px 0px 4px var(--invoke-colors-green-300)',
},
});
}, [direction]);


@@ -13,6 +13,7 @@ import {
isControlAdapterLayer,
} from 'features/controlLayers/store/controlLayersSlice';
import { CA_PROCESSOR_DATA } from 'features/controlLayers/util/controlAdapters';
import { isImageOutput } from 'features/nodes/types/common';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { isEqual } from 'lodash-es';
@@ -22,13 +23,7 @@ import type { BatchConfig } from 'services/api/types';
import { socketInvocationComplete } from 'services/events/actions';
import { assert } from 'tsafe';
const matcher = isAnyOf(
caLayerImageChanged,
caLayerProcessedImageChanged,
caLayerProcessorConfigChanged,
caLayerModelChanged,
caLayerRecalled
);
const matcher = isAnyOf(caLayerImageChanged, caLayerProcessorConfigChanged, caLayerModelChanged, caLayerRecalled);
const DEBOUNCE_MS = 300;
const log = logger('session');
@@ -79,10 +74,9 @@ export const addControlAdapterPreprocessor = (startAppListening: AppStartListeni
const originalConfig = originalLayer?.controlAdapter.processorConfig;
const image = layer.controlAdapter.image;
const processedImage = layer.controlAdapter.processedImage;
const config = layer.controlAdapter.processorConfig;
if (isEqual(config, originalConfig) && isEqual(image, originalImage) && processedImage) {
if (isEqual(config, originalConfig) && isEqual(image, originalImage)) {
// Neither config nor image have changed, we can bail
return;
}
@@ -145,7 +139,7 @@ export const addControlAdapterPreprocessor = (startAppListening: AppStartListeni
// We still have to check the output type
assert(
invocationCompleteAction.payload.data.result.type === 'image_output',
isImageOutput(invocationCompleteAction.payload.data.result),
`Processor did not return an image output, got: ${invocationCompleteAction.payload.data.result}`
);
const { image_name } = invocationCompleteAction.payload.data.result.image;


@@ -9,6 +9,7 @@ import {
selectControlAdapterById,
} from 'features/controlAdapters/store/controlAdaptersSlice';
import { isControlNetOrT2IAdapter } from 'features/controlAdapters/store/types';
import { isImageOutput } from 'features/nodes/types/common';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { imagesApi } from 'services/api/endpoints/images';
@@ -73,7 +74,7 @@ export const addControlNetImageProcessedListener = (startAppListening: AppStartL
);
// We still have to check the output type
if (invocationCompleteAction.payload.data.result.type === 'image_output') {
if (isImageOutput(invocationCompleteAction.payload.data.result)) {
const { image_name } = invocationCompleteAction.payload.data.result.image;
// Wait for the ImageDTO to be received


@@ -1,7 +1,7 @@
import { createAction } from '@reduxjs/toolkit';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { selectListImagesQueryArgs } from 'features/gallery/store/gallerySelectors';
import { imageToCompareChanged, selectionChanged } from 'features/gallery/store/gallerySlice';
import { selectionChanged } from 'features/gallery/store/gallerySlice';
import { imagesApi } from 'services/api/endpoints/images';
import type { ImageDTO } from 'services/api/types';
import { imagesSelectors } from 'services/api/util';
@@ -11,7 +11,6 @@ export const galleryImageClicked = createAction<{
shiftKey: boolean;
ctrlKey: boolean;
metaKey: boolean;
altKey: boolean;
}>('gallery/imageClicked');
/**
@@ -29,7 +28,7 @@ export const addGalleryImageClickedListener = (startAppListening: AppStartListen
startAppListening({
actionCreator: galleryImageClicked,
effect: async (action, { dispatch, getState }) => {
const { imageDTO, shiftKey, ctrlKey, metaKey, altKey } = action.payload;
const { imageDTO, shiftKey, ctrlKey, metaKey } = action.payload;
const state = getState();
const queryArgs = selectListImagesQueryArgs(state);
const { data: listImagesData } = imagesApi.endpoints.listImages.select(queryArgs)(state);
@@ -42,13 +41,7 @@ export const addGalleryImageClickedListener = (startAppListening: AppStartListen
const imageDTOs = imagesSelectors.selectAll(listImagesData);
const selection = state.gallery.selection;
if (altKey) {
if (state.gallery.imageToCompare?.image_name === imageDTO.image_name) {
dispatch(imageToCompareChanged(null));
} else {
dispatch(imageToCompareChanged(imageDTO));
}
} else if (shiftKey) {
if (shiftKey) {
const rangeEndImageName = imageDTO.image_name;
const lastSelectedImage = selection[selection.length - 1]?.image_name;
const lastClickedIndex = imageDTOs.findIndex((n) => n.image_name === lastSelectedImage);


@@ -14,8 +14,7 @@ import {
rgLayerIPAdapterImageChanged,
} from 'features/controlLayers/store/controlLayersSlice';
import type { TypesafeDraggableData, TypesafeDroppableData } from 'features/dnd/types';
import { isValidDrop } from 'features/dnd/util/isValidDrop';
import { imageSelected, imageToCompareChanged, isImageViewerOpenChanged } from 'features/gallery/store/gallerySlice';
import { imageSelected } from 'features/gallery/store/gallerySlice';
import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice';
import { selectOptimalDimension } from 'features/parameters/store/generationSlice';
import { imagesApi } from 'services/api/endpoints/images';
@@ -31,9 +30,6 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) =>
effect: async (action, { dispatch, getState }) => {
const log = logger('dnd');
const { activeData, overData } = action.payload;
if (!isValidDrop(overData, activeData)) {
return;
}
if (activeData.payloadType === 'IMAGE_DTO') {
log.debug({ activeData, overData }, 'Image dropped');
@@ -54,7 +50,6 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) =>
activeData.payload.imageDTO
) {
dispatch(imageSelected(activeData.payload.imageDTO));
dispatch(isImageViewerOpenChanged(true));
return;
}
@@ -187,18 +182,24 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) =>
}
/**
* Image selected for compare
* TODO
* Image selection dropped on node image collection field
*/
if (
overData.actionType === 'SELECT_FOR_COMPARE' &&
activeData.payloadType === 'IMAGE_DTO' &&
activeData.payload.imageDTO
) {
const { imageDTO } = activeData.payload;
dispatch(imageToCompareChanged(imageDTO));
dispatch(isImageViewerOpenChanged(true));
return;
}
// if (
// overData.actionType === 'SET_MULTI_NODES_IMAGE' &&
// activeData.payloadType === 'IMAGE_DTO' &&
// activeData.payload.imageDTO
// ) {
// const { fieldName, nodeId } = overData.context;
// dispatch(
// fieldValueChanged({
// nodeId,
// fieldName,
// value: [activeData.payload.imageDTO],
// })
// );
// return;
// }
/**
* Image dropped on user board


@@ -11,6 +11,7 @@ import {
} from 'features/gallery/store/gallerySlice';
import { IMAGE_CATEGORIES } from 'features/gallery/store/types';
import { $nodeExecutionStates, upsertExecutionState } from 'features/nodes/hooks/useExecutionState';
import { isImageOutput } from 'features/nodes/types/common';
import { zNodeStatus } from 'features/nodes/types/invocation';
import { CANVAS_OUTPUT } from 'features/nodes/util/graph/constants';
import { boardsApi } from 'services/api/endpoints/boards';
@@ -32,7 +33,7 @@ export const addInvocationCompleteEventListener = (startAppListening: AppStartLi
const { result, invocation_source_id } = data;
// This complete event has an associated image output
if (data.result.type === 'image_output' && !nodeTypeDenylist.includes(data.invocation.type)) {
if (isImageOutput(data.result) && !nodeTypeDenylist.includes(data.invocation.type)) {
const { image_name } = data.result.image;
const { canvas, gallery } = getState();


@@ -3,7 +3,7 @@ import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'
import { parseify } from 'common/util/serialize';
import { workflowLoaded, workflowLoadRequested } from 'features/nodes/store/actions';
import { $templates } from 'features/nodes/store/nodesSlice';
import { $needsFit } from 'features/nodes/store/reactFlowInstance';
import { $flow } from 'features/nodes/store/reactFlowInstance';
import type { Templates } from 'features/nodes/store/types';
import { WorkflowMigrationError, WorkflowVersionError } from 'features/nodes/types/error';
import { graphToWorkflow } from 'features/nodes/util/workflow/graphToWorkflow';
@@ -65,7 +65,9 @@ export const addWorkflowLoadRequestedListener = (startAppListening: AppStartList
});
}
$needsFit.set(true);
requestAnimationFrame(() => {
$flow.get()?.fitView();
});
} catch (e) {
if (e instanceof WorkflowVersionError) {
// The workflow version was not recognized in the valid list of versions


@@ -35,7 +35,6 @@ type IAIDndImageProps = FlexProps & {
draggableData?: TypesafeDraggableData;
dropLabel?: ReactNode;
isSelected?: boolean;
isSelectedForCompare?: boolean;
thumbnail?: boolean;
noContentFallback?: ReactElement;
useThumbailFallback?: boolean;
@@ -62,7 +61,6 @@ const IAIDndImage = (props: IAIDndImageProps) => {
draggableData,
dropLabel,
isSelected = false,
isSelectedForCompare = false,
thumbnail = false,
noContentFallback = defaultNoContentFallback,
uploadElement = defaultUploadElement,
@@ -167,11 +165,7 @@ const IAIDndImage = (props: IAIDndImageProps) => {
data-testid={dataTestId}
/>
{withMetadataOverlay && <ImageMetadataOverlay imageDTO={imageDTO} />}
<SelectionOverlay
isSelected={isSelected}
isSelectedForCompare={isSelectedForCompare}
isHovered={withHoverOverlay ? isHovered : false}
/>
<SelectionOverlay isSelected={isSelected} isHovered={withHoverOverlay ? isHovered : false} />
</Flex>
)}
{!imageDTO && !isUploadDisabled && (


@@ -36,7 +36,7 @@ const IAIDroppable = (props: IAIDroppableProps) => {
pointerEvents={active ? 'auto' : 'none'}
>
<AnimatePresence>
{isValidDrop(data, active?.data.current) && <IAIDropOverlay isOver={isOver} label={dropLabel} />}
{isValidDrop(data, active) && <IAIDropOverlay isOver={isOver} label={dropLabel} />}
</AnimatePresence>
</Box>
);


@@ -3,17 +3,10 @@ import { memo, useMemo } from 'react';
type Props = {
isSelected: boolean;
isSelectedForCompare: boolean;
isHovered: boolean;
};
const SelectionOverlay = ({ isSelected, isSelectedForCompare, isHovered }: Props) => {
const SelectionOverlay = ({ isSelected, isHovered }: Props) => {
const shadow = useMemo(() => {
if (isSelectedForCompare && isHovered) {
return 'hoverSelectedForCompare';
}
if (isSelectedForCompare && !isHovered) {
return 'selectedForCompare';
}
if (isSelected && isHovered) {
return 'hoverSelected';
}
@@ -24,7 +17,7 @@ const SelectionOverlay = ({ isSelected, isSelectedForCompare, isHovered }: Props
return 'hoverUnselected';
}
return undefined;
}, [isHovered, isSelected, isSelectedForCompare]);
}, [isHovered, isSelected]);
return (
<Box
className="selection-box"
@@ -34,7 +27,7 @@ const SelectionOverlay = ({ isSelected, isSelectedForCompare, isHovered }: Props
bottom={0}
insetInlineStart={0}
borderRadius="base"
opacity={isSelected || isSelectedForCompare ? 1 : 0.7}
opacity={isSelected ? 1 : 0.7}
transitionProperty="common"
transitionDuration="0.1s"
pointerEvents="none"


@@ -1,21 +0,0 @@
import { useCallback, useMemo, useState } from 'react';
export const useBoolean = (initialValue: boolean) => {
const [isTrue, set] = useState(initialValue);
const setTrue = useCallback(() => set(true), []);
const setFalse = useCallback(() => set(false), []);
const toggle = useCallback(() => set((v) => !v), []);
const api = useMemo(
() => ({
isTrue,
set,
setTrue,
setFalse,
toggle,
}),
[isTrue, set, setTrue, setFalse, toggle]
);
return api;
};


@@ -1,7 +1,3 @@
export const stopPropagation = (e: React.MouseEvent) => {
e.stopPropagation();
};
export const preventDefault = (e: React.MouseEvent) => {
e.preventDefault();
};


@@ -4,7 +4,6 @@ import {
caLayerControlModeChanged,
caLayerImageChanged,
caLayerModelChanged,
caLayerProcessedImageChanged,
caLayerProcessorConfigChanged,
caOrIPALayerBeginEndStepPctChanged,
caOrIPALayerWeightChanged,
@@ -85,14 +84,6 @@ export const CALayerControlAdapterWrapper = memo(({ layerId }: Props) => {
[dispatch, layerId]
);
const onErrorLoadingImage = useCallback(() => {
dispatch(caLayerImageChanged({ layerId, imageDTO: null }));
}, [dispatch, layerId]);
const onErrorLoadingProcessedImage = useCallback(() => {
dispatch(caLayerProcessedImageChanged({ layerId, imageDTO: null }));
}, [dispatch, layerId]);
const droppableData = useMemo<CALayerImageDropData>(
() => ({
actionType: 'SET_CA_LAYER_IMAGE',
@@ -123,8 +114,6 @@ export const CALayerControlAdapterWrapper = memo(({ layerId }: Props) => {
onChangeImage={onChangeImage}
droppableData={droppableData}
postUploadAction={postUploadAction}
onErrorLoadingImage={onErrorLoadingImage}
onErrorLoadingProcessedImage={onErrorLoadingProcessedImage}
/>
);
});


@@ -28,8 +28,6 @@ type Props = {
onChangeProcessorConfig: (processorConfig: ProcessorConfig | null) => void;
onChangeModel: (modelConfig: ControlNetModelConfig | T2IAdapterModelConfig) => void;
onChangeImage: (imageDTO: ImageDTO | null) => void;
onErrorLoadingImage: () => void;
onErrorLoadingProcessedImage: () => void;
droppableData: TypesafeDroppableData;
postUploadAction: PostUploadAction;
};
@@ -43,8 +41,6 @@ export const ControlAdapter = memo(
onChangeProcessorConfig,
onChangeModel,
onChangeImage,
onErrorLoadingImage,
onErrorLoadingProcessedImage,
droppableData,
postUploadAction,
}: Props) => {
@@ -95,8 +91,6 @@ export const ControlAdapter = memo(
onChangeImage={onChangeImage}
droppableData={droppableData}
postUploadAction={postUploadAction}
onErrorLoadingImage={onErrorLoadingImage}
onErrorLoadingProcessedImage={onErrorLoadingProcessedImage}
/>
</Flex>
</Flex>


@@ -27,19 +27,10 @@ type Props = {
onChangeImage: (imageDTO: ImageDTO | null) => void;
droppableData: TypesafeDroppableData;
postUploadAction: PostUploadAction;
onErrorLoadingImage: () => void;
onErrorLoadingProcessedImage: () => void;
};
export const ControlAdapterImagePreview = memo(
({
controlAdapter,
onChangeImage,
droppableData,
postUploadAction,
onErrorLoadingImage,
onErrorLoadingProcessedImage,
}: Props) => {
({ controlAdapter, onChangeImage, droppableData, postUploadAction }: Props) => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const autoAddBoardId = useAppSelector((s) => s.gallery.autoAddBoardId);
@@ -137,23 +128,10 @@ export const ControlAdapterImagePreview = memo(
controlAdapter.processorConfig !== null;
useEffect(() => {
if (!isConnected) {
return;
if (isConnected && (isErrorControlImage || isErrorProcessedControlImage)) {
handleResetControlImage();
}
if (isErrorControlImage) {
onErrorLoadingImage();
}
if (isErrorProcessedControlImage) {
onErrorLoadingProcessedImage();
}
}, [
handleResetControlImage,
isConnected,
isErrorControlImage,
isErrorProcessedControlImage,
onErrorLoadingImage,
onErrorLoadingProcessedImage,
]);
}, [handleResetControlImage, isConnected, isErrorControlImage, isErrorProcessedControlImage]);
return (
<Flex
@@ -189,7 +167,6 @@ export const ControlAdapterImagePreview = memo(
droppableData={droppableData}
imageDTO={processedControlImage}
isUploadDisabled={true}
onError={handleResetControlImage}
/>
</Box>


@@ -4,35 +4,20 @@ import { createSelector } from '@reduxjs/toolkit';
import { logger } from 'app/logging/logger';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { BRUSH_SPACING_PCT, MAX_BRUSH_SPACING_PX, MIN_BRUSH_SPACING_PX } from 'features/controlLayers/konva/constants';
import { setStageEventHandlers } from 'features/controlLayers/konva/events';
import { debouncedRenderers, renderers as normalRenderers } from 'features/controlLayers/konva/renderers';
import { useMouseEvents } from 'features/controlLayers/hooks/mouseEventHooks';
import {
$brushSize,
$brushSpacingPx,
$isDrawing,
$lastAddedPoint,
$lastCursorPos,
$lastMouseDownPos,
$selectedLayerId,
$selectedLayerType,
$shouldInvertBrushSizeScrollDirection,
$tool,
brushSizeChanged,
isRegionalGuidanceLayer,
layerBboxChanged,
layerTranslated,
rgLayerLineAdded,
rgLayerPointsAdded,
rgLayerRectAdded,
selectControlLayersSlice,
} from 'features/controlLayers/store/controlLayersSlice';
import type { AddLineArg, AddPointToLineArg, AddRectArg } from 'features/controlLayers/store/types';
import { debouncedRenderers, renderers as normalRenderers } from 'features/controlLayers/util/renderers';
import Konva from 'konva';
import type { IRect } from 'konva/lib/types';
import { clamp } from 'lodash-es';
import { memo, useCallback, useLayoutEffect, useMemo, useState } from 'react';
import { getImageDTO } from 'services/api/endpoints/images';
import { useDevicePixelRatio } from 'use-device-pixel-ratio';
import { v4 as uuidv4 } from 'uuid';
@@ -62,6 +47,7 @@ const useStageRenderer = (
const dispatch = useAppDispatch();
const state = useAppSelector((s) => s.controlLayers.present);
const tool = useStore($tool);
const mouseEventHandlers = useMouseEvents();
const lastCursorPos = useStore($lastCursorPos);
const lastMouseDownPos = useStore($lastMouseDownPos);
const selectedLayerIdColor = useAppSelector(selectSelectedLayerColor);
@@ -70,26 +56,6 @@ const useStageRenderer = (
const layerCount = useMemo(() => state.layers.length, [state.layers]);
const renderers = useMemo(() => (asPreview ? debouncedRenderers : normalRenderers), [asPreview]);
const dpr = useDevicePixelRatio({ round: false });
const shouldInvertBrushSizeScrollDirection = useAppSelector((s) => s.canvas.shouldInvertBrushSizeScrollDirection);
const brushSpacingPx = useMemo(
() => clamp(state.brushSize / BRUSH_SPACING_PCT, MIN_BRUSH_SPACING_PX, MAX_BRUSH_SPACING_PX),
[state.brushSize]
);
useLayoutEffect(() => {
$brushSize.set(state.brushSize);
$brushSpacingPx.set(brushSpacingPx);
$selectedLayerId.set(state.selectedLayerId);
$selectedLayerType.set(selectedLayerType);
$shouldInvertBrushSizeScrollDirection.set(shouldInvertBrushSizeScrollDirection);
}, [
brushSpacingPx,
selectedLayerIdColor,
selectedLayerType,
shouldInvertBrushSizeScrollDirection,
state.brushSize,
state.selectedLayerId,
]);
const onLayerPosChanged = useCallback(
(layerId: string, x: number, y: number) => {
@@ -105,31 +71,6 @@ const useStageRenderer = (
[dispatch]
);
const onRGLayerLineAdded = useCallback(
(arg: AddLineArg) => {
dispatch(rgLayerLineAdded(arg));
},
[dispatch]
);
const onRGLayerPointAddedToLine = useCallback(
(arg: AddPointToLineArg) => {
dispatch(rgLayerPointsAdded(arg));
},
[dispatch]
);
const onRGLayerRectAdded = useCallback(
(arg: AddRectArg) => {
dispatch(rgLayerRectAdded(arg));
},
[dispatch]
);
const onBrushSizeChanged = useCallback(
(size: number) => {
dispatch(brushSizeChanged(size));
},
[dispatch]
);
useLayoutEffect(() => {
log.trace('Initializing stage');
if (!container) {
@@ -147,29 +88,21 @@ const useStageRenderer = (
if (asPreview) {
return;
}
const cleanup = setStageEventHandlers({
stage,
$tool,
$isDrawing,
$lastMouseDownPos,
$lastCursorPos,
$lastAddedPoint,
$brushSize,
$brushSpacingPx,
$selectedLayerId,
$selectedLayerType,
$shouldInvertBrushSizeScrollDirection,
onRGLayerLineAdded,
onRGLayerPointAddedToLine,
onRGLayerRectAdded,
onBrushSizeChanged,
});
stage.on('mousedown', mouseEventHandlers.onMouseDown);
stage.on('mouseup', mouseEventHandlers.onMouseUp);
stage.on('mousemove', mouseEventHandlers.onMouseMove);
stage.on('mouseleave', mouseEventHandlers.onMouseLeave);
stage.on('wheel', mouseEventHandlers.onMouseWheel);
return () => {
log.trace('Removing stage listeners');
cleanup();
log.trace('Cleaning up stage listeners');
stage.off('mousedown', mouseEventHandlers.onMouseDown);
stage.off('mouseup', mouseEventHandlers.onMouseUp);
stage.off('mousemove', mouseEventHandlers.onMouseMove);
stage.off('mouseleave', mouseEventHandlers.onMouseLeave);
stage.off('wheel', mouseEventHandlers.onMouseWheel);
};
}, [asPreview, onBrushSizeChanged, onRGLayerLineAdded, onRGLayerPointAddedToLine, onRGLayerRectAdded, stage]);
}, [stage, asPreview, mouseEventHandlers]);
useLayoutEffect(() => {
log.trace('Updating stage dimensions');
@@ -227,7 +160,7 @@ const useStageRenderer = (
useLayoutEffect(() => {
log.trace('Rendering layers');
renderers.renderLayers(stage, state.layers, state.globalMaskLayerOpacity, tool, getImageDTO, onLayerPosChanged);
renderers.renderLayers(stage, state.layers, state.globalMaskLayerOpacity, tool, onLayerPosChanged);
}, [
stage,
state.layers,


@@ -0,0 +1,233 @@
import { $ctrl, $meta } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { calculateNewBrushSize } from 'features/canvas/hooks/useCanvasZoom';
import {
$isDrawing,
$lastCursorPos,
$lastMouseDownPos,
$tool,
brushSizeChanged,
rgLayerLineAdded,
rgLayerPointsAdded,
rgLayerRectAdded,
} from 'features/controlLayers/store/controlLayersSlice';
import type Konva from 'konva';
import type { KonvaEventObject } from 'konva/lib/Node';
import type { Vector2d } from 'konva/lib/types';
import { clamp } from 'lodash-es';
import { useCallback, useMemo, useRef } from 'react';
const getIsFocused = (stage: Konva.Stage) => {
return stage.container().contains(document.activeElement);
};
const getIsMouseDown = (e: KonvaEventObject<MouseEvent>) => e.evt.buttons === 1;
const SNAP_PX = 10;
export const snapPosToStage = (pos: Vector2d, stage: Konva.Stage) => {
const snappedPos = { ...pos };
// Get the normalized threshold for snapping to the edge of the stage
const thresholdX = SNAP_PX / stage.scaleX();
const thresholdY = SNAP_PX / stage.scaleY();
const stageWidth = stage.width() / stage.scaleX();
const stageHeight = stage.height() / stage.scaleY();
// Snap to the edge of the stage if within threshold
if (pos.x - thresholdX < 0) {
snappedPos.x = 0;
} else if (pos.x + thresholdX > stageWidth) {
snappedPos.x = Math.floor(stageWidth);
}
if (pos.y - thresholdY < 0) {
snappedPos.y = 0;
} else if (pos.y + thresholdY > stageHeight) {
snappedPos.y = Math.floor(stageHeight);
}
return snappedPos;
};
export const getScaledFlooredCursorPosition = (stage: Konva.Stage) => {
const pointerPosition = stage.getPointerPosition();
const stageTransform = stage.getAbsoluteTransform().copy();
if (!pointerPosition) {
return;
}
const scaledCursorPosition = stageTransform.invert().point(pointerPosition);
return {
x: Math.floor(scaledCursorPosition.x),
y: Math.floor(scaledCursorPosition.y),
};
};
const syncCursorPos = (stage: Konva.Stage): Vector2d | null => {
const pos = getScaledFlooredCursorPosition(stage);
if (!pos) {
return null;
}
$lastCursorPos.set(pos);
return pos;
};
const BRUSH_SPACING_PCT = 10;
const MIN_BRUSH_SPACING_PX = 5;
const MAX_BRUSH_SPACING_PX = 15;
export const useMouseEvents = () => {
const dispatch = useAppDispatch();
const selectedLayerId = useAppSelector((s) => s.controlLayers.present.selectedLayerId);
const selectedLayerType = useAppSelector((s) => {
const selectedLayer = s.controlLayers.present.layers.find((l) => l.id === s.controlLayers.present.selectedLayerId);
if (!selectedLayer) {
return null;
}
return selectedLayer.type;
});
const tool = useStore($tool);
const lastCursorPosRef = useRef<[number, number] | null>(null);
const shouldInvertBrushSizeScrollDirection = useAppSelector((s) => s.canvas.shouldInvertBrushSizeScrollDirection);
const brushSize = useAppSelector((s) => s.controlLayers.present.brushSize);
const brushSpacingPx = useMemo(
() => clamp(brushSize / BRUSH_SPACING_PCT, MIN_BRUSH_SPACING_PX, MAX_BRUSH_SPACING_PX),
[brushSize]
);
const onMouseDown = useCallback(
(e: KonvaEventObject<MouseEvent>) => {
const stage = e.target.getStage();
if (!stage) {
return;
}
const pos = syncCursorPos(stage);
if (!pos || !selectedLayerId || selectedLayerType !== 'regional_guidance_layer') {
return;
}
if (tool === 'brush' || tool === 'eraser') {
dispatch(
rgLayerLineAdded({
layerId: selectedLayerId,
points: [pos.x, pos.y, pos.x, pos.y],
tool,
})
);
$isDrawing.set(true);
$lastMouseDownPos.set(pos);
} else if (tool === 'rect') {
$lastMouseDownPos.set(snapPosToStage(pos, stage));
}
},
[dispatch, selectedLayerId, selectedLayerType, tool]
);
const onMouseUp = useCallback(
(e: KonvaEventObject<MouseEvent>) => {
const stage = e.target.getStage();
if (!stage) {
return;
}
const pos = $lastCursorPos.get();
if (!pos || !selectedLayerId || selectedLayerType !== 'regional_guidance_layer') {
return;
}
const lastPos = $lastMouseDownPos.get();
const tool = $tool.get();
if (lastPos && selectedLayerId && tool === 'rect') {
const snappedPos = snapPosToStage(pos, stage);
dispatch(
rgLayerRectAdded({
layerId: selectedLayerId,
rect: {
x: Math.min(snappedPos.x, lastPos.x),
y: Math.min(snappedPos.y, lastPos.y),
width: Math.abs(snappedPos.x - lastPos.x),
height: Math.abs(snappedPos.y - lastPos.y),
},
})
);
}
$isDrawing.set(false);
$lastMouseDownPos.set(null);
},
[dispatch, selectedLayerId, selectedLayerType]
);
const onMouseMove = useCallback(
(e: KonvaEventObject<MouseEvent>) => {
const stage = e.target.getStage();
if (!stage) {
return;
}
const pos = syncCursorPos(stage);
if (!pos || !selectedLayerId || selectedLayerType !== 'regional_guidance_layer') {
return;
}
if (getIsFocused(stage) && getIsMouseDown(e) && (tool === 'brush' || tool === 'eraser')) {
if ($isDrawing.get()) {
// Continue the last line
if (lastCursorPosRef.current) {
// Dispatching redux events impacts perf substantially - using brush spacing keeps dispatches to a reasonable number
if (Math.hypot(lastCursorPosRef.current[0] - pos.x, lastCursorPosRef.current[1] - pos.y) < brushSpacingPx) {
return;
}
}
lastCursorPosRef.current = [pos.x, pos.y];
dispatch(rgLayerPointsAdded({ layerId: selectedLayerId, point: lastCursorPosRef.current }));
} else {
// Start a new line
dispatch(rgLayerLineAdded({ layerId: selectedLayerId, points: [pos.x, pos.y, pos.x, pos.y], tool }));
}
$isDrawing.set(true);
}
},
[brushSpacingPx, dispatch, selectedLayerId, selectedLayerType, tool]
);
const onMouseLeave = useCallback(
(e: KonvaEventObject<MouseEvent>) => {
const stage = e.target.getStage();
if (!stage) {
return;
}
const pos = syncCursorPos(stage);
$isDrawing.set(false);
$lastCursorPos.set(null);
$lastMouseDownPos.set(null);
if (!pos || !selectedLayerId || selectedLayerType !== 'regional_guidance_layer') {
return;
}
if (getIsFocused(stage) && getIsMouseDown(e) && (tool === 'brush' || tool === 'eraser')) {
dispatch(rgLayerPointsAdded({ layerId: selectedLayerId, point: [pos.x, pos.y] }));
}
},
[selectedLayerId, selectedLayerType, tool, dispatch]
);
const onMouseWheel = useCallback(
(e: KonvaEventObject<WheelEvent>) => {
e.evt.preventDefault();
if (selectedLayerType !== 'regional_guidance_layer' || (tool !== 'brush' && tool !== 'eraser')) {
return;
}
// Check below whether ctrl/meta is pressed so that brush size can be adjusted with ctrl + scroll up/down.
// Invert the delta if the property is set to true
let delta = e.evt.deltaY;
if (shouldInvertBrushSizeScrollDirection) {
delta = -delta;
}
if ($ctrl.get() || $meta.get()) {
dispatch(brushSizeChanged(calculateNewBrushSize(brushSize, delta)));
}
},
[selectedLayerType, tool, shouldInvertBrushSizeScrollDirection, dispatch, brushSize]
);
const handlers = useMemo(
() => ({ onMouseDown, onMouseUp, onMouseMove, onMouseLeave, onMouseWheel }),
[onMouseDown, onMouseUp, onMouseMove, onMouseLeave, onMouseWheel]
);
return handlers;
};
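As an aside, the brush-spacing throttle described in the onMouseMove comment above can be shown in isolation; a minimal sketch with an assumed 80px brush size (the constants mirror the ones defined in this hook):

```ts
import { clamp } from 'lodash-es';

// Constants as defined in the hook above.
const BRUSH_SPACING_PCT = 10;
const MIN_BRUSH_SPACING_PX = 5;
const MAX_BRUSH_SPACING_PX = 15;

// An 80px brush gives 80 / 10 = 8px spacing, clamped to the [5, 15]px range.
const brushSize = 80; // assumed value for the example
const spacingPx = clamp(brushSize / BRUSH_SPACING_PCT, MIN_BRUSH_SPACING_PX, MAX_BRUSH_SPACING_PX); // 8

// A new point is dispatched only once the cursor has moved at least `spacingPx` from the last one.
const shouldDispatchPoint = (last: [number, number], next: [number, number]): boolean =>
  Math.hypot(last[0] - next[0], last[1] - next[1]) >= spacingPx;
```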


@@ -1,36 +0,0 @@
/**
* A transparency checker pattern image.
* This is invokeai/frontend/web/public/assets/images/transparent_bg.png as a dataURL
*/
export const TRANSPARENCY_CHECKER_PATTERN =
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAIAAAAC64paAAAEsmlUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPD94cGFja2V0IGJlZ2luPSLvu78iIGlkPSJXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQiPz4KPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iWE1QIENvcmUgNS41LjAiPgogPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIgogICAgeG1sbnM6ZXhpZj0iaHR0cDovL25zLmFkb2JlLmNvbS9leGlmLzEuMC8iCiAgICB4bWxuczp0aWZmPSJodHRwOi8vbnMuYWRvYmUuY29tL3RpZmYvMS4wLyIKICAgIHhtbG5zOnBob3Rvc2hvcD0iaHR0cDovL25zLmFkb2JlLmNvbS9waG90b3Nob3AvMS4wLyIKICAgIHhtbG5zOnhtcD0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wLyIKICAgIHhtbG5zOnhtcE1NPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvbW0vIgogICAgeG1sbnM6c3RFdnQ9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZUV2ZW50IyIKICAgZXhpZjpQaXhlbFhEaW1lbnNpb249IjIwIgogICBleGlmOlBpeGVsWURpbWVuc2lvbj0iMjAiCiAgIGV4aWY6Q29sb3JTcGFjZT0iMSIKICAgdGlmZjpJbWFnZVdpZHRoPSIyMCIKICAgdGlmZjpJbWFnZUxlbmd0aD0iMjAiCiAgIHRpZmY6UmVzb2x1dGlvblVuaXQ9IjIiCiAgIHRpZmY6WFJlc29sdXRpb249IjMwMC8xIgogICB0aWZmOllSZXNvbHV0aW9uPSIzMDAvMSIKICAgcGhvdG9zaG9wOkNvbG9yTW9kZT0iMyIKICAgcGhvdG9zaG9wOklDQ1Byb2ZpbGU9InNSR0IgSUVDNjE5NjYtMi4xIgogICB4bXA6TW9kaWZ5RGF0ZT0iMjAyNC0wNC0yM1QwODoyMDo0NysxMDowMCIKICAgeG1wOk1ldGFkYXRhRGF0ZT0iMjAyNC0wNC0yM1QwODoyMDo0NysxMDowMCI+CiAgIDx4bXBNTTpIaXN0b3J5PgogICAgPHJkZjpTZXE+CiAgICAgPHJkZjpsaQogICAgICBzdEV2dDphY3Rpb249InByb2R1Y2VkIgogICAgICBzdEV2dDpzb2Z0d2FyZUFnZW50PSJBZmZpbml0eSBQaG90byAxLjEwLjgiCiAgICAgIHN0RXZ0OndoZW49IjIwMjQtMDQtMjNUMDg6MjA6NDcrMTA6MDAiLz4KICAgIDwvcmRmOlNlcT4KICAgPC94bXBNTTpIaXN0b3J5PgogIDwvcmRmOkRlc2NyaXB0aW9uPgogPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4KPD94cGFja2V0IGVuZD0iciI/Pn9pdVgAAAGBaUNDUHNSR0IgSUVDNjE5NjYtMi4xAAAokXWR3yuDURjHP5uJmKghFy6WxpVpqMWNMgm1tGbKr5vt3S+1d3t73y3JrXKrKHHj1wV/AbfKtVJESq53TdywXs9rakv2nJ7zfM73nOfpnOeAPZJRVMPhAzWb18NTAffC4pK7oYiDTjpw4YgqhjYeCgWpaR8P2Kx457Vq1T73rzXHE4YCtkbhMUXT88LTwsG1vGbxrnC7ko7Ghc+F+3W5oPC9pcfKXLQ4VeYvi/VIeALsbcLuVBXHqlhJ66qwvByPmikov/exXuJMZOfnJPaId2MQZooAbmaYZAI/g4zK7MfLEAOyoka+7yd/lpzkKjJrrKOzSoo0efpFLUj1hMSk6AkZGdat/v/tq5EcHipXdwag/sU033qhYQdK26b5eWyapROoe4arbCU/dwQj76JvVzTPIbRuwsV1RYvtweUWdD1pUT36I9WJ25NJeD2DlkVw3ULTcrlnv/ucPkJkQ77qBvYPoE/Ot658AxagZ8FoS/a7AAAACXBIWXMAAC4jAAAuIwF4pT92AAAAL0lEQVQ4jWM8ffo0A25gYmKCR5YJjxxBMKp5ZGhm/P//Px7pM2fO0MrmUc0jQzMAB2EIhZC3pUYAAAAASUVORK5CYII=';
/**
* The color of a bounding box stroke when its object is selected.
*/
export const BBOX_SELECTED_STROKE = 'rgba(78, 190, 255, 1)';
/**
* The inner border color for the brush preview.
*/
export const BRUSH_BORDER_INNER_COLOR = 'rgba(0,0,0,1)';
/**
* The outer border color for the brush preview.
*/
export const BRUSH_BORDER_OUTER_COLOR = 'rgba(255,255,255,0.8)';
/**
* The target spacing of individual points of brush strokes, as a percentage of the brush size.
*/
export const BRUSH_SPACING_PCT = 10;
/**
* The minimum brush spacing in pixels.
*/
export const MIN_BRUSH_SPACING_PX = 5;
/**
* The maximum brush spacing in pixels.
*/
export const MAX_BRUSH_SPACING_PX = 15;
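For context, a checker-pattern dataURL like TRANSPARENCY_CHECKER_PATTERN above is typically tiled behind transparent content as a Konva fill pattern; a minimal sketch using the standard Konva API, where the stage container and dimensions are hypothetical:

```ts
import Konva from 'konva';

// Hypothetical stage and background layer for the sketch.
const stage = new Konva.Stage({ container: 'container', width: 512, height: 512 });
const backgroundLayer = new Konva.Layer({ listening: false });
stage.add(backgroundLayer);

// Load the dataURL into an image element, then tile it across a background rect.
const checkerImage = new Image();
checkerImage.onload = () => {
  backgroundLayer.add(
    new Konva.Rect({
      x: 0,
      y: 0,
      width: stage.width(),
      height: stage.height(),
      fillPatternImage: checkerImage,
      fillPatternRepeat: 'repeat',
    })
  );
  backgroundLayer.draw();
};
checkerImage.src = TRANSPARENCY_CHECKER_PATTERN; // the constant defined above
```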


@@ -1,201 +0,0 @@
import { calculateNewBrushSize } from 'features/canvas/hooks/useCanvasZoom';
import {
getIsFocused,
getIsMouseDown,
getScaledFlooredCursorPosition,
snapPosToStage,
} from 'features/controlLayers/konva/util';
import type { AddLineArg, AddPointToLineArg, AddRectArg, Layer, Tool } from 'features/controlLayers/store/types';
import type Konva from 'konva';
import type { Vector2d } from 'konva/lib/types';
import type { WritableAtom } from 'nanostores';
import { TOOL_PREVIEW_LAYER_ID } from './naming';
type SetStageEventHandlersArg = {
stage: Konva.Stage;
$tool: WritableAtom<Tool>;
$isDrawing: WritableAtom<boolean>;
$lastMouseDownPos: WritableAtom<Vector2d | null>;
$lastCursorPos: WritableAtom<Vector2d | null>;
$lastAddedPoint: WritableAtom<Vector2d | null>;
$brushSize: WritableAtom<number>;
$brushSpacingPx: WritableAtom<number>;
$selectedLayerId: WritableAtom<string | null>;
$selectedLayerType: WritableAtom<Layer['type'] | null>;
$shouldInvertBrushSizeScrollDirection: WritableAtom<boolean>;
onRGLayerLineAdded: (arg: AddLineArg) => void;
onRGLayerPointAddedToLine: (arg: AddPointToLineArg) => void;
onRGLayerRectAdded: (arg: AddRectArg) => void;
onBrushSizeChanged: (size: number) => void;
};
const syncCursorPos = (stage: Konva.Stage, $lastCursorPos: WritableAtom<Vector2d | null>) => {
const pos = getScaledFlooredCursorPosition(stage);
if (!pos) {
return null;
}
$lastCursorPos.set(pos);
return pos;
};
export const setStageEventHandlers = ({
stage,
$tool,
$isDrawing,
$lastMouseDownPos,
$lastCursorPos,
$lastAddedPoint,
$brushSize,
$brushSpacingPx,
$selectedLayerId,
$selectedLayerType,
$shouldInvertBrushSizeScrollDirection,
onRGLayerLineAdded,
onRGLayerPointAddedToLine,
onRGLayerRectAdded,
onBrushSizeChanged,
}: SetStageEventHandlersArg): (() => void) => {
stage.on('mouseenter', (e) => {
const stage = e.target.getStage();
if (!stage) {
return;
}
const tool = $tool.get();
stage.findOne<Konva.Layer>(`#${TOOL_PREVIEW_LAYER_ID}`)?.visible(tool === 'brush' || tool === 'eraser');
});
stage.on('mousedown', (e) => {
const stage = e.target.getStage();
if (!stage) {
return;
}
const tool = $tool.get();
const pos = syncCursorPos(stage, $lastCursorPos);
const selectedLayerId = $selectedLayerId.get();
const selectedLayerType = $selectedLayerType.get();
if (!pos || !selectedLayerId || selectedLayerType !== 'regional_guidance_layer') {
return;
}
if (tool === 'brush' || tool === 'eraser') {
onRGLayerLineAdded({
layerId: selectedLayerId,
points: [pos.x, pos.y, pos.x, pos.y],
tool,
});
$isDrawing.set(true);
$lastMouseDownPos.set(pos);
} else if (tool === 'rect') {
$lastMouseDownPos.set(snapPosToStage(pos, stage));
}
});
stage.on('mouseup', (e) => {
const stage = e.target.getStage();
if (!stage) {
return;
}
const pos = $lastCursorPos.get();
const selectedLayerId = $selectedLayerId.get();
const selectedLayerType = $selectedLayerType.get();
if (!pos || !selectedLayerId || selectedLayerType !== 'regional_guidance_layer') {
return;
}
const lastPos = $lastMouseDownPos.get();
const tool = $tool.get();
if (lastPos && selectedLayerId && tool === 'rect') {
const snappedPos = snapPosToStage(pos, stage);
onRGLayerRectAdded({
layerId: selectedLayerId,
rect: {
x: Math.min(snappedPos.x, lastPos.x),
y: Math.min(snappedPos.y, lastPos.y),
width: Math.abs(snappedPos.x - lastPos.x),
height: Math.abs(snappedPos.y - lastPos.y),
},
});
}
$isDrawing.set(false);
$lastMouseDownPos.set(null);
});
stage.on('mousemove', (e) => {
const stage = e.target.getStage();
if (!stage) {
return;
}
const tool = $tool.get();
const pos = syncCursorPos(stage, $lastCursorPos);
const selectedLayerId = $selectedLayerId.get();
const selectedLayerType = $selectedLayerType.get();
stage.findOne<Konva.Layer>(`#${TOOL_PREVIEW_LAYER_ID}`)?.visible(tool === 'brush' || tool === 'eraser');
if (!pos || !selectedLayerId || selectedLayerType !== 'regional_guidance_layer') {
return;
}
if (getIsFocused(stage) && getIsMouseDown(e) && (tool === 'brush' || tool === 'eraser')) {
if ($isDrawing.get()) {
// Continue the last line
const lastAddedPoint = $lastAddedPoint.get();
if (lastAddedPoint) {
// Dispatching redux events impacts perf substantially - using brush spacing keeps dispatches to a reasonable number
if (Math.hypot(lastAddedPoint.x - pos.x, lastAddedPoint.y - pos.y) < $brushSpacingPx.get()) {
return;
}
}
$lastAddedPoint.set({ x: pos.x, y: pos.y });
onRGLayerPointAddedToLine({ layerId: selectedLayerId, point: [pos.x, pos.y] });
} else {
// Start a new line
onRGLayerLineAdded({ layerId: selectedLayerId, points: [pos.x, pos.y, pos.x, pos.y], tool });
}
$isDrawing.set(true);
}
});
stage.on('mouseleave', (e) => {
const stage = e.target.getStage();
if (!stage) {
return;
}
const pos = syncCursorPos(stage, $lastCursorPos);
$isDrawing.set(false);
$lastCursorPos.set(null);
$lastMouseDownPos.set(null);
const selectedLayerId = $selectedLayerId.get();
const selectedLayerType = $selectedLayerType.get();
const tool = $tool.get();
stage.findOne<Konva.Layer>(`#${TOOL_PREVIEW_LAYER_ID}`)?.visible(false);
if (!pos || !selectedLayerId || selectedLayerType !== 'regional_guidance_layer') {
return;
}
if (getIsFocused(stage) && getIsMouseDown(e) && (tool === 'brush' || tool === 'eraser')) {
onRGLayerPointAddedToLine({ layerId: selectedLayerId, point: [pos.x, pos.y] });
}
});
stage.on('wheel', (e) => {
e.evt.preventDefault();
const selectedLayerType = $selectedLayerType.get();
const tool = $tool.get();
if (selectedLayerType !== 'regional_guidance_layer' || (tool !== 'brush' && tool !== 'eraser')) {
return;
}
// Invert the delta if the property is set to true
let delta = e.evt.deltaY;
if ($shouldInvertBrushSizeScrollDirection.get()) {
delta = -delta;
}
if (e.evt.ctrlKey || e.evt.metaKey) {
onBrushSizeChanged(calculateNewBrushSize($brushSize.get(), delta));
}
});
return () => stage.off('mousedown mouseup mousemove mouseenter mouseleave wheel');
};


@@ -1,21 +0,0 @@
/**
* Konva filters
* https://konvajs.org/docs/filters/Custom_Filter.html
*/
/**
* Calculates the lightness (HSL) of a given pixel and sets the alpha channel to that value.
* This is useful for edge maps and other masks, to make the black areas transparent.
* @param imageData The image data to apply the filter to
*/
export const LightnessToAlphaFilter = (imageData: ImageData): void => {
const len = imageData.data.length / 4;
for (let i = 0; i < len; i++) {
const r = imageData.data[i * 4 + 0] as number;
const g = imageData.data[i * 4 + 1] as number;
const b = imageData.data[i * 4 + 2] as number;
const cMin = Math.min(r, g, b);
const cMax = Math.max(r, g, b);
imageData.data[i * 4 + 3] = (cMin + cMax) / 2;
}
};
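For context, a Konva custom filter like LightnessToAlphaFilter above is applied through a node's cache/filters mechanism (per the docs linked in the header comment); a minimal sketch, where the edge-map image source and layer are hypothetical:

```ts
import Konva from 'konva';

const layer = new Konva.Layer(); // assumed to be added to a stage elsewhere

// Hypothetical edge map rendered with its black areas made transparent by the filter.
const edgeMapElement = new Image();
edgeMapElement.onload = () => {
  const edgeMap = new Konva.Image({ image: edgeMapElement, listening: false });
  edgeMap.cache(); // filters only run on cached nodes
  edgeMap.filters([LightnessToAlphaFilter]); // dark pixels become transparent
  layer.add(edgeMap);
  layer.draw();
};
edgeMapElement.src = 'edge_map.png'; // hypothetical source image
```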


@@ -1,38 +0,0 @@
/**
* This file contains IDs, names, and ID getters for konva layers and objects.
*/
// IDs for singleton Konva layers and objects
export const TOOL_PREVIEW_LAYER_ID = 'tool_preview_layer';
export const TOOL_PREVIEW_BRUSH_GROUP_ID = 'tool_preview_layer.brush_group';
export const TOOL_PREVIEW_BRUSH_FILL_ID = 'tool_preview_layer.brush_fill';
export const TOOL_PREVIEW_BRUSH_BORDER_INNER_ID = 'tool_preview_layer.brush_border_inner';
export const TOOL_PREVIEW_BRUSH_BORDER_OUTER_ID = 'tool_preview_layer.brush_border_outer';
export const TOOL_PREVIEW_RECT_ID = 'tool_preview_layer.rect';
export const BACKGROUND_LAYER_ID = 'background_layer';
export const BACKGROUND_RECT_ID = 'background_layer.rect';
export const NO_LAYERS_MESSAGE_LAYER_ID = 'no_layers_message';
// Names for Konva layers and objects (comparable to CSS classes)
export const CA_LAYER_NAME = 'control_adapter_layer';
export const CA_LAYER_IMAGE_NAME = 'control_adapter_layer.image';
export const RG_LAYER_NAME = 'regional_guidance_layer';
export const RG_LAYER_LINE_NAME = 'regional_guidance_layer.line';
export const RG_LAYER_OBJECT_GROUP_NAME = 'regional_guidance_layer.object_group';
export const RG_LAYER_RECT_NAME = 'regional_guidance_layer.rect';
export const INITIAL_IMAGE_LAYER_ID = 'singleton_initial_image_layer';
export const INITIAL_IMAGE_LAYER_NAME = 'initial_image_layer';
export const INITIAL_IMAGE_LAYER_IMAGE_NAME = 'initial_image_layer.image';
export const LAYER_BBOX_NAME = 'layer.bbox';
export const COMPOSITING_RECT_NAME = 'compositing-rect';
// Getters for non-singleton layer and object IDs
export const getRGLayerId = (layerId: string) => `${RG_LAYER_NAME}_${layerId}`;
export const getRGLayerLineId = (layerId: string, lineId: string) => `${layerId}.line_${lineId}`;
export const getRGLayerRectId = (layerId: string, lineId: string) => `${layerId}.rect_${lineId}`;
export const getRGLayerObjectGroupId = (layerId: string, groupId: string) => `${layerId}.objectGroup_${groupId}`;
export const getLayerBboxId = (layerId: string) => `${layerId}.bbox`;
export const getCALayerId = (layerId: string) => `control_adapter_layer_${layerId}`;
export const getCALayerImageId = (layerId: string, imageName: string) => `${layerId}.image_${imageName}`;
export const getIILayerImageId = (layerId: string, imageName: string) => `${layerId}.image_${imageName}`;
export const getIPALayerId = (layerId: string) => `ip_adapter_layer_${layerId}`;

View File

@@ -1,67 +0,0 @@
import type Konva from 'konva';
import type { KonvaEventObject } from 'konva/lib/Node';
import type { Vector2d } from 'konva/lib/types';
//#region getScaledFlooredCursorPosition
/**
* Gets the scaled and floored cursor position on the stage. If the cursor is not currently over the stage, returns null.
* @param stage The konva stage
*/
export const getScaledFlooredCursorPosition = (stage: Konva.Stage): Vector2d | null => {
const pointerPosition = stage.getPointerPosition();
const stageTransform = stage.getAbsoluteTransform().copy();
if (!pointerPosition) {
return null;
}
const scaledCursorPosition = stageTransform.invert().point(pointerPosition);
return {
x: Math.floor(scaledCursorPosition.x),
y: Math.floor(scaledCursorPosition.y),
};
};
//#endregion
//#region snapPosToStage
/**
* Snaps a position to the edge of the stage if within a threshold of the edge
* @param pos The position to snap
* @param stage The konva stage
* @param snapPx The snap threshold in pixels
*/
export const snapPosToStage = (pos: Vector2d, stage: Konva.Stage, snapPx = 10): Vector2d => {
const snappedPos = { ...pos };
// Get the normalized threshold for snapping to the edge of the stage
const thresholdX = snapPx / stage.scaleX();
const thresholdY = snapPx / stage.scaleY();
const stageWidth = stage.width() / stage.scaleX();
const stageHeight = stage.height() / stage.scaleY();
// Snap to the edge of the stage if within threshold
if (pos.x - thresholdX < 0) {
snappedPos.x = 0;
} else if (pos.x + thresholdX > stageWidth) {
snappedPos.x = Math.floor(stageWidth);
}
if (pos.y - thresholdY < 0) {
snappedPos.y = 0;
} else if (pos.y + thresholdY > stageHeight) {
snappedPos.y = Math.floor(stageHeight);
}
return snappedPos;
};
//#endregion
//#region getIsMouseDown
/**
* Checks if the left mouse button is currently pressed
* @param e The konva event
*/
export const getIsMouseDown = (e: KonvaEventObject<MouseEvent>): boolean => e.evt.buttons === 1;
//#endregion
//#region getIsFocused
/**
* Checks if the stage is currently focused
* @param stage The konva stage
*/
export const getIsFocused = (stage: Konva.Stage): boolean => stage.container().contains(document.activeElement);
//#endregion
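
These helpers are meant to compose inside the stage's pointer handlers, as in the `mousedown`/`mouseup` handlers earlier in this diff. A minimal sketch, assuming an existing `Konva.Stage` named `stage`:

```typescript
stage.on('mousedown', (e) => {
  // Only start drawing when the stage is focused and the left mouse button is down
  if (!getIsFocused(stage) || !getIsMouseDown(e)) {
    return;
  }
  const cursorPos = getScaledFlooredCursorPosition(stage);
  if (!cursorPos) {
    return;
  }
  // Snap to the stage edge when within the threshold so shapes can reach the borders exactly
  const snappedPos = snapPosToStage(cursorPos, stage);
  console.log('anchor point', snappedPos);
});
```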

View File

@@ -4,14 +4,6 @@ import type { PersistConfig, RootState } from 'app/store/store';
import { moveBackward, moveForward, moveToBack, moveToFront } from 'common/util/arrayUtils';
import { deepClone } from 'common/util/deepClone';
import { roundDownToMultiple } from 'common/util/roundDownToMultiple';
import {
getCALayerId,
getIPALayerId,
getRGLayerId,
getRGLayerLineId,
getRGLayerRectId,
INITIAL_IMAGE_LAYER_ID,
} from 'features/controlLayers/konva/naming';
import type {
CLIPVisionModelV2,
ControlModeV2,
@@ -44,9 +36,6 @@ import { assert } from 'tsafe';
import { v4 as uuidv4 } from 'uuid';
import type {
AddLineArg,
AddPointToLineArg,
AddRectArg,
ControlAdapterLayer,
ControlLayersState,
DrawingTool,
@@ -503,11 +492,11 @@ export const controlLayersSlice = createSlice({
layer.bboxNeedsUpdate = true;
layer.uploadedMaskImage = null;
},
prepare: (payload: AddLineArg) => ({
prepare: (payload: { layerId: string; points: [number, number, number, number]; tool: DrawingTool }) => ({
payload: { ...payload, lineUuid: uuidv4() },
}),
},
rgLayerPointsAdded: (state, action: PayloadAction<AddPointToLineArg>) => {
rgLayerPointsAdded: (state, action: PayloadAction<{ layerId: string; point: [number, number] }>) => {
const { layerId, point } = action.payload;
const layer = selectRGLayerOrThrow(state, layerId);
const lastLine = layer.maskObjects.findLast(isLine);
@@ -540,7 +529,7 @@ export const controlLayersSlice = createSlice({
layer.bboxNeedsUpdate = true;
layer.uploadedMaskImage = null;
},
prepare: (payload: AddRectArg) => ({ payload: { ...payload, rectUuid: uuidv4() } }),
prepare: (payload: { layerId: string; rect: IRect }) => ({ payload: { ...payload, rectUuid: uuidv4() } }),
},
rgLayerMaskImageUploaded: (state, action: PayloadAction<{ layerId: string; imageDTO: ImageDTO }>) => {
const { layerId, imageDTO } = action.payload;
@@ -894,21 +883,45 @@ const migrateControlLayersState = (state: any): any => {
return state;
};
// Ephemeral interaction state
export const $isDrawing = atom(false);
export const $lastMouseDownPos = atom<Vector2d | null>(null);
export const $tool = atom<Tool>('brush');
export const $lastCursorPos = atom<Vector2d | null>(null);
export const $isPreviewVisible = atom(true);
export const $lastAddedPoint = atom<Vector2d | null>(null);
// Some nanostores that are manually synced to redux state to provide imperative access
// TODO(psyche): This is a hack, figure out another way to handle this...
export const $brushSize = atom<number>(0);
export const $brushSpacingPx = atom<number>(0);
export const $selectedLayerId = atom<string | null>(null);
export const $selectedLayerType = atom<Layer['type'] | null>(null);
export const $shouldInvertBrushSizeScrollDirection = atom(false);
// IDs for singleton Konva layers and objects
export const TOOL_PREVIEW_LAYER_ID = 'tool_preview_layer';
export const TOOL_PREVIEW_BRUSH_GROUP_ID = 'tool_preview_layer.brush_group';
export const TOOL_PREVIEW_BRUSH_FILL_ID = 'tool_preview_layer.brush_fill';
export const TOOL_PREVIEW_BRUSH_BORDER_INNER_ID = 'tool_preview_layer.brush_border_inner';
export const TOOL_PREVIEW_BRUSH_BORDER_OUTER_ID = 'tool_preview_layer.brush_border_outer';
export const TOOL_PREVIEW_RECT_ID = 'tool_preview_layer.rect';
export const BACKGROUND_LAYER_ID = 'background_layer';
export const BACKGROUND_RECT_ID = 'background_layer.rect';
export const NO_LAYERS_MESSAGE_LAYER_ID = 'no_layers_message';
// Names (aka classes) for Konva layers and objects
export const CA_LAYER_NAME = 'control_adapter_layer';
export const CA_LAYER_IMAGE_NAME = 'control_adapter_layer.image';
export const RG_LAYER_NAME = 'regional_guidance_layer';
export const RG_LAYER_LINE_NAME = 'regional_guidance_layer.line';
export const RG_LAYER_OBJECT_GROUP_NAME = 'regional_guidance_layer.object_group';
export const RG_LAYER_RECT_NAME = 'regional_guidance_layer.rect';
export const INITIAL_IMAGE_LAYER_ID = 'singleton_initial_image_layer';
export const INITIAL_IMAGE_LAYER_NAME = 'initial_image_layer';
export const INITIAL_IMAGE_LAYER_IMAGE_NAME = 'initial_image_layer.image';
export const LAYER_BBOX_NAME = 'layer.bbox';
export const COMPOSITING_RECT_NAME = 'compositing-rect';
// Getters for non-singleton layer and object IDs
export const getRGLayerId = (layerId: string) => `${RG_LAYER_NAME}_${layerId}`;
const getRGLayerLineId = (layerId: string, lineId: string) => `${layerId}.line_${lineId}`;
const getRGLayerRectId = (layerId: string, lineId: string) => `${layerId}.rect_${lineId}`;
export const getRGLayerObjectGroupId = (layerId: string, groupId: string) => `${layerId}.objectGroup_${groupId}`;
export const getLayerBboxId = (layerId: string) => `${layerId}.bbox`;
export const getCALayerId = (layerId: string) => `control_adapter_layer_${layerId}`;
export const getCALayerImageId = (layerId: string, imageName: string) => `${layerId}.image_${imageName}`;
export const getIILayerImageId = (layerId: string, imageName: string) => `${layerId}.image_${imageName}`;
export const getIPALayerId = (layerId: string) => `ip_adapter_layer_${layerId}`;
export const controlLayersPersistConfig: PersistConfig<ControlLayersState> = {
name: controlLayersSlice.name,
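
The nanostores above that mirror redux state (`$brushSize`, `$brushSpacingPx`, `$selectedLayerId`, etc.) are typically kept in sync from a React component. A minimal sketch of one such sync, assuming a hypothetical `useAppSelector` hook and `selectBrushSize` selector (neither shown in this diff):

```typescript
import { useLayoutEffect } from 'react';

// Hypothetical hook: mirrors the redux brush size into the nanostore atom for imperative access
const useSyncBrushSizeToAtom = () => {
  const brushSize = useAppSelector(selectBrushSize); // assumed app-level helpers
  useLayoutEffect(() => {
    $brushSize.set(brushSize);
  }, [brushSize]);
};
```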

View File

@@ -17,7 +17,6 @@ import {
zParameterPositivePrompt,
zParameterStrength,
} from 'features/parameters/types/parameterSchemas';
import type { IRect } from 'konva/lib/types';
import { z } from 'zod';
const zTool = z.enum(['brush', 'eraser', 'move', 'rect']);
@@ -130,7 +129,3 @@ export type ControlLayersState = {
aspectRatio: AspectRatioState;
};
};
export type AddLineArg = { layerId: string; points: [number, number, number, number]; tool: DrawingTool };
export type AddPointToLineArg = { layerId: string; point: [number, number] };
export type AddRectArg = { layerId: string; rect: IRect };

View File

@@ -1,10 +1,11 @@
import openBase64ImageInTab from 'common/util/openBase64ImageInTab';
import { imageDataToDataURL } from 'features/canvas/util/blobToDataURL';
import { RG_LAYER_OBJECT_GROUP_NAME } from 'features/controlLayers/store/controlLayersSlice';
import Konva from 'konva';
import type { IRect } from 'konva/lib/types';
import { assert } from 'tsafe';
import { RG_LAYER_OBJECT_GROUP_NAME } from './naming';
const GET_CLIENT_RECT_CONFIG = { skipTransform: true };
type Extents = {
minX: number;
@@ -13,13 +14,10 @@ type Extents = {
maxY: number;
};
const GET_CLIENT_RECT_CONFIG = { skipTransform: true };
//#region getImageDataBbox
/**
* Get the bounding box of an image.
* @param imageData The ImageData object to get the bounding box of.
* @returns The minimum and maximum x and y values of the image's bounding box, or null if the image has no pixels.
* @returns The minimum and maximum x and y values of the image's bounding box.
*/
const getImageDataBbox = (imageData: ImageData): Extents | null => {
const { data, width, height } = imageData;
@@ -53,9 +51,7 @@ const getImageDataBbox = (imageData: ImageData): Extents | null => {
return isEmpty ? null : { minX, minY, maxX, maxY };
};
//#endregion
//#region getIsolatedRGLayerClone
/**
* Clones a regional guidance konva layer onto an offscreen stage/canvas. This allows the pixel data for a given layer
* to be captured, manipulated or analyzed without interference from other layers.
@@ -92,9 +88,7 @@ const getIsolatedRGLayerClone = (layer: Konva.Layer): { stageClone: Konva.Stage;
return { stageClone, layerClone };
};
//#endregion
//#region getLayerBboxPixels
/**
* Get the bounding box of a regional prompt konva layer. This function has special handling for regional prompt layers.
* @param layer The konva layer to get the bounding box of.
@@ -143,9 +137,7 @@ export const getLayerBboxPixels = (layer: Konva.Layer, preview: boolean = false)
return correctedLayerBbox;
};
//#endregion
//#region getLayerBboxFast
/**
* Get the bounding box of a konva layer. This function is faster than `getLayerBboxPixels` but less accurate. It
* should only be used when there are no eraser strokes or shapes in the layer.
@@ -161,4 +153,3 @@ export const getLayerBboxFast = (layer: Konva.Layer): IRect => {
height: Math.floor(bbox.height),
};
};
//#endregion
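
Per the doc comments above, the two bbox helpers trade accuracy for speed. A minimal sketch of choosing between them — `getBestBbox` and `hasEraserStrokes` are hypothetical names, and the pixel-accurate variant is assumed to return `null` for empty layers, as its implementation suggests:

```typescript
// Sketch: use the pixel-accurate bbox only when eraser strokes may have carved out transparent areas
const getBestBbox = (layer: Konva.Layer, hasEraserStrokes: boolean): IRect | null =>
  hasEraserStrokes ? getLayerBboxPixels(layer) : getLayerBboxFast(layer);
```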

View File

@@ -1,13 +1,7 @@
import { deepClone } from 'common/util/deepClone';
import { zModelIdentifierField } from 'features/nodes/types/common';
import { merge, omit } from 'lodash-es';
import type {
AnyInvocation,
BaseModelType,
ControlNetModelConfig,
ImageDTO,
T2IAdapterModelConfig,
} from 'services/api/types';
import type { BaseModelType, ControlNetModelConfig, Graph, ImageDTO, T2IAdapterModelConfig } from 'services/api/types';
import { z } from 'zod';
const zId = z.string().min(1);
@@ -153,7 +147,7 @@ const zBeginEndStepPct = z
const zControlAdapterBase = z.object({
id: zId,
weight: z.number().gte(-1).lte(2),
weight: z.number().gte(0).lte(1),
image: zImageWithDims.nullable(),
processedImage: zImageWithDims.nullable(),
processorConfig: zProcessorConfig.nullable(),
@@ -189,7 +183,7 @@ export const isIPMethodV2 = (v: unknown): v is IPMethodV2 => zIPMethodV2.safePar
export const zIPAdapterConfigV2 = z.object({
id: zId,
type: z.literal('ip_adapter'),
weight: z.number().gte(-1).lte(2),
weight: z.number().gte(0).lte(1),
method: zIPMethodV2,
image: zImageWithDims.nullable(),
model: zModelIdentifierField.nullable(),
@@ -222,7 +216,10 @@ type ProcessorData<T extends ProcessorTypeV2> = {
labelTKey: string;
descriptionTKey: string;
buildDefaults(baseModel?: BaseModelType): Extract<ProcessorConfig, { type: T }>;
buildNode(image: ImageWithDims, config: Extract<ProcessorConfig, { type: T }>): Extract<AnyInvocation, { type: T }>;
buildNode(
image: ImageWithDims,
config: Extract<ProcessorConfig, { type: T }>
): Extract<Graph['nodes'][string], { type: T }>;
};
const minDim = (image: ImageWithDims): number => Math.min(image.width, image.height);

View File

@@ -0,0 +1,66 @@
import { getStore } from 'app/store/nanostores/store';
import openBase64ImageInTab from 'common/util/openBase64ImageInTab';
import { blobToDataURL } from 'features/canvas/util/blobToDataURL';
import { isRegionalGuidanceLayer, RG_LAYER_NAME } from 'features/controlLayers/store/controlLayersSlice';
import { renderers } from 'features/controlLayers/util/renderers';
import Konva from 'konva';
import { assert } from 'tsafe';
/**
* Get the blobs of all regional prompt layers. Only visible layers are returned.
* @param layerIds The IDs of the layers to get blobs for. If not provided, all regional prompt layers are used.
* @param preview Whether to open a new tab displaying each layer.
* @returns A map of layer IDs to blobs.
*/
export const getRegionalPromptLayerBlobs = async (
layerIds?: string[],
preview: boolean = false
): Promise<Record<string, Blob>> => {
const state = getStore().getState();
const { layers } = state.controlLayers.present;
const { width, height } = state.controlLayers.present.size;
const reduxLayers = layers.filter(isRegionalGuidanceLayer);
const container = document.createElement('div');
const stage = new Konva.Stage({ container, width, height });
renderers.renderLayers(stage, reduxLayers, 1, 'brush');
const konvaLayers = stage.find<Konva.Layer>(`.${RG_LAYER_NAME}`);
const blobs: Record<string, Blob> = {};
// First remove all layers
for (const layer of konvaLayers) {
layer.remove();
}
// Next render each layer to a blob
for (const layer of konvaLayers) {
if (layerIds && !layerIds.includes(layer.id())) {
continue;
}
const reduxLayer = reduxLayers.find((l) => l.id === layer.id());
assert(reduxLayer, `Redux layer ${layer.id()} not found`);
stage.add(layer);
const blob = await new Promise<Blob>((resolve) => {
stage.toBlob({
callback: (blob) => {
assert(blob, 'Blob is null');
resolve(blob);
},
});
});
if (preview) {
const base64 = await blobToDataURL(blob);
openBase64ImageInTab([
{
base64,
caption: `${reduxLayer.id}: ${reduxLayer.positivePrompt} / ${reduxLayer.negativePrompt}`,
},
]);
}
layer.remove();
blobs[layer.id()] = blob;
}
return blobs;
};
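
A minimal usage sketch for the helper above, run from an async context, fetching blobs for every regional guidance layer and opening a preview tab for each:

```typescript
// Sketch: fetch all regional guidance layer blobs with previews enabled
const blobs = await getRegionalPromptLayerBlobs(undefined, true);
for (const [layerId, blob] of Object.entries(blobs)) {
  console.log(`${layerId}: ${blob.size} bytes`);
}
```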

View File

@@ -1,7 +1,8 @@
import { getStore } from 'app/store/nanostores/store';
import { rgbaColorToString, rgbColorToString } from 'features/canvas/util/colorToString';
import { getLayerBboxFast, getLayerBboxPixels } from 'features/controlLayers/konva/bbox';
import { LightnessToAlphaFilter } from 'features/controlLayers/konva/filters';
import { getScaledFlooredCursorPosition, snapPosToStage } from 'features/controlLayers/hooks/mouseEventHooks';
import {
$tool,
BACKGROUND_LAYER_ID,
BACKGROUND_RECT_ID,
CA_LAYER_IMAGE_NAME,
@@ -13,6 +14,10 @@ import {
getRGLayerObjectGroupId,
INITIAL_IMAGE_LAYER_IMAGE_NAME,
INITIAL_IMAGE_LAYER_NAME,
isControlAdapterLayer,
isInitialImageLayer,
isRegionalGuidanceLayer,
isRenderableLayer,
LAYER_BBOX_NAME,
NO_LAYERS_MESSAGE_LAYER_ID,
RG_LAYER_LINE_NAME,
@@ -25,13 +30,6 @@ import {
TOOL_PREVIEW_BRUSH_GROUP_ID,
TOOL_PREVIEW_LAYER_ID,
TOOL_PREVIEW_RECT_ID,
} from 'features/controlLayers/konva/naming';
import { getScaledFlooredCursorPosition, snapPosToStage } from 'features/controlLayers/konva/util';
import {
isControlAdapterLayer,
isInitialImageLayer,
isRegionalGuidanceLayer,
isRenderableLayer,
} from 'features/controlLayers/store/controlLayersSlice';
import type {
ControlAdapterLayer,
@@ -42,46 +40,61 @@ import type {
VectorMaskLine,
VectorMaskRect,
} from 'features/controlLayers/store/types';
import { getLayerBboxFast, getLayerBboxPixels } from 'features/controlLayers/util/bbox';
import { t } from 'i18next';
import Konva from 'konva';
import type { IRect, Vector2d } from 'konva/lib/types';
import { debounce } from 'lodash-es';
import type { RgbColor } from 'react-colorful';
import type { ImageDTO } from 'services/api/types';
import { imagesApi } from 'services/api/endpoints/images';
import { assert } from 'tsafe';
import { v4 as uuidv4 } from 'uuid';
import {
BBOX_SELECTED_STROKE,
BRUSH_BORDER_INNER_COLOR,
BRUSH_BORDER_OUTER_COLOR,
TRANSPARENCY_CHECKER_PATTERN,
} from './constants';
const BBOX_SELECTED_STROKE = 'rgba(78, 190, 255, 1)';
const BRUSH_BORDER_INNER_COLOR = 'rgba(0,0,0,1)';
const BRUSH_BORDER_OUTER_COLOR = 'rgba(255,255,255,0.8)';
// This is invokeai/frontend/web/public/assets/images/transparent_bg.png as a dataURL
const STAGE_BG_DATAURL =
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAIAAAAC64paAAAEsmlUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPD94cGFja2V0IGJlZ2luPSLvu78iIGlkPSJXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQiPz4KPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iWE1QIENvcmUgNS41LjAiPgogPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIgogICAgeG1sbnM6ZXhpZj0iaHR0cDovL25zLmFkb2JlLmNvbS9leGlmLzEuMC8iCiAgICB4bWxuczp0aWZmPSJodHRwOi8vbnMuYWRvYmUuY29tL3RpZmYvMS4wLyIKICAgIHhtbG5zOnBob3Rvc2hvcD0iaHR0cDovL25zLmFkb2JlLmNvbS9waG90b3Nob3AvMS4wLyIKICAgIHhtbG5zOnhtcD0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wLyIKICAgIHhtbG5zOnhtcE1NPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvbW0vIgogICAgeG1sbnM6c3RFdnQ9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZUV2ZW50IyIKICAgZXhpZjpQaXhlbFhEaW1lbnNpb249IjIwIgogICBleGlmOlBpeGVsWURpbWVuc2lvbj0iMjAiCiAgIGV4aWY6Q29sb3JTcGFjZT0iMSIKICAgdGlmZjpJbWFnZVdpZHRoPSIyMCIKICAgdGlmZjpJbWFnZUxlbmd0aD0iMjAiCiAgIHRpZmY6UmVzb2x1dGlvblVuaXQ9IjIiCiAgIHRpZmY6WFJlc29sdXRpb249IjMwMC8xIgogICB0aWZmOllSZXNvbHV0aW9uPSIzMDAvMSIKICAgcGhvdG9zaG9wOkNvbG9yTW9kZT0iMyIKICAgcGhvdG9zaG9wOklDQ1Byb2ZpbGU9InNSR0IgSUVDNjE5NjYtMi4xIgogICB4bXA6TW9kaWZ5RGF0ZT0iMjAyNC0wNC0yM1QwODoyMDo0NysxMDowMCIKICAgeG1wOk1ldGFkYXRhRGF0ZT0iMjAyNC0wNC0yM1QwODoyMDo0NysxMDowMCI+CiAgIDx4bXBNTTpIaXN0b3J5PgogICAgPHJkZjpTZXE+CiAgICAgPHJkZjpsaQogICAgICBzdEV2dDphY3Rpb249InByb2R1Y2VkIgogICAgICBzdEV2dDpzb2Z0d2FyZUFnZW50PSJBZmZpbml0eSBQaG90byAxLjEwLjgiCiAgICAgIHN0RXZ0OndoZW49IjIwMjQtMDQtMjNUMDg6MjA6NDcrMTA6MDAiLz4KICAgIDwvcmRmOlNlcT4KICAgPC94bXBNTTpIaXN0b3J5PgogIDwvcmRmOkRlc2NyaXB0aW9uPgogPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4KPD94cGFja2V0IGVuZD0iciI/Pn9pdVgAAAGBaUNDUHNSR0IgSUVDNjE5NjYtMi4xAAAokXWR3yuDURjHP5uJmKghFy6WxpVpqMWNMgm1tGbKr5vt3S+1d3t73y3JrXKrKHHj1wV/AbfKtVJESq53TdywXs9rakv2nJ7zfM73nOfpnOeAPZJRVMPhAzWb18NTAffC4pK7oYiDTjpw4YgqhjYeCgWpaR8P2Kx457Vq1T73rzXHE4YCtkbhMUXT88LTwsG1vGbxrnC7ko7Ghc+F+3W5oPC9pcfKXLQ4VeYvi/VIeALsbcLuVBXHqlhJ66qwvByPmikov/exXuJMZOfnJPaId2MQZooAbmaYZAI/g4zK7MfLEAOyoka+7yd/lpzkKjJrrKOzSoo0efpFLUj1hMSk6AkZGdat/v/tq5EcHipXdwag/sU033qhYQdK26b5eWyapROoe4arbCU/dwQj76JvVzTPIbRuwsV1RYvtweUWdD1pUT36I9WJ25NJeD2DlkVw3ULTcrlnv/ucPkJkQ77qBvYPoE/Ot658AxagZ8FoS/a7AAAACXBIWXMAAC4jAAAuIwF4pT92AAAAL0lEQVQ4jWM8ffo0A25gYmKCR5YJjxxBMKp5ZGhm/P//Px7pM2fO0MrmUc0jQzMAB2EIhZC3pUYAAAAASUVORK5CYII=';
const mapId = (object: { id: string }): string => object.id;
const mapId = (object: { id: string }) => object.id;
/**
* Konva selection callback to select all renderable layers. This includes RG, CA and II layers.
*/
const selectRenderableLayers = (n: Konva.Node): boolean =>
const selectRenderableLayers = (n: Konva.Node) =>
n.name() === RG_LAYER_NAME || n.name() === CA_LAYER_NAME || n.name() === INITIAL_IMAGE_LAYER_NAME;
/**
* Konva selection callback to select RG mask objects. This includes lines and rects.
*/
const selectVectorMaskObjects = (node: Konva.Node): boolean => {
const selectVectorMaskObjects = (node: Konva.Node) => {
return node.name() === RG_LAYER_LINE_NAME || node.name() === RG_LAYER_RECT_NAME;
};
/**
* Creates the singleton tool preview layer and all its objects.
* @param stage The konva stage
* Creates the brush preview layer.
* @param stage The konva stage to render on.
* @returns The brush preview layer.
*/
const createToolPreviewLayer = (stage: Konva.Stage): Konva.Layer => {
const createToolPreviewLayer = (stage: Konva.Stage) => {
// Initialize the brush preview layer & add to the stage
const toolPreviewLayer = new Konva.Layer({ id: TOOL_PREVIEW_LAYER_ID, visible: false, listening: false });
stage.add(toolPreviewLayer);
// Add handlers to show/hide the brush preview layer
stage.on('mousemove', (e) => {
const tool = $tool.get();
e.target
.getStage()
?.findOne<Konva.Layer>(`#${TOOL_PREVIEW_LAYER_ID}`)
?.visible(tool === 'brush' || tool === 'eraser');
});
stage.on('mouseleave', (e) => {
e.target.getStage()?.findOne<Konva.Layer>(`#${TOOL_PREVIEW_LAYER_ID}`)?.visible(false);
});
stage.on('mouseenter', (e) => {
const tool = $tool.get();
e.target
.getStage()
?.findOne<Konva.Layer>(`#${TOOL_PREVIEW_LAYER_ID}`)
?.visible(tool === 'brush' || tool === 'eraser');
});
// Create the brush preview group & circles
const brushPreviewGroup = new Konva.Group({ id: TOOL_PREVIEW_BRUSH_GROUP_ID });
const brushPreviewFill = new Konva.Circle({
@@ -108,7 +121,7 @@ const createToolPreviewLayer = (stage: Konva.Stage): Konva.Layer => {
brushPreviewGroup.add(brushPreviewBorderOuter);
toolPreviewLayer.add(brushPreviewGroup);
// Create the rect preview - this is a rectangle drawn from the last mouse down position to the current cursor position
// Create the rect preview
const rectPreview = new Konva.Rect({ id: TOOL_PREVIEW_RECT_ID, listening: false, stroke: 'white', strokeWidth: 1 });
toolPreviewLayer.add(rectPreview);
@@ -117,14 +130,12 @@ const createToolPreviewLayer = (stage: Konva.Stage): Konva.Layer => {
/**
* Renders the brush preview for the selected tool.
* @param stage The konva stage
* @param tool The selected tool
* @param color The selected layer's color
* @param selectedLayerType The selected layer's type
* @param globalMaskLayerOpacity The global mask layer opacity
* @param cursorPos The cursor position
* @param lastMouseDownPos The position of the last mouse down event - used for the rect tool
* @param brushSize The brush size
* @param stage The konva stage to render on.
* @param tool The selected tool.
* @param color The selected layer's color.
* @param cursorPos The cursor position.
* @param lastMouseDownPos The position of the last mouse down event - used for the rect tool.
* @param brushSize The brush size.
*/
const renderToolPreview = (
stage: Konva.Stage,
@@ -135,7 +146,7 @@ const renderToolPreview = (
cursorPos: Vector2d | null,
lastMouseDownPos: Vector2d | null,
brushSize: number
): void => {
) => {
const layerCount = stage.find(selectRenderableLayers).length;
// Update the stage's pointer style
if (layerCount === 0) {
@@ -151,7 +162,7 @@ const renderToolPreview = (
// Move rect gets a crosshair
stage.container().style.cursor = 'crosshair';
} else {
// Else we hide the native cursor and use the konva-rendered brush preview
// Else we use the brush preview
stage.container().style.cursor = 'none';
}
@@ -216,29 +227,28 @@ const renderToolPreview = (
};
/**
* Creates a regional guidance layer.
* @param stage The konva stage
* @param layerState The regional guidance layer state
* @param onLayerPosChanged Callback for when the layer's position changes
* Creates a vector mask layer.
* @param stage The konva stage to attach the layer to.
* @param reduxLayer The redux layer to create the konva layer from.
* @param onLayerPosChanged Callback for when the layer's position changes.
*/
const createRGLayer = (
const createRegionalGuidanceLayer = (
stage: Konva.Stage,
layerState: RegionalGuidanceLayer,
reduxLayer: RegionalGuidanceLayer,
onLayerPosChanged?: (layerId: string, x: number, y: number) => void
): Konva.Layer => {
) => {
// This layer hasn't been added to the konva state yet
const konvaLayer = new Konva.Layer({
id: layerState.id,
id: reduxLayer.id,
name: RG_LAYER_NAME,
draggable: true,
dragDistance: 0,
});
// When a drag on the layer finishes, update the layer's position in state. During the drag, konva handles changing
// the position - we do not need to call this on the `dragmove` event.
// Create a `dragend` listener for this layer
if (onLayerPosChanged) {
konvaLayer.on('dragend', function (e) {
onLayerPosChanged(layerState.id, Math.floor(e.target.x()), Math.floor(e.target.y()));
onLayerPosChanged(reduxLayer.id, Math.floor(e.target.x()), Math.floor(e.target.y()));
});
}
@@ -248,7 +258,7 @@ const createRGLayer = (
if (!cursorPos) {
return this.getAbsolutePosition();
}
// Prevent the user from dragging the layer out of the stage bounds by constraining the cursor position to the stage bounds
// Prevent the user from dragging the layer out of the stage bounds.
if (
cursorPos.x < 0 ||
cursorPos.x > stage.width() / stage.scaleX() ||
@@ -262,7 +272,7 @@ const createRGLayer = (
// The object group holds all of the layer's objects (e.g. lines and rects)
const konvaObjectGroup = new Konva.Group({
id: getRGLayerObjectGroupId(layerState.id, uuidv4()),
id: getRGLayerObjectGroupId(reduxLayer.id, uuidv4()),
name: RG_LAYER_OBJECT_GROUP_NAME,
listening: false,
});
@@ -274,51 +284,47 @@ const createRGLayer = (
};
/**
* Creates a konva line from a vector mask line.
* @param vectorMaskLine The vector mask line state
* @param layerObjectGroup The konva layer's object group to add the line to
* Creates a konva line from a redux vector mask line.
* @param reduxObject The redux object to create the konva line from.
* @param konvaGroup The konva group to add the line to.
*/
const createVectorMaskLine = (vectorMaskLine: VectorMaskLine, layerObjectGroup: Konva.Group): Konva.Line => {
const konvaLine = new Konva.Line({
id: vectorMaskLine.id,
key: vectorMaskLine.id,
const createVectorMaskLine = (reduxObject: VectorMaskLine, konvaGroup: Konva.Group): Konva.Line => {
const vectorMaskLine = new Konva.Line({
id: reduxObject.id,
key: reduxObject.id,
name: RG_LAYER_LINE_NAME,
strokeWidth: vectorMaskLine.strokeWidth,
strokeWidth: reduxObject.strokeWidth,
tension: 0,
lineCap: 'round',
lineJoin: 'round',
shadowForStrokeEnabled: false,
globalCompositeOperation: vectorMaskLine.tool === 'brush' ? 'source-over' : 'destination-out',
globalCompositeOperation: reduxObject.tool === 'brush' ? 'source-over' : 'destination-out',
listening: false,
});
layerObjectGroup.add(konvaLine);
return konvaLine;
konvaGroup.add(vectorMaskLine);
return vectorMaskLine;
};
/**
* Creates a konva rect from a vector mask rect.
* @param vectorMaskRect The vector mask rect state
* @param layerObjectGroup The konva layer's object group to add the line to
* Creates a konva rect from a redux vector mask rect.
* @param reduxObject The redux object to create the konva rect from.
* @param konvaGroup The konva group to add the rect to.
*/
const createVectorMaskRect = (vectorMaskRect: VectorMaskRect, layerObjectGroup: Konva.Group): Konva.Rect => {
const konvaRect = new Konva.Rect({
id: vectorMaskRect.id,
key: vectorMaskRect.id,
const createVectorMaskRect = (reduxObject: VectorMaskRect, konvaGroup: Konva.Group): Konva.Rect => {
const vectorMaskRect = new Konva.Rect({
id: reduxObject.id,
key: reduxObject.id,
name: RG_LAYER_RECT_NAME,
x: vectorMaskRect.x,
y: vectorMaskRect.y,
width: vectorMaskRect.width,
height: vectorMaskRect.height,
x: reduxObject.x,
y: reduxObject.y,
width: reduxObject.width,
height: reduxObject.height,
listening: false,
});
layerObjectGroup.add(konvaRect);
return konvaRect;
konvaGroup.add(vectorMaskRect);
return vectorMaskRect;
};
/**
* Creates the "compositing rect" for a layer.
* @param konvaLayer The konva layer
*/
const createCompositingRect = (konvaLayer: Konva.Layer): Konva.Rect => {
const compositingRect = new Konva.Rect({ name: COMPOSITING_RECT_NAME, listening: false });
konvaLayer.add(compositingRect);
@@ -326,41 +332,41 @@ const createCompositingRect = (konvaLayer: Konva.Layer): Konva.Rect => {
};
/**
* Renders a regional guidance layer.
* @param stage The konva stage
* @param layerState The regional guidance layer state
* @param globalMaskLayerOpacity The global mask layer opacity
* @param tool The current tool
* @param onLayerPosChanged Callback for when the layer's position changes
* Renders a vector mask layer.
* @param stage The konva stage to render on.
* @param reduxLayer The redux vector mask layer to render.
* @param reduxLayerIndex The index of the layer in the redux store.
* @param globalMaskLayerOpacity The opacity of the global mask layer.
* @param tool The current tool.
*/
const renderRGLayer = (
const renderRegionalGuidanceLayer = (
stage: Konva.Stage,
layerState: RegionalGuidanceLayer,
reduxLayer: RegionalGuidanceLayer,
globalMaskLayerOpacity: number,
tool: Tool,
onLayerPosChanged?: (layerId: string, x: number, y: number) => void
): void => {
const konvaLayer =
stage.findOne<Konva.Layer>(`#${layerState.id}`) ?? createRGLayer(stage, layerState, onLayerPosChanged);
stage.findOne<Konva.Layer>(`#${reduxLayer.id}`) ??
createRegionalGuidanceLayer(stage, reduxLayer, onLayerPosChanged);
// Update the layer's position and listening state
konvaLayer.setAttrs({
listening: tool === 'move', // The layer only listens when using the move tool - otherwise the stage is handling mouse events
x: Math.floor(layerState.x),
y: Math.floor(layerState.y),
x: Math.floor(reduxLayer.x),
y: Math.floor(reduxLayer.y),
});
// Convert the color to a string, stripping the alpha - the object group will handle opacity.
const rgbColor = rgbColorToString(layerState.previewColor);
const rgbColor = rgbColorToString(reduxLayer.previewColor);
const konvaObjectGroup = konvaLayer.findOne<Konva.Group>(`.${RG_LAYER_OBJECT_GROUP_NAME}`);
assert(konvaObjectGroup, `Object group not found for layer ${layerState.id}`);
assert(konvaObjectGroup, `Object group not found for layer ${reduxLayer.id}`);
// We use caching to handle "global" layer opacity, but caching is expensive and we should only do it when required.
let groupNeedsCache = false;
const objectIds = layerState.maskObjects.map(mapId);
// Destroy any objects that are no longer in the redux state
const objectIds = reduxLayer.maskObjects.map(mapId);
for (const objectNode of konvaObjectGroup.find(selectVectorMaskObjects)) {
if (!objectIds.includes(objectNode.id())) {
objectNode.destroy();
@@ -368,15 +374,15 @@ const renderRGLayer = (
}
}
for (const maskObject of layerState.maskObjects) {
if (maskObject.type === 'vector_mask_line') {
for (const reduxObject of reduxLayer.maskObjects) {
if (reduxObject.type === 'vector_mask_line') {
const vectorMaskLine =
stage.findOne<Konva.Line>(`#${maskObject.id}`) ?? createVectorMaskLine(maskObject, konvaObjectGroup);
stage.findOne<Konva.Line>(`#${reduxObject.id}`) ?? createVectorMaskLine(reduxObject, konvaObjectGroup);
// Only update the points if they have changed. The point values are never mutated, they are only added to the
// array, so checking the length is sufficient to determine if we need to re-cache.
if (vectorMaskLine.points().length !== maskObject.points.length) {
vectorMaskLine.points(maskObject.points);
if (vectorMaskLine.points().length !== reduxObject.points.length) {
vectorMaskLine.points(reduxObject.points);
groupNeedsCache = true;
}
// Only update the color if it has changed.
@@ -384,9 +390,9 @@ const renderRGLayer = (
vectorMaskLine.stroke(rgbColor);
groupNeedsCache = true;
}
} else if (maskObject.type === 'vector_mask_rect') {
} else if (reduxObject.type === 'vector_mask_rect') {
const konvaObject =
stage.findOne<Konva.Rect>(`#${maskObject.id}`) ?? createVectorMaskRect(maskObject, konvaObjectGroup);
stage.findOne<Konva.Rect>(`#${reduxObject.id}`) ?? createVectorMaskRect(reduxObject, konvaObjectGroup);
// Only update the color if it has changed.
if (konvaObject.fill() !== rgbColor) {
@@ -397,8 +403,8 @@ const renderRGLayer = (
}
// Only update layer visibility if it has changed.
if (konvaLayer.visible() !== layerState.isEnabled) {
konvaLayer.visible(layerState.isEnabled);
if (konvaLayer.visible() !== reduxLayer.isEnabled) {
konvaLayer.visible(reduxLayer.isEnabled);
groupNeedsCache = true;
}
@@ -422,7 +428,7 @@ const renderRGLayer = (
* Instead, with the special handling, the effect is as if you drew all the shapes at 100% opacity, flattened them to
* a single raster image, and _then_ applied the 50% opacity.
*/
if (layerState.isSelected && tool !== 'move') {
if (reduxLayer.isSelected && tool !== 'move') {
// We must clear the cache first so Konva will re-draw the group with the new compositing rect
if (konvaObjectGroup.isCached()) {
konvaObjectGroup.clearCache();
@@ -432,7 +438,7 @@ const renderRGLayer = (
compositingRect.setAttrs({
// The rect should be the size of the layer - use the fast method if we don't have a pixel-perfect bbox already
...(!layerState.bboxNeedsUpdate && layerState.bbox ? layerState.bbox : getLayerBboxFast(konvaLayer)),
...(!reduxLayer.bboxNeedsUpdate && reduxLayer.bbox ? reduxLayer.bbox : getLayerBboxFast(konvaLayer)),
fill: rgbColor,
opacity: globalMaskLayerOpacity,
// Draw this rect only where there are non-transparent pixels under it (e.g. the mask shapes)
@@ -453,14 +459,9 @@ const renderRGLayer = (
}
};
/**
* Creates an initial image konva layer.
* @param stage The konva stage
* @param layerState The initial image layer state
*/
const createIILayer = (stage: Konva.Stage, layerState: InitialImageLayer): Konva.Layer => {
const createInitialImageLayer = (stage: Konva.Stage, reduxLayer: InitialImageLayer): Konva.Layer => {
const konvaLayer = new Konva.Layer({
id: layerState.id,
id: reduxLayer.id,
name: INITIAL_IMAGE_LAYER_NAME,
imageSmoothingEnabled: true,
listening: false,
@@ -469,27 +470,20 @@ const createIILayer = (stage: Konva.Stage, layerState: InitialImageLayer): Konva
return konvaLayer;
};
/**
* Creates the konva image for an initial image layer.
* @param konvaLayer The konva layer
* @param imageEl The image element
*/
const createIILayerImage = (konvaLayer: Konva.Layer, imageEl: HTMLImageElement): Konva.Image => {
const createInitialImageLayerImage = (konvaLayer: Konva.Layer, image: HTMLImageElement): Konva.Image => {
const konvaImage = new Konva.Image({
name: INITIAL_IMAGE_LAYER_IMAGE_NAME,
image: imageEl,
image,
});
konvaLayer.add(konvaImage);
return konvaImage;
};
/**
* Updates an initial image layer's attributes (width, height, opacity, visibility).
* @param stage The konva stage
* @param konvaImage The konva image
* @param layerState The initial image layer state
*/
const updateIILayerImageAttrs = (stage: Konva.Stage, konvaImage: Konva.Image, layerState: InitialImageLayer): void => {
const updateInitialImageLayerImageAttrs = (
stage: Konva.Stage,
konvaImage: Konva.Image,
reduxLayer: InitialImageLayer
) => {
// Konva erroneously reports NaN for width and height when the stage is hidden. This causes errors when caching,
// but it doesn't seem to break anything.
// TODO(psyche): Investigate and report upstream.
@@ -498,55 +492,46 @@ const updateIILayerImageAttrs = (stage: Konva.Stage, konvaImage: Konva.Image, la
if (
konvaImage.width() !== newWidth ||
konvaImage.height() !== newHeight ||
konvaImage.visible() !== layerState.isEnabled
konvaImage.visible() !== reduxLayer.isEnabled
) {
konvaImage.setAttrs({
opacity: layerState.opacity,
opacity: reduxLayer.opacity,
scaleX: 1,
scaleY: 1,
width: stage.width() / stage.scaleX(),
height: stage.height() / stage.scaleY(),
visible: layerState.isEnabled,
visible: reduxLayer.isEnabled,
});
}
if (konvaImage.opacity() !== layerState.opacity) {
konvaImage.opacity(layerState.opacity);
if (konvaImage.opacity() !== reduxLayer.opacity) {
konvaImage.opacity(reduxLayer.opacity);
}
};
/**
* Update an initial image layer's image source when the image changes.
* @param stage The konva stage
* @param konvaLayer The konva layer
* @param layerState The initial image layer state
* @param getImageDTO A function to retrieve an image DTO from the server, used to update the image source
*/
const updateIILayerImageSource = async (
const updateInitialImageLayerImageSource = async (
stage: Konva.Stage,
konvaLayer: Konva.Layer,
layerState: InitialImageLayer,
getImageDTO: (imageName: string) => Promise<ImageDTO | null>
): Promise<void> => {
if (layerState.image) {
const imageName = layerState.image.name;
const imageDTO = await getImageDTO(imageName);
if (!imageDTO) {
return;
}
reduxLayer: InitialImageLayer
) => {
if (reduxLayer.image) {
const imageName = reduxLayer.image.name;
const req = getStore().dispatch(imagesApi.endpoints.getImageDTO.initiate(imageName));
const imageDTO = await req.unwrap();
req.unsubscribe();
const imageEl = new Image();
const imageId = getIILayerImageId(layerState.id, imageName);
const imageId = getIILayerImageId(reduxLayer.id, imageName);
imageEl.onload = () => {
// Find the existing image or create a new one - must find using the name, bc the id may have just changed
const konvaImage =
konvaLayer.findOne<Konva.Image>(`.${INITIAL_IMAGE_LAYER_IMAGE_NAME}`) ??
createIILayerImage(konvaLayer, imageEl);
createInitialImageLayerImage(konvaLayer, imageEl);
// Update the image's attributes
konvaImage.setAttrs({
id: imageId,
image: imageEl,
});
updateIILayerImageAttrs(stage, konvaImage, layerState);
updateInitialImageLayerImageAttrs(stage, konvaImage, reduxLayer);
imageEl.id = imageId;
};
imageEl.src = imageDTO.image_url;
@@ -555,24 +540,14 @@ const updateIILayerImageSource = async (
}
};
/**
* Renders an initial image layer.
* @param stage The konva stage
* @param layerState The initial image layer state
* @param getImageDTO A function to retrieve an image DTO from the server, used to update the image source
*/
const renderIILayer = (
stage: Konva.Stage,
layerState: InitialImageLayer,
getImageDTO: (imageName: string) => Promise<ImageDTO | null>
): void => {
const konvaLayer = stage.findOne<Konva.Layer>(`#${layerState.id}`) ?? createIILayer(stage, layerState);
const renderInitialImageLayer = (stage: Konva.Stage, reduxLayer: InitialImageLayer) => {
const konvaLayer = stage.findOne<Konva.Layer>(`#${reduxLayer.id}`) ?? createInitialImageLayer(stage, reduxLayer);
const konvaImage = konvaLayer.findOne<Konva.Image>(`.${INITIAL_IMAGE_LAYER_IMAGE_NAME}`);
const canvasImageSource = konvaImage?.image();
let imageSourceNeedsUpdate = false;
if (canvasImageSource instanceof HTMLImageElement) {
const image = layerState.image;
if (image && canvasImageSource.id !== getCALayerImageId(layerState.id, image.name)) {
const image = reduxLayer.image;
if (image && canvasImageSource.id !== getCALayerImageId(reduxLayer.id, image.name)) {
imageSourceNeedsUpdate = true;
} else if (!image) {
imageSourceNeedsUpdate = true;
@@ -582,20 +557,15 @@ const renderIILayer = (
}
if (imageSourceNeedsUpdate) {
updateIILayerImageSource(stage, konvaLayer, layerState, getImageDTO);
updateInitialImageLayerImageSource(stage, konvaLayer, reduxLayer);
} else if (konvaImage) {
updateIILayerImageAttrs(stage, konvaImage, layerState);
updateInitialImageLayerImageAttrs(stage, konvaImage, reduxLayer);
}
};
/**
* Creates a control adapter layer.
* @param stage The konva stage
* @param layerState The control adapter layer state
*/
const createCALayer = (stage: Konva.Stage, layerState: ControlAdapterLayer): Konva.Layer => {
const createControlNetLayer = (stage: Konva.Stage, reduxLayer: ControlAdapterLayer): Konva.Layer => {
const konvaLayer = new Konva.Layer({
id: layerState.id,
id: reduxLayer.id,
name: CA_LAYER_NAME,
imageSmoothingEnabled: true,
listening: false,
@@ -604,53 +574,39 @@ const createCALayer = (stage: Konva.Stage, layerState: ControlAdapterLayer): Kon
return konvaLayer;
};
/**
* Creates a control adapter layer image.
* @param konvaLayer The konva layer
* @param imageEl The image element
*/
const createCALayerImage = (konvaLayer: Konva.Layer, imageEl: HTMLImageElement): Konva.Image => {
const createControlNetLayerImage = (konvaLayer: Konva.Layer, image: HTMLImageElement): Konva.Image => {
const konvaImage = new Konva.Image({
name: CA_LAYER_IMAGE_NAME,
image: imageEl,
image,
});
konvaLayer.add(konvaImage);
return konvaImage;
};
/**
* Updates the image source for a control adapter layer. This includes loading the image from the server and updating the konva image.
* @param stage The konva stage
* @param konvaLayer The konva layer
* @param layerState The control adapter layer state
* @param getImageDTO A function to retrieve an image DTO from the server, used to update the image source
*/
const updateCALayerImageSource = async (
const updateControlNetLayerImageSource = async (
stage: Konva.Stage,
konvaLayer: Konva.Layer,
layerState: ControlAdapterLayer,
getImageDTO: (imageName: string) => Promise<ImageDTO | null>
): Promise<void> => {
const image = layerState.controlAdapter.processedImage ?? layerState.controlAdapter.image;
reduxLayer: ControlAdapterLayer
) => {
const image = reduxLayer.controlAdapter.processedImage ?? reduxLayer.controlAdapter.image;
if (image) {
const imageName = image.name;
const imageDTO = await getImageDTO(imageName);
if (!imageDTO) {
return;
}
const req = getStore().dispatch(imagesApi.endpoints.getImageDTO.initiate(imageName));
const imageDTO = await req.unwrap();
req.unsubscribe();
const imageEl = new Image();
const imageId = getCALayerImageId(layerState.id, imageName);
const imageId = getCALayerImageId(reduxLayer.id, imageName);
imageEl.onload = () => {
// Find the existing image or create a new one - must find using the name, bc the id may have just changed
const konvaImage =
konvaLayer.findOne<Konva.Image>(`.${CA_LAYER_IMAGE_NAME}`) ?? createCALayerImage(konvaLayer, imageEl);
konvaLayer.findOne<Konva.Image>(`.${CA_LAYER_IMAGE_NAME}`) ?? createControlNetLayerImage(konvaLayer, imageEl);
// Update the image's attributes
konvaImage.setAttrs({
id: imageId,
image: imageEl,
});
updateCALayerImageAttrs(stage, konvaImage, layerState);
updateControlNetLayerImageAttrs(stage, konvaImage, reduxLayer);
// Must cache after this to apply the filters
konvaImage.cache();
imageEl.id = imageId;
@@ -661,17 +617,11 @@ const updateCALayerImageSource = async (
}
};
/**
* Updates the image attributes for a control adapter layer's image (width, height, visibility, opacity, filters).
* @param stage The konva stage
* @param konvaImage The konva image
* @param layerState The control adapter layer state
*/
const updateCALayerImageAttrs = (
const updateControlNetLayerImageAttrs = (
stage: Konva.Stage,
konvaImage: Konva.Image,
layerState: ControlAdapterLayer
): void => {
reduxLayer: ControlAdapterLayer
) => {
let needsCache = false;
// Konva erroneously reports NaN for width and height when the stage is hidden. This causes errors when caching,
// but it doesn't seem to break anything.
@@ -682,47 +632,36 @@ const updateCALayerImageAttrs = (
if (
konvaImage.width() !== newWidth ||
konvaImage.height() !== newHeight ||
konvaImage.visible() !== layerState.isEnabled ||
hasFilter !== layerState.isFilterEnabled
konvaImage.visible() !== reduxLayer.isEnabled ||
hasFilter !== reduxLayer.isFilterEnabled
) {
konvaImage.setAttrs({
opacity: layerState.opacity,
opacity: reduxLayer.opacity,
scaleX: 1,
scaleY: 1,
width: stage.width() / stage.scaleX(),
height: stage.height() / stage.scaleY(),
visible: layerState.isEnabled,
filters: layerState.isFilterEnabled ? [LightnessToAlphaFilter] : [],
visible: reduxLayer.isEnabled,
filters: reduxLayer.isFilterEnabled ? [LightnessToAlphaFilter] : [],
});
needsCache = true;
}
if (konvaImage.opacity() !== layerState.opacity) {
konvaImage.opacity(layerState.opacity);
if (konvaImage.opacity() !== reduxLayer.opacity) {
konvaImage.opacity(reduxLayer.opacity);
}
if (needsCache) {
konvaImage.cache();
}
};
/**
* Renders a control adapter layer. If the layer doesn't already exist, it is created. Otherwise, the layer is updated
* with the current image source and attributes.
* @param stage The konva stage
* @param layerState The control adapter layer state
* @param getImageDTO A function to retrieve an image DTO from the server, used to update the image source
*/
const renderCALayer = (
stage: Konva.Stage,
layerState: ControlAdapterLayer,
getImageDTO: (imageName: string) => Promise<ImageDTO | null>
): void => {
const konvaLayer = stage.findOne<Konva.Layer>(`#${layerState.id}`) ?? createCALayer(stage, layerState);
const renderControlNetLayer = (stage: Konva.Stage, reduxLayer: ControlAdapterLayer) => {
const konvaLayer = stage.findOne<Konva.Layer>(`#${reduxLayer.id}`) ?? createControlNetLayer(stage, reduxLayer);
const konvaImage = konvaLayer.findOne<Konva.Image>(`.${CA_LAYER_IMAGE_NAME}`);
const canvasImageSource = konvaImage?.image();
let imageSourceNeedsUpdate = false;
if (canvasImageSource instanceof HTMLImageElement) {
const image = layerState.controlAdapter.processedImage ?? layerState.controlAdapter.image;
if (image && canvasImageSource.id !== getCALayerImageId(layerState.id, image.name)) {
const image = reduxLayer.controlAdapter.processedImage ?? reduxLayer.controlAdapter.image;
if (image && canvasImageSource.id !== getCALayerImageId(reduxLayer.id, image.name)) {
imageSourceNeedsUpdate = true;
} else if (!image) {
imageSourceNeedsUpdate = true;
@@ -732,46 +671,44 @@ const renderCALayer = (
}
if (imageSourceNeedsUpdate) {
updateCALayerImageSource(stage, konvaLayer, layerState, getImageDTO);
updateControlNetLayerImageSource(stage, konvaLayer, reduxLayer);
} else if (konvaImage) {
updateCALayerImageAttrs(stage, konvaImage, layerState);
updateControlNetLayerImageAttrs(stage, konvaImage, reduxLayer);
}
};
/**
* Renders the layers on the stage.
* @param stage The konva stage
* @param layerStates Array of all layer states
* @param globalMaskLayerOpacity The global mask layer opacity
* @param tool The current tool
* @param getImageDTO A function to retrieve an image DTO from the server, used to update the image source
* @param onLayerPosChanged Callback for when the layer's position changes
* @param stage The konva stage to render on.
* @param reduxLayers Array of the layers from the redux store.
* @param layerOpacity The opacity of the layer.
* @param onLayerPosChanged Callback for when the layer's position changes. This is optional to allow for offscreen rendering.
* @returns
*/
const renderLayers = (
stage: Konva.Stage,
layerStates: Layer[],
reduxLayers: Layer[],
globalMaskLayerOpacity: number,
tool: Tool,
getImageDTO: (imageName: string) => Promise<ImageDTO | null>,
onLayerPosChanged?: (layerId: string, x: number, y: number) => void
): void => {
const layerIds = layerStates.filter(isRenderableLayer).map(mapId);
) => {
const reduxLayerIds = reduxLayers.filter(isRenderableLayer).map(mapId);
// Remove un-rendered layers
for (const konvaLayer of stage.find<Konva.Layer>(selectRenderableLayers)) {
if (!layerIds.includes(konvaLayer.id())) {
if (!reduxLayerIds.includes(konvaLayer.id())) {
konvaLayer.destroy();
}
}
for (const layer of layerStates) {
if (isRegionalGuidanceLayer(layer)) {
renderRGLayer(stage, layer, globalMaskLayerOpacity, tool, onLayerPosChanged);
for (const reduxLayer of reduxLayers) {
if (isRegionalGuidanceLayer(reduxLayer)) {
renderRegionalGuidanceLayer(stage, reduxLayer, globalMaskLayerOpacity, tool, onLayerPosChanged);
}
if (isControlAdapterLayer(layer)) {
renderCALayer(stage, layer, getImageDTO);
if (isControlAdapterLayer(reduxLayer)) {
renderControlNetLayer(stage, reduxLayer);
}
if (isInitialImageLayer(layer)) {
renderIILayer(stage, layer, getImageDTO);
if (isInitialImageLayer(reduxLayer)) {
renderInitialImageLayer(stage, reduxLayer);
}
// IP Adapter layers are not rendered
}
@@ -779,12 +716,13 @@ const renderLayers = (
/**
* Creates a bounding box rect for a layer.
* @param layerState The layer state for the layer to create the bounding box for
* @param konvaLayer The konva layer to attach the bounding box to
* @param reduxLayer The redux layer to create the bounding box for.
* @param konvaLayer The konva layer to attach the bounding box to.
* @param onBboxMouseDown Callback for when the bounding box is clicked.
*/
const createBboxRect = (layerState: Layer, konvaLayer: Konva.Layer): Konva.Rect => {
const createBboxRect = (reduxLayer: Layer, konvaLayer: Konva.Layer) => {
const rect = new Konva.Rect({
id: getLayerBboxId(layerState.id),
id: getLayerBboxId(reduxLayer.id),
name: LAYER_BBOX_NAME,
strokeWidth: 1,
visible: false,
@@ -795,12 +733,12 @@ const createBboxRect = (layerState: Layer, konvaLayer: Konva.Layer): Konva.Rect
/**
* Renders the bounding boxes for the layers.
* @param stage The konva stage
* @param layerStates An array of layers to draw bboxes for
* @param stage The konva stage to render on
* @param reduxLayers An array of all redux layers to draw bboxes for
* @param tool The current tool
* @returns
*/
const renderBboxes = (stage: Konva.Stage, layerStates: Layer[], tool: Tool): void => {
const renderBboxes = (stage: Konva.Stage, reduxLayers: Layer[], tool: Tool) => {
// Hide all bboxes so they don't interfere with getClientRect
for (const bboxRect of stage.find<Konva.Rect>(`.${LAYER_BBOX_NAME}`)) {
bboxRect.visible(false);
@@ -811,39 +749,39 @@ const renderBboxes = (stage: Konva.Stage, layerStates: Layer[], tool: Tool): voi
return;
}
for (const layer of layerStates.filter(isRegionalGuidanceLayer)) {
if (!layer.bbox) {
for (const reduxLayer of reduxLayers.filter(isRegionalGuidanceLayer)) {
if (!reduxLayer.bbox) {
continue;
}
const konvaLayer = stage.findOne<Konva.Layer>(`#${layer.id}`);
assert(konvaLayer, `Layer ${layer.id} not found in stage`);
const konvaLayer = stage.findOne<Konva.Layer>(`#${reduxLayer.id}`);
assert(konvaLayer, `Layer ${reduxLayer.id} not found in stage`);
const bboxRect = konvaLayer.findOne<Konva.Rect>(`.${LAYER_BBOX_NAME}`) ?? createBboxRect(layer, konvaLayer);
const bboxRect = konvaLayer.findOne<Konva.Rect>(`.${LAYER_BBOX_NAME}`) ?? createBboxRect(reduxLayer, konvaLayer);
bboxRect.setAttrs({
visible: !layer.bboxNeedsUpdate,
listening: layer.isSelected,
x: layer.bbox.x,
y: layer.bbox.y,
width: layer.bbox.width,
height: layer.bbox.height,
stroke: layer.isSelected ? BBOX_SELECTED_STROKE : '',
visible: !reduxLayer.bboxNeedsUpdate,
listening: reduxLayer.isSelected,
x: reduxLayer.bbox.x,
y: reduxLayer.bbox.y,
width: reduxLayer.bbox.width,
height: reduxLayer.bbox.height,
stroke: reduxLayer.isSelected ? BBOX_SELECTED_STROKE : '',
});
}
};
/**
* Calculates the bbox of each regional guidance layer. Only calculates if the mask has changed.
* @param stage The konva stage
* @param layerStates An array of layers to calculate bboxes for
* @param stage The konva stage to render on.
* @param reduxLayers An array of redux layers to calculate bboxes for
* @param onBboxChanged Callback for when the bounding box changes
*/
const updateBboxes = (
stage: Konva.Stage,
layerStates: Layer[],
reduxLayers: Layer[],
onBboxChanged: (layerId: string, bbox: IRect | null) => void
): void => {
for (const rgLayer of layerStates.filter(isRegionalGuidanceLayer)) {
) => {
for (const rgLayer of reduxLayers.filter(isRegionalGuidanceLayer)) {
const konvaLayer = stage.findOne<Konva.Layer>(`#${rgLayer.id}`);
assert(konvaLayer, `Layer ${rgLayer.id} not found in stage`);
// We only need to recalculate the bbox if the layer has changed
@@ -870,7 +808,7 @@ const updateBboxes = (
/**
* Creates the background layer for the stage.
* @param stage The konva stage
* @param stage The konva stage to render on
*/
const createBackgroundLayer = (stage: Konva.Stage): Konva.Layer => {
const layer = new Konva.Layer({
@@ -891,17 +829,17 @@ const createBackgroundLayer = (stage: Konva.Stage): Konva.Layer => {
image.onload = () => {
background.fillPatternImage(image);
};
image.src = TRANSPARENCY_CHECKER_PATTERN;
image.src = STAGE_BG_DATAURL;
return layer;
};
/**
* Renders the background layer for the stage.
* @param stage The konva stage
* @param stage The konva stage to render on
* @param width The unscaled width of the canvas
* @param height The unscaled height of the canvas
*/
const renderBackground = (stage: Konva.Stage, width: number, height: number): void => {
const renderBackground = (stage: Konva.Stage, width: number, height: number) => {
const layer = stage.findOne<Konva.Layer>(`#${BACKGROUND_LAYER_ID}`) ?? createBackgroundLayer(stage);
const background = layer.findOne<Konva.Rect>(`#${BACKGROUND_RECT_ID}`);
@@ -942,10 +880,6 @@ const arrangeLayers = (stage: Konva.Stage, layerIds: string[]): void => {
stage.findOne<Konva.Layer>(`#${TOOL_PREVIEW_LAYER_ID}`)?.zIndex(nextZIndex++);
};
/**
* Creates the "no layers" fallback layer
* @param stage The konva stage
*/
const createNoLayersMessageLayer = (stage: Konva.Stage): Konva.Layer => {
const noLayersMessageLayer = new Konva.Layer({
id: NO_LAYERS_MESSAGE_LAYER_ID,
@@ -957,7 +891,7 @@ const createNoLayersMessageLayer = (stage: Konva.Stage): Konva.Layer => {
y: 0,
align: 'center',
verticalAlign: 'middle',
text: t('controlLayers.noLayersAdded', 'No Layers Added'),
text: t('controlLayers.noLayersAdded'),
fontFamily: '"Inter Variable", sans-serif',
fontStyle: '600',
fill: 'white',
@@ -967,14 +901,7 @@ const createNoLayersMessageLayer = (stage: Konva.Stage): Konva.Layer => {
return noLayersMessageLayer;
};
/**
* Renders the "no layers" message when there are no layers to render
* @param stage The konva stage
* @param layerCount The current number of layers
* @param width The target width of the text
* @param height The target height of the text
*/
const renderNoLayersMessage = (stage: Konva.Stage, layerCount: number, width: number, height: number): void => {
const renderNoLayersMessage = (stage: Konva.Stage, layerCount: number, width: number, height: number) => {
const noLayersMessageLayer =
stage.findOne<Konva.Layer>(`#${NO_LAYERS_MESSAGE_LAYER_ID}`) ?? createNoLayersMessageLayer(stage);
if (layerCount === 0) {
@@ -1009,3 +936,20 @@ export const debouncedRenderers = {
arrangeLayers: debounce(arrangeLayers, DEBOUNCE_MS),
updateBboxes: debounce(updateBboxes, DEBOUNCE_MS),
};
/**
* Calculates the lightness (HSL) of a given pixel and sets the alpha channel to that value.
* This is useful for edge maps and other masks, to make the black areas transparent.
* @param imageData The image data to apply the filter to
*/
const LightnessToAlphaFilter = (imageData: ImageData) => {
const len = imageData.data.length / 4;
for (let i = 0; i < len; i++) {
const r = imageData.data[i * 4 + 0] as number;
const g = imageData.data[i * 4 + 1] as number;
const b = imageData.data[i * 4 + 2] as number;
const cMin = Math.min(r, g, b);
const cMax = Math.max(r, g, b);
imageData.data[i * 4 + 3] = (cMin + cMax) / 2;
}
};

View File

@@ -18,7 +18,7 @@ type BaseDropData = {
id: string;
};
export type CurrentImageDropData = BaseDropData & {
type CurrentImageDropData = BaseDropData & {
actionType: 'SET_CURRENT_IMAGE';
};
@@ -79,14 +79,6 @@ export type RemoveFromBoardDropData = BaseDropData & {
actionType: 'REMOVE_FROM_BOARD';
};
export type SelectForCompareDropData = BaseDropData & {
actionType: 'SELECT_FOR_COMPARE';
context: {
firstImageName?: string | null;
secondImageName?: string | null;
};
};
export type TypesafeDroppableData =
| CurrentImageDropData
| ControlAdapterDropData
@@ -97,8 +89,7 @@ export type TypesafeDroppableData =
| CALayerImageDropData
| IPALayerImageDropData
| RGLayerIPAdapterImageDropData
| IILayerImageDropData
| SelectForCompareDropData;
| IILayerImageDropData;
type BaseDragData = {
id: string;
@@ -143,7 +134,7 @@ export type UseDraggableTypesafeReturnValue = Omit<ReturnType<typeof useOriginal
over: TypesafeOver | null;
};
interface TypesafeActive extends Omit<Active, 'data'> {
export interface TypesafeActive extends Omit<Active, 'data'> {
data: React.MutableRefObject<TypesafeDraggableData | undefined>;
}


@@ -1,14 +1,14 @@
import type { TypesafeDraggableData, TypesafeDroppableData } from 'features/dnd/types';
import type { TypesafeActive, TypesafeDroppableData } from 'features/dnd/types';
export const isValidDrop = (overData?: TypesafeDroppableData | null, activeData?: TypesafeDraggableData | null) => {
if (!overData || !activeData) {
export const isValidDrop = (overData: TypesafeDroppableData | undefined, active: TypesafeActive | null) => {
if (!overData || !active?.data.current) {
return false;
}
const { actionType } = overData;
const { payloadType } = activeData;
const { payloadType } = active.data.current;
if (overData.id === activeData.id) {
if (overData.id === active.data.current.id) {
return false;
}
@@ -29,8 +29,6 @@ export const isValidDrop = (overData?: TypesafeDroppableData | null, activeData?
return payloadType === 'IMAGE_DTO';
case 'SET_NODES_IMAGE':
return payloadType === 'IMAGE_DTO';
case 'SELECT_FOR_COMPARE':
return payloadType === 'IMAGE_DTO';
case 'ADD_TO_BOARD': {
// If the board is the same, don't allow the drop
@@ -42,7 +40,7 @@ export const isValidDrop = (overData?: TypesafeDroppableData | null, activeData?
// Check if the image's board is the board we are dragging onto
if (payloadType === 'IMAGE_DTO') {
const { imageDTO } = activeData.payload;
const { imageDTO } = active.data.current.payload;
const currentBoard = imageDTO.board_id ?? 'none';
const destinationBoard = overData.context.boardId;
@@ -51,7 +49,7 @@ export const isValidDrop = (overData?: TypesafeDroppableData | null, activeData?
if (payloadType === 'GALLERY_SELECTION') {
// Assume all images are on the same board - this is true for the moment
const currentBoard = activeData.payload.boardId;
const currentBoard = active.data.current.payload.boardId;
const destinationBoard = overData.context.boardId;
return currentBoard !== destinationBoard;
}
@@ -69,14 +67,14 @@ export const isValidDrop = (overData?: TypesafeDroppableData | null, activeData?
// Check if the image's board is the board we are dragging onto
if (payloadType === 'IMAGE_DTO') {
const { imageDTO } = activeData.payload;
const { imageDTO } = active.data.current.payload;
const currentBoard = imageDTO.board_id ?? 'none';
return currentBoard !== 'none';
}
if (payloadType === 'GALLERY_SELECTION') {
const currentBoard = activeData.payload.boardId;
const currentBoard = active.data.current.payload.boardId;
return currentBoard !== 'none';
}
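For reference, the reworked `isValidDrop` now receives the dnd-kit `active` object itself instead of its unwrapped draggable data. A hedged sketch of a call site under that assumption (the real handler is not shown in this diff; `isValidDrop` and the dnd types are assumed to be in scope):

```typescript
// Hypothetical drop handler shape, assuming a dnd-kit style event that carries the
// TypesafeActive / TypesafeOver objects defined in features/dnd/types.
const onDragEnd = (event: { active: TypesafeActive; over: TypesafeOver | null }) => {
  const overData = event.over?.data.current; // TypesafeDroppableData | undefined
  if (!isValidDrop(overData, event.active)) {
    return; // reject drops whose payload type does not match the target's action type
  }
  // ...dispatch the action described by overData.actionType here
};
```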


@@ -162,7 +162,7 @@ const GalleryBoard = ({ board, isSelected, setBoardToDelete }: GalleryBoardProps
</Flex>
)}
{isSelectedForAutoAdd && <AutoAddIcon />}
<SelectionOverlay isSelected={isSelected} isSelectedForCompare={false} isHovered={isHovered} />
<SelectionOverlay isSelected={isSelected} isHovered={isHovered} />
<Flex
position="absolute"
bottom={0}


@@ -117,7 +117,7 @@ const NoBoardBoard = memo(({ isSelected }: Props) => {
>
{boardName}
</Flex>
<SelectionOverlay isSelected={isSelected} isSelectedForCompare={false} isHovered={isHovered} />
<SelectionOverlay isSelected={isSelected} isHovered={isHovered} />
<IAIDroppable data={droppableData} dropLabel={<Text fontSize="md">{t('unifiedCanvas.move')}</Text>} />
</Flex>
</Tooltip>


@@ -10,7 +10,6 @@ import { iiLayerAdded } from 'features/controlLayers/store/controlLayersSlice';
import { imagesToDeleteSelected } from 'features/deleteImageModal/store/slice';
import { useImageActions } from 'features/gallery/hooks/useImageActions';
import { sentImageToCanvas, sentImageToImg2Img } from 'features/gallery/store/actions';
import { imageToCompareChanged } from 'features/gallery/store/gallerySlice';
import { $templates } from 'features/nodes/store/nodesSlice';
import { selectOptimalDimension } from 'features/parameters/store/generationSlice';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
@@ -28,7 +27,6 @@ import {
PiDownloadSimpleBold,
PiFlowArrowBold,
PiFoldersBold,
PiImagesBold,
PiPlantBold,
PiQuotesBold,
PiShareFatBold,
@@ -46,7 +44,6 @@ type SingleSelectionMenuItemsProps = {
const SingleSelectionMenuItems = (props: SingleSelectionMenuItemsProps) => {
const { imageDTO } = props;
const optimalDimension = useAppSelector(selectOptimalDimension);
const maySelectForCompare = useAppSelector((s) => s.gallery.imageToCompare?.image_name !== imageDTO.image_name);
const dispatch = useAppDispatch();
const { t } = useTranslation();
const isCanvasEnabled = useFeatureStatus('canvas');
@@ -120,10 +117,6 @@ const SingleSelectionMenuItems = (props: SingleSelectionMenuItemsProps) => {
downloadImage(imageDTO.image_url, imageDTO.image_name);
}, [downloadImage, imageDTO.image_name, imageDTO.image_url]);
const handleSelectImageForCompare = useCallback(() => {
dispatch(imageToCompareChanged(imageDTO));
}, [dispatch, imageDTO]);
return (
<>
<MenuItem as="a" href={imageDTO.image_url} target="_blank" icon={<PiShareFatBold />}>
@@ -137,9 +130,6 @@ const SingleSelectionMenuItems = (props: SingleSelectionMenuItemsProps) => {
<MenuItem icon={<PiDownloadSimpleBold />} onClickCapture={handleDownloadImage}>
{t('parameters.downloadImage')}
</MenuItem>
<MenuItem icon={<PiImagesBold />} isDisabled={!maySelectForCompare} onClick={handleSelectImageForCompare}>
{t('gallery.selectForCompare')}
</MenuItem>
<MenuDivider />
<MenuItem
icon={getAndLoadEmbeddedWorkflowResult.isLoading ? <SpinnerIcon /> : <PiFlowArrowBold />}


@@ -11,7 +11,7 @@ import type { GallerySelectionDraggableData, ImageDraggableData, TypesafeDraggab
import { getGalleryImageDataTestId } from 'features/gallery/components/ImageGrid/getGalleryImageDataTestId';
import { useMultiselect } from 'features/gallery/hooks/useMultiselect';
import { useScrollIntoView } from 'features/gallery/hooks/useScrollIntoView';
import { imageToCompareChanged, isImageViewerOpenChanged } from 'features/gallery/store/gallerySlice';
import { isImageViewerOpenChanged } from 'features/gallery/store/gallerySlice';
import type { MouseEvent } from 'react';
import { memo, useCallback, useMemo, useState } from 'react';
import { useTranslation } from 'react-i18next';
@@ -46,7 +46,6 @@ const GalleryImage = (props: HoverableImageProps) => {
const { t } = useTranslation();
const selectedBoardId = useAppSelector((s) => s.gallery.selectedBoardId);
const alwaysShowImageSizeBadge = useAppSelector((s) => s.gallery.alwaysShowImageSizeBadge);
const isSelectedForCompare = useAppSelector((s) => s.gallery.imageToCompare?.image_name === imageName);
const { handleClick, isSelected, areMultiplesSelected } = useMultiselect(imageDTO);
const customStarUi = useStore($customStarUI);
@@ -106,7 +105,6 @@ const GalleryImage = (props: HoverableImageProps) => {
const onDoubleClick = useCallback(() => {
dispatch(isImageViewerOpenChanged(true));
dispatch(imageToCompareChanged(null));
}, [dispatch]);
const handleMouseOut = useCallback(() => {
@@ -154,7 +152,6 @@ const GalleryImage = (props: HoverableImageProps) => {
imageDTO={imageDTO}
draggableData={draggableData}
isSelected={isSelected}
isSelectedForCompare={isSelectedForCompare}
minSize={0}
imageSx={imageSx}
isDropDisabled={true}


@@ -28,9 +28,7 @@ const ImageMetadataGraphTabContent = ({ image }: Props) => {
return <IAINoContentFallback label={t('nodes.noGraph')} />;
}
return (
<DataViewer fileName={`${image.image_name.replace('.png', '')}_graph`} data={graph} label={t('nodes.graph')} />
);
return <DataViewer data={graph} label={t('nodes.graph')} />;
};
export default memo(ImageMetadataGraphTabContent);


@@ -68,22 +68,14 @@ const ImageMetadataViewer = ({ image }: ImageMetadataViewerProps) => {
</TabPanel>
<TabPanel>
{metadata ? (
<DataViewer
fileName={`${image.image_name.replace('.png', '')}_metadata`}
data={metadata}
label={t('metadata.metadata')}
/>
<DataViewer data={metadata} label={t('metadata.metadata')} />
) : (
<IAINoContentFallback label={t('metadata.noMetaData')} />
)}
</TabPanel>
<TabPanel>
{image ? (
<DataViewer
fileName={`${image.image_name.replace('.png', '')}_details`}
data={image}
label={t('metadata.imageDetails')}
/>
<DataViewer data={image} label={t('metadata.imageDetails')} />
) : (
<IAINoContentFallback label={t('metadata.noImageDetails')} />
)}


@@ -28,13 +28,7 @@ const ImageMetadataWorkflowTabContent = ({ image }: Props) => {
return <IAINoContentFallback label={t('nodes.noWorkflow')} />;
}
return (
<DataViewer
fileName={`${image.image_name.replace('.png', '')}_workflow`}
data={workflow}
label={t('metadata.workflow')}
/>
);
return <DataViewer data={workflow} label={t('metadata.workflow')} />;
};
export default memo(ImageMetadataWorkflowTabContent);


@@ -1,140 +0,0 @@
import {
Button,
ButtonGroup,
Flex,
Icon,
IconButton,
Kbd,
ListItem,
Tooltip,
UnorderedList,
} from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import {
comparedImagesSwapped,
comparisonFitChanged,
comparisonModeChanged,
comparisonModeCycled,
imageToCompareChanged,
} from 'features/gallery/store/gallerySlice';
import { memo, useCallback } from 'react';
import { useHotkeys } from 'react-hotkeys-hook';
import { Trans, useTranslation } from 'react-i18next';
import { PiArrowsOutBold, PiQuestion, PiSwapBold, PiXBold } from 'react-icons/pi';
export const CompareToolbar = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const comparisonMode = useAppSelector((s) => s.gallery.comparisonMode);
const comparisonFit = useAppSelector((s) => s.gallery.comparisonFit);
const setComparisonModeSlider = useCallback(() => {
dispatch(comparisonModeChanged('slider'));
}, [dispatch]);
const setComparisonModeSideBySide = useCallback(() => {
dispatch(comparisonModeChanged('side-by-side'));
}, [dispatch]);
const setComparisonModeHover = useCallback(() => {
dispatch(comparisonModeChanged('hover'));
}, [dispatch]);
const swapImages = useCallback(() => {
dispatch(comparedImagesSwapped());
}, [dispatch]);
useHotkeys('c', swapImages, [swapImages]);
const toggleComparisonFit = useCallback(() => {
dispatch(comparisonFitChanged(comparisonFit === 'contain' ? 'fill' : 'contain'));
}, [dispatch, comparisonFit]);
const exitCompare = useCallback(() => {
dispatch(imageToCompareChanged(null));
}, [dispatch]);
useHotkeys('esc', exitCompare, [exitCompare]);
const nextMode = useCallback(() => {
dispatch(comparisonModeCycled());
}, [dispatch]);
useHotkeys('m', nextMode, [nextMode]);
return (
<Flex w="full" gap={2}>
<Flex flex={1} justifyContent="center">
<Flex gap={2} marginInlineEnd="auto">
<IconButton
icon={<PiSwapBold />}
aria-label={`${t('gallery.swapImages')} (C)`}
tooltip={`${t('gallery.swapImages')} (C)`}
onClick={swapImages}
/>
{comparisonMode !== 'side-by-side' && (
<IconButton
aria-label={t('gallery.stretchToFit')}
tooltip={t('gallery.stretchToFit')}
onClick={toggleComparisonFit}
colorScheme={comparisonFit === 'fill' ? 'invokeBlue' : 'base'}
variant="outline"
icon={<PiArrowsOutBold />}
/>
)}
</Flex>
</Flex>
<Flex flex={1} gap={4} justifyContent="center">
<ButtonGroup variant="outline">
<Button
flexShrink={0}
onClick={setComparisonModeSlider}
colorScheme={comparisonMode === 'slider' ? 'invokeBlue' : 'base'}
>
{t('gallery.slider')}
</Button>
<Button
flexShrink={0}
onClick={setComparisonModeSideBySide}
colorScheme={comparisonMode === 'side-by-side' ? 'invokeBlue' : 'base'}
>
{t('gallery.sideBySide')}
</Button>
<Button
flexShrink={0}
onClick={setComparisonModeHover}
colorScheme={comparisonMode === 'hover' ? 'invokeBlue' : 'base'}
>
{t('gallery.hover')}
</Button>
</ButtonGroup>
</Flex>
<Flex flex={1} justifyContent="center">
<Flex gap={2} marginInlineStart="auto" alignItems="center">
<Tooltip label={<CompareHelp />}>
<Flex alignItems="center">
<Icon boxSize={8} color="base.500" as={PiQuestion} lineHeight={0} />
</Flex>
</Tooltip>
<IconButton
icon={<PiXBold />}
aria-label={`${t('gallery.exitCompare')} (Esc)`}
tooltip={`${t('gallery.exitCompare')} (Esc)`}
onClick={exitCompare}
/>
</Flex>
</Flex>
</Flex>
);
});
CompareToolbar.displayName = 'CompareToolbar';
const CompareHelp = () => {
return (
<UnorderedList>
<ListItem>
<Trans i18nKey="gallery.compareHelp1" components={{ Kbd: <Kbd /> }}></Trans>
</ListItem>
<ListItem>
<Trans i18nKey="gallery.compareHelp2" components={{ Kbd: <Kbd /> }}></Trans>
</ListItem>
<ListItem>
<Trans i18nKey="gallery.compareHelp3" components={{ Kbd: <Kbd /> }}></Trans>
</ListItem>
<ListItem>
<Trans i18nKey="gallery.compareHelp4" components={{ Kbd: <Kbd /> }}></Trans>
</ListItem>
</UnorderedList>
);
};
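The deleted toolbar above cycles comparison modes with the `m` hotkey via `comparisonModeCycled`, whose reducer lives in `gallerySlice` and is outside this diff. A minimal sketch of such a cycle, assuming the three modes simply rotate in order:

```typescript
// Hypothetical mode-cycling helper (assumed behavior of comparisonModeCycled).
const COMPARISON_MODES = ['slider', 'side-by-side', 'hover'] as const;
type ComparisonMode = (typeof COMPARISON_MODES)[number];

const cycleComparisonMode = (current: ComparisonMode): ComparisonMode =>
  COMPARISON_MODES[(COMPARISON_MODES.indexOf(current) + 1) % COMPARISON_MODES.length];
```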


@@ -4,7 +4,7 @@ import { skipToken } from '@reduxjs/toolkit/query';
import { useAppSelector } from 'app/store/storeHooks';
import IAIDndImage from 'common/components/IAIDndImage';
import { IAINoContentFallback } from 'common/components/IAIImageFallback';
import type { TypesafeDraggableData } from 'features/dnd/types';
import type { TypesafeDraggableData, TypesafeDroppableData } from 'features/dnd/types';
import ImageMetadataViewer from 'features/gallery/components/ImageMetadataViewer/ImageMetadataViewer';
import NextPrevImageButtons from 'features/gallery/components/NextPrevImageButtons';
import { selectLastSelectedImage } from 'features/gallery/store/gallerySelectors';
@@ -22,7 +22,21 @@ const selectLastSelectedImageName = createSelector(
(lastSelectedImage) => lastSelectedImage?.image_name
);
const CurrentImagePreview = () => {
type Props = {
isDragDisabled?: boolean;
isDropDisabled?: boolean;
withNextPrevButtons?: boolean;
withMetadata?: boolean;
alwaysShowProgress?: boolean;
};
const CurrentImagePreview = ({
isDragDisabled = false,
isDropDisabled = false,
withNextPrevButtons = true,
withMetadata = true,
alwaysShowProgress = false,
}: Props) => {
const { t } = useTranslation();
const shouldShowImageDetails = useAppSelector((s) => s.ui.shouldShowImageDetails);
const imageName = useAppSelector(selectLastSelectedImageName);
@@ -41,6 +55,14 @@ const CurrentImagePreview = () => {
}
}, [imageDTO]);
const droppableData = useMemo<TypesafeDroppableData | undefined>(
() => ({
id: 'current-image',
actionType: 'SET_CURRENT_IMAGE',
}),
[]
);
// Show and hide the next/prev buttons on mouse move
const [shouldShowNextPrevButtons, setShouldShowNextPrevButtons] = useState<boolean>(false);
const timeoutId = useRef(0);
@@ -64,27 +86,30 @@ const CurrentImagePreview = () => {
justifyContent="center"
position="relative"
>
{hasDenoiseProgress && shouldShowProgressInViewer ? (
{hasDenoiseProgress && (shouldShowProgressInViewer || alwaysShowProgress) ? (
<ProgressImage />
) : (
<IAIDndImage
imageDTO={imageDTO}
droppableData={droppableData}
draggableData={draggableData}
isDropDisabled={true}
isDragDisabled={isDragDisabled}
isDropDisabled={isDropDisabled}
isUploadDisabled={true}
fitContainer
useThumbailFallback
dropLabel={t('gallery.setCurrentImage')}
noContentFallback={<IAINoContentFallback icon={PiImageBold} label={t('gallery.noImageSelected')} />}
dataTestId="image-preview"
/>
)}
{shouldShowImageDetails && imageDTO && (
{shouldShowImageDetails && imageDTO && withMetadata && (
<Box position="absolute" opacity={0.8} top={0} width="full" height="full" borderRadius="base">
<ImageMetadataViewer image={imageDTO} />
</Box>
)}
<AnimatePresence>
{shouldShowNextPrevButtons && imageDTO && (
{withNextPrevButtons && shouldShowNextPrevButtons && imageDTO && (
<Box
as={motion.div}
key="nextPrevButtons"


@@ -1,41 +0,0 @@
import { useAppSelector } from 'app/store/storeHooks';
import { IAINoContentFallback } from 'common/components/IAIImageFallback';
import type { Dimensions } from 'features/canvas/store/canvasTypes';
import { selectComparisonImages } from 'features/gallery/components/ImageViewer/common';
import { ImageComparisonHover } from 'features/gallery/components/ImageViewer/ImageComparisonHover';
import { ImageComparisonSideBySide } from 'features/gallery/components/ImageViewer/ImageComparisonSideBySide';
import { ImageComparisonSlider } from 'features/gallery/components/ImageViewer/ImageComparisonSlider';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiImagesBold } from 'react-icons/pi';
type Props = {
containerDims: Dimensions;
};
export const ImageComparison = memo(({ containerDims }: Props) => {
const { t } = useTranslation();
const comparisonMode = useAppSelector((s) => s.gallery.comparisonMode);
const { firstImage, secondImage } = useAppSelector(selectComparisonImages);
if (!firstImage || !secondImage) {
// Should rarely/never happen - we don't render this component unless we have images to compare
return <IAINoContentFallback label={t('gallery.selectAnImageToCompare')} icon={PiImagesBold} />;
}
if (comparisonMode === 'slider') {
return <ImageComparisonSlider containerDims={containerDims} firstImage={firstImage} secondImage={secondImage} />;
}
if (comparisonMode === 'side-by-side') {
return (
<ImageComparisonSideBySide containerDims={containerDims} firstImage={firstImage} secondImage={secondImage} />
);
}
if (comparisonMode === 'hover') {
return <ImageComparisonHover containerDims={containerDims} firstImage={firstImage} secondImage={secondImage} />;
}
});
ImageComparison.displayName = 'ImageComparison';


@@ -1,47 +0,0 @@
import { Flex } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import IAIDroppable from 'common/components/IAIDroppable';
import type { CurrentImageDropData, SelectForCompareDropData } from 'features/dnd/types';
import { useImageViewer } from 'features/gallery/components/ImageViewer/useImageViewer';
import { memo, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { selectComparisonImages } from './common';
const setCurrentImageDropData: CurrentImageDropData = {
id: 'current-image',
actionType: 'SET_CURRENT_IMAGE',
};
export const ImageComparisonDroppable = memo(() => {
const { t } = useTranslation();
const imageViewer = useImageViewer();
const { firstImage, secondImage } = useAppSelector(selectComparisonImages);
const selectForCompareDropData = useMemo<SelectForCompareDropData>(
() => ({
id: 'image-comparison',
actionType: 'SELECT_FOR_COMPARE',
context: {
firstImageName: firstImage?.image_name,
secondImageName: secondImage?.image_name,
},
}),
[firstImage?.image_name, secondImage?.image_name]
);
if (!imageViewer.isOpen) {
return (
<Flex position="absolute" top={0} right={0} bottom={0} left={0} gap={2} pointerEvents="none">
<IAIDroppable data={setCurrentImageDropData} dropLabel={t('gallery.openInViewer')} />
</Flex>
);
}
return (
<Flex position="absolute" top={0} right={0} bottom={0} left={0} gap={2} pointerEvents="none">
<IAIDroppable data={selectForCompareDropData} dropLabel={t('gallery.selectForCompare')} />
</Flex>
);
});
ImageComparisonDroppable.displayName = 'ImageComparisonDroppable';


@@ -1,117 +0,0 @@
import { Box, Flex, Image } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { useBoolean } from 'common/hooks/useBoolean';
import { preventDefault } from 'common/util/stopPropagation';
import type { Dimensions } from 'features/canvas/store/canvasTypes';
import { TRANSPARENCY_CHECKER_PATTERN } from 'features/controlLayers/konva/constants';
import { ImageComparisonLabel } from 'features/gallery/components/ImageViewer/ImageComparisonLabel';
import { memo, useMemo, useRef } from 'react';
import type { ComparisonProps } from './common';
import { fitDimsToContainer, getSecondImageDims } from './common';
export const ImageComparisonHover = memo(({ firstImage, secondImage, containerDims }: ComparisonProps) => {
const comparisonFit = useAppSelector((s) => s.gallery.comparisonFit);
const imageContainerRef = useRef<HTMLDivElement>(null);
const mouseOver = useBoolean(false);
const fittedDims = useMemo<Dimensions>(
() => fitDimsToContainer(containerDims, firstImage),
[containerDims, firstImage]
);
const compareImageDims = useMemo<Dimensions>(
() => getSecondImageDims(comparisonFit, fittedDims, firstImage, secondImage),
[comparisonFit, fittedDims, firstImage, secondImage]
);
return (
<Flex w="full" h="full" maxW="full" maxH="full" position="relative" alignItems="center" justifyContent="center">
<Flex
id="image-comparison-wrapper"
w="full"
h="full"
maxW="full"
maxH="full"
position="absolute"
alignItems="center"
justifyContent="center"
>
<Box
ref={imageContainerRef}
position="relative"
id="image-comparison-hover-image-container"
w={fittedDims.width}
h={fittedDims.height}
maxW="full"
maxH="full"
userSelect="none"
overflow="hidden"
borderRadius="base"
>
<Image
id="image-comparison-hover-first-image"
src={firstImage.image_url}
fallbackSrc={firstImage.thumbnail_url}
w={fittedDims.width}
h={fittedDims.height}
maxW="full"
maxH="full"
objectFit="cover"
objectPosition="top left"
/>
<ImageComparisonLabel type="first" opacity={mouseOver.isTrue ? 0 : 1} />
<Box
id="image-comparison-hover-second-image-container"
position="absolute"
top={0}
left={0}
right={0}
bottom={0}
overflow="hidden"
opacity={mouseOver.isTrue ? 1 : 0}
transitionDuration="0.2s"
transitionProperty="common"
>
<Box
id="image-comparison-hover-bg"
position="absolute"
top={0}
left={0}
right={0}
bottom={0}
backgroundImage={TRANSPARENCY_CHECKER_PATTERN}
backgroundRepeat="repeat"
opacity={0.2}
/>
<Image
position="relative"
id="image-comparison-hover-second-image"
src={secondImage.image_url}
fallbackSrc={secondImage.thumbnail_url}
w={compareImageDims.width}
h={compareImageDims.height}
maxW={fittedDims.width}
maxH={fittedDims.height}
objectFit={comparisonFit}
objectPosition="top left"
/>
<ImageComparisonLabel type="second" opacity={mouseOver.isTrue ? 1 : 0} />
</Box>
<Box
id="image-comparison-hover-interaction-overlay"
position="absolute"
top={0}
right={0}
bottom={0}
left={0}
onMouseOver={mouseOver.setTrue}
onMouseOut={mouseOver.setFalse}
onContextMenu={preventDefault}
userSelect="none"
/>
</Box>
</Flex>
</Flex>
);
});
ImageComparisonHover.displayName = 'ImageComparisonHover';
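`fitDimsToContainer` and `getSecondImageDims` are imported from `./common`, which is outside this diff. A hedged sketch of the contain-fit math a helper like `fitDimsToContainer` presumably performs (an assumption, not the actual implementation):

```typescript
// Hypothetical contain-fit helper: scale the image down (never up) to fit the
// container while preserving aspect ratio.
type Dims = { width: number; height: number };

const fitDims = (container: Dims, image: Dims): Dims => {
  const scale = Math.min(container.width / image.width, container.height / image.height, 1);
  return { width: image.width * scale, height: image.height * scale };
};
```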


@@ -1,33 +0,0 @@
import type { TextProps } from '@invoke-ai/ui-library';
import { Text } from '@invoke-ai/ui-library';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { DROP_SHADOW } from './common';
type Props = TextProps & {
type: 'first' | 'second';
};
export const ImageComparisonLabel = memo(({ type, ...rest }: Props) => {
const { t } = useTranslation();
return (
<Text
position="absolute"
bottom={4}
insetInlineEnd={type === 'first' ? undefined : 4}
insetInlineStart={type === 'first' ? 4 : undefined}
textOverflow="clip"
whiteSpace="nowrap"
filter={DROP_SHADOW}
color="base.50"
transitionDuration="0.2s"
transitionProperty="common"
{...rest}
>
{type === 'first' ? t('gallery.viewerImage') : t('gallery.compareImage')}
</Text>
);
});
ImageComparisonLabel.displayName = 'ImageComparisonLabel';


@@ -1,70 +0,0 @@
import { Flex, Image } from '@invoke-ai/ui-library';
import type { ComparisonProps } from 'features/gallery/components/ImageViewer/common';
import { ImageComparisonLabel } from 'features/gallery/components/ImageViewer/ImageComparisonLabel';
import ResizeHandle from 'features/ui/components/tabs/ResizeHandle';
import { memo, useCallback, useRef } from 'react';
import type { ImperativePanelGroupHandle } from 'react-resizable-panels';
import { Panel, PanelGroup } from 'react-resizable-panels';
export const ImageComparisonSideBySide = memo(({ firstImage, secondImage }: ComparisonProps) => {
const panelGroupRef = useRef<ImperativePanelGroupHandle>(null);
const onDoubleClickHandle = useCallback(() => {
if (!panelGroupRef.current) {
return;
}
panelGroupRef.current.setLayout([50, 50]);
}, []);
return (
<Flex w="full" h="full" maxW="full" maxH="full" position="relative" alignItems="center" justifyContent="center">
<Flex w="full" h="full" maxW="full" maxH="full" position="absolute" alignItems="center" justifyContent="center">
<PanelGroup ref={panelGroupRef} direction="horizontal" id="image-comparison-side-by-side">
<Panel minSize={20}>
<Flex position="relative" w="full" h="full" alignItems="center" justifyContent="center">
<Flex position="absolute" maxW="full" maxH="full" aspectRatio={firstImage.width / firstImage.height}>
<Image
id="image-comparison-side-by-side-first-image"
w={firstImage.width}
h={firstImage.height}
maxW="full"
maxH="full"
src={firstImage.image_url}
fallbackSrc={firstImage.thumbnail_url}
objectFit="contain"
borderRadius="base"
/>
<ImageComparisonLabel type="first" />
</Flex>
</Flex>
</Panel>
<ResizeHandle
id="image-comparison-side-by-side-handle"
onDoubleClick={onDoubleClickHandle}
orientation="vertical"
/>
<Panel minSize={20}>
<Flex position="relative" w="full" h="full" alignItems="center" justifyContent="center">
<Flex position="absolute" maxW="full" maxH="full" aspectRatio={secondImage.width / secondImage.height}>
<Image
id="image-comparison-side-by-side-first-image"
w={secondImage.width}
h={secondImage.height}
maxW="full"
maxH="full"
src={secondImage.image_url}
fallbackSrc={secondImage.thumbnail_url}
objectFit="contain"
borderRadius="base"
/>
<ImageComparisonLabel type="second" />
</Flex>
</Flex>
</Panel>
</PanelGroup>
</Flex>
</Flex>
);
});
ImageComparisonSideBySide.displayName = 'ImageComparisonSideBySide';

Some files were not shown because too many files have changed in this diff.