Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-15 23:28:03 -05:00)

Compare commits: psychedeli...v5.4.3 (184 commits)
Commit SHA1s: b7132ce9e7, 90f30e7748, 6b86a66bc7, aa97e626e9, c90736093f, 0bff4ace1b, 5eb382074e, 46aa930526, 3305bad0c2, 13703d8f55, 60d838d0a5, 2a157a44bf, d61b5833c2, c094838c6a, 2d334c8dd8, a6be26e174, f8c7adddd0, 17da1d92e9, 1cc57a4854, 3993fae331, 1446526d55, 62c024e725, 1e92bb4e94, db6398fdf6, ebd73a2ac2, 8ee95cab00, d1184201a8, 5887891654, 765ca4e004, 159b00a490, 3fbf6f2d2a, 931fca7cd1, db84a3a5d4, ca8313e805, df849035ee, 8d97fe69ca, 9044e53a9b, 6012b0f912, bb0ed5dc8a, 021552fd81, be73dbba92, db9c0cad7c, 54b7f9a063, 7d488a5352, 4d7667f63d, 08704ee8ec, 5910892c33, 46a09d9e90, df0c7d73f3, 3905c97e32, 0be796a808, 7dd33b0f39, 484aaf1595, c276b60af9, 5d8dd6e26e, 5bca68d873, 64364e7911, 6565cea039, 3ebd8d6c07, e970185161, fa5653cdf7, 9a7b000995, 3a27242838, 8cfb032051, 06a9d4e2b2, ed46acee79, b54463d294, faee79dc95, 965cd76e33, e5e8cbf34c, 3412a52594, e01f66b026, 53abdde242, 94c088300f, 3741a6f5e0, 059336258f, 2c23b8414c, 271cc52c80, 20356c0746, e44458609f, 69d86a7696, 56db1a9292, cf50e5eeee, c9c07968d2, 97d0757176, 0f51b677a9, 56ca94c3a9, 28d169f859, 92f71d99ee, 0764c02b1d, 081c7569fe, 20f6532ee8, b9e8910478, ded8391e3c, e9dd2c396a, 0d86de0cb5, bad1149504, fda7aaa7ca, 85c616fa34, 549f4e9794, ef8ededd2f, 1948ffe106, c70f4404c4, b157ae928c, 7a0871992d, b38e2e14f4, 7c0e70ec84, a89ae9d2bf, ad1fcb3f07, 87d74b910b, 7ad1c297a4, fbc629faa6, 7baa6b3c09, 53d482bade, 5aca04b51b, ea8787c8ff, cead2c4445, f76ac1808c, f01210861b, f757f23ef0, 872a6ef209, 4267e5ffc4, a69c5ff9ef, 3ebd8d7d1b, 1fd80d54a4, 991f63e455, 6a1efd3527, 0eadc0dd9e, 481423d678, 89ede0aef3, 359bdee9c6, 0e6fba3763, 652502d7a6, 91d981a49e, 24f61d21b2, eb9a4177c5, 3c43351a5b, b1359b6dff, bddccf6d2f, 21ffaab2a2, 1e969f938f, 9c6c86ee4f, 6b53a48b48, c813fa3fc0, a08e61184a, a0d62a5f41, 616c0f11e1, e1626a4e49, 6ab891a319, 492de41316, c064efc866, 1a0885bfb1, e8b202d0a5, c6fc82f756, 9a77e951d2, 8bd4207a27, 0bb601aaf7, 2da25a0043, 51d0931898, 357b68d1ba, d9ddb6c32e, ad02a99a83, b707dafc7b, 02906c8f5d, 8538e508f1, 8c333ffd14, 72ace5fdff, 9b7583fc84, 989eee338e, acc3d7b91b, 49de868658, b1702c7d90, e49e19ea13, c9f91f391e, 4cb6b2b701, 7d132ea148, 1088accd91, 8d237d8f8b, 0c86a3232d, dbfb0359cb, b4c2aa596b, 87e89b7995, 9b089430e2, f2b0025958
SECURITY.md (new file, 14 lines)
@@ -0,0 +1,14 @@
+# Security Policy
+
+## Supported Versions
+
+Only the latest version of Invoke will receive security updates.
+We do not currently maintain multiple versions of the application with updates.
+
+## Reporting a Vulnerability
+
+To report a vulnerability, contact the Invoke team directly at security@invoke.ai
+
+At this time, we do not maintain a formal bug bounty program.
+
+You can also share identified security issues with our team on huntr.com
@@ -50,7 +50,7 @@ Applications are built on top of the invoke framework. They should construct `in

### Web UI

-The Web UI is built on top of an HTTP API built with [FastAPI](https://fastapi.tiangolo.com/) and [Socket.IO](https://socket.io/). The frontend code is found in `/frontend` and the backend code is found in `/ldm/invoke/app/api_app.py` and `/ldm/invoke/app/api/`. The code is further organized as such:
+The Web UI is built on top of an HTTP API built with [FastAPI](https://fastapi.tiangolo.com/) and [Socket.IO](https://socket.io/). The frontend code is found in `/invokeai/frontend` and the backend code is found in `/invokeai/app/api_app.py` and `/invokeai/app/api/`. The code is further organized as such:

| Component | Description |
| --- | --- |
@@ -62,7 +62,7 @@ The Web UI is built on top of an HTTP API built with [FastAPI](https://fastapi.t

### CLI

-The CLI is built automatically from invocation metadata, and also supports invocation piping and auto-linking. Code is available in `/ldm/invoke/app/cli_app.py`.
+The CLI is built automatically from invocation metadata, and also supports invocation piping and auto-linking. Code is available in `/invokeai/frontend/cli`.

## Invoke
@@ -70,7 +70,7 @@ The Invoke framework provides the interface to the underlying AI systems and is

### Invoker

-The invoker (`/ldm/invoke/app/services/invoker.py`) is the primary interface through which applications interact with the framework. Its primary purpose is to create, manage, and invoke sessions. It also maintains two sets of services:
+The invoker (`/invokeai/app/services/invoker.py`) is the primary interface through which applications interact with the framework. Its primary purpose is to create, manage, and invoke sessions. It also maintains two sets of services:

- **invocation services**, which are used by invocations to interact with core functionality.
- **invoker services**, which are used by the invoker to manage sessions and manage the invocation queue.
@@ -82,12 +82,12 @@ The session graph does not support looping. This is left as an application probl

### Invocations

-Invocations represent individual units of execution, with inputs and outputs. All invocations are located in `/ldm/invoke/app/invocations`, and are all automatically discovered and made available in the applications. These are the primary way to expose new functionality in Invoke.AI, and the [implementation guide](INVOCATIONS.md) explains how to add new invocations.
+Invocations represent individual units of execution, with inputs and outputs. All invocations are located in `/invokeai/app/invocations`, and are all automatically discovered and made available in the applications. These are the primary way to expose new functionality in Invoke.AI, and the [implementation guide](INVOCATIONS.md) explains how to add new invocations.

### Services

-Services provide invocations access AI Core functionality and other necessary functionality (e.g. image storage). These are available in `/ldm/invoke/app/services`. As a general rule, new services should provide an interface as an abstract base class, and may provide a lightweight local implementation by default in their module. The goal for all services should be to enable the usage of different implementations (e.g. using cloud storage for image storage), but should not load any module dependencies unless that implementation has been used (i.e. don't import anything that won't be used, especially if it's expensive to import).
+Services provide invocations access AI Core functionality and other necessary functionality (e.g. image storage). These are available in `/invokeai/app/services`. As a general rule, new services should provide an interface as an abstract base class, and may provide a lightweight local implementation by default in their module. The goal for all services should be to enable the usage of different implementations (e.g. using cloud storage for image storage), but should not load any module dependencies unless that implementation has been used (i.e. don't import anything that won't be used, especially if it's expensive to import).

## AI Core

-The AI Core is represented by the rest of the code base (i.e. the code outside of `/ldm/invoke/app/`).
+The AI Core is represented by the rest of the code base (i.e. the code outside of `/invokeai/app/`).
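The Services guideline above prescribes a specific shape: an abstract base class as the interface, with a lightweight local implementation in the same module. A minimal sketch of that pattern follows; the class and method names are hypothetical, not taken from the codebase:

```python
from abc import ABC, abstractmethod


class ImageStorageBase(ABC):
    """Hypothetical service interface, expressed as an abstract base class."""

    @abstractmethod
    def save(self, image_name: str, data: bytes) -> None: ...

    @abstractmethod
    def get(self, image_name: str) -> bytes: ...


class DiskImageStorage(ImageStorageBase):
    """Lightweight local implementation, provided by default in the same
    module. A cloud-backed implementation would live elsewhere and import
    its heavy SDK only when that implementation is actually used."""

    def __init__(self, root: str) -> None:
        self._root = root

    def save(self, image_name: str, data: bytes) -> None:
        with open(f"{self._root}/{image_name}", "wb") as f:
            f.write(data)

    def get(self, image_name: str) -> bytes:
        with open(f"{self._root}/{image_name}", "rb") as f:
            return f.read()
```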
@@ -287,8 +287,8 @@ new Invocation ready to be used.

Once you've created a Node, the next step is to share it with the community! The
best way to do this is to submit a Pull Request to add the Node to the
-[Community Nodes](nodes/communityNodes) list. If you're not sure how to do that,
-take a look a at our [contributing nodes overview](contributingNodes).
+[Community Nodes](../nodes/communityNodes.md) list. If you're not sure how to do that,
+take a look a at our [contributing nodes overview](../nodes/contributingNodes.md).

## Advanced
@@ -9,20 +9,20 @@ model. These are the:

  configuration information. Among other things, the record service
  tracks the type of the model, its provenance, and where it can be
  found on disk.

* _ModelInstallServiceBase_ A service for installing models to
  disk. It uses `DownloadQueueServiceBase` to download models and
  their metadata, and `ModelRecordServiceBase` to store that
  information. It is also responsible for managing the InvokeAI
  `models` directory and its contents.

* _DownloadQueueServiceBase_
  A multithreaded downloader responsible
  for downloading models from a remote source to disk. The download
  queue has special methods for downloading repo_id folders from
  Hugging Face, as well as discriminating among model versions in
  Civitai, but can be used for arbitrary content.

* _ModelLoadServiceBase_
  Responsible for loading a model from disk
  into RAM and VRAM and getting it ready for inference.
@@ -207,9 +207,9 @@ for use in the InvokeAI web server. Its signature is:

```
def open(
    cls,
    config: InvokeAIAppConfig,
    conn: Optional[sqlite3.Connection] = None,
    lock: Optional[threading.Lock] = None
) -> Union[ModelRecordServiceSQL, ModelRecordServiceFile]:
```
@@ -363,7 +363,7 @@ functionality:

* Registering a model config record for a model already located on the
  local filesystem, without moving it or changing its path.

* Installing a model already located on the local filesystem, by
  moving it into the InvokeAI root directory under the
  `models` folder (or wherever config parameter `models_dir`
@@ -371,21 +371,21 @@ functionality:

* Probing of models to determine their type, base type and other key
  information.

* Interface with the InvokeAI event bus to provide status updates on
  the download, installation and registration process.

* Downloading a model from an arbitrary URL and installing it in
  `models_dir`.

* Special handling for HuggingFace repo_ids to recursively download
  the contents of the repository, paying attention to alternative
  variants such as fp16.

* Saving tags and other metadata about the model into the invokeai database
  when fetching from a repo that provides that type of information
  (currently only HuggingFace).

### Initializing the installer

A default installer is created at InvokeAI api startup time and stored
@@ -461,7 +461,7 @@ revision.

`config` is an optional dict of values that will override the
autoprobed values for model type, base, scheduler prediction type, and
so forth. See [Model configuration and
-probing](#Model-configuration-and-probing) for details.
+probing](#model-configuration-and-probing) for details.

`access_token` is an optional access token for accessing resources
that need authentication.
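To make the `config` override concrete, here is a hedged sketch of such a dict; the exact keys the prober accepts are not shown in this excerpt, so the field names below are assumptions based on the description (model type, base, scheduler prediction type):

```python
# Hypothetical override dict; key names are assumptions, not confirmed API.
config = {
    "type": "main",                     # model type
    "base": "sd-2",                     # base model family
    "prediction_type": "v_prediction",  # scheduler prediction type
}
install_job = installer.import_model(source, config=config)
```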
@@ -494,7 +494,7 @@ source8 = URLModelSource(url='https://civitai.com/api/download/models/63006', ac

for source in [source1, source2, source3, source4, source5, source6, source7]:
    install_job = installer.install_model(source)

source2job = installer.wait_for_installs(timeout=120)
for source in sources:
    job = source2job[source]
@@ -504,7 +504,7 @@ for source in sources:
        print(f"{source} installed as {model_key}")
    elif job.errored:
        print(f"{source}: {job.error_type}.\nStack trace:\n{job.error}")

```

As shown here, the `import_model()` method accepts a variety of
@@ -1,6 +1,6 @@
# InvokeAI Backend Tests

-We use `pytest` to run the backend python tests. (See [pyproject.toml](/pyproject.toml) for the default `pytest` options.)
+We use `pytest` to run the backend python tests. (See [pyproject.toml](https://github.com/invoke-ai/InvokeAI/blob/main/pyproject.toml) for the default `pytest` options.)

## Fast vs. Slow
All tests are categorized as either 'fast' (no test annotation) or 'slow' (annotated with the `@pytest.mark.slow` decorator).

@@ -33,7 +33,7 @@ pytest tests -m ""

## Test Organization

-All backend tests are in the [`tests/`](/tests/) directory. This directory mirrors the organization of the `invokeai/` directory. For example, tests for `invokeai/model_management/model_manager.py` would be found in `tests/model_management/test_model_manager.py`.
+All backend tests are in the [`tests/`](https://github.com/invoke-ai/InvokeAI/tree/main/tests) directory. This directory mirrors the organization of the `invokeai/` directory. For example, tests for `invokeai/model_management/model_manager.py` would be found in `tests/model_management/test_model_manager.py`.

TODO: The above statement is aspirational. A re-organization of legacy tests is required to make it true.
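A quick illustration of the fast/slow convention described above (the test bodies are hypothetical):

```python
import pytest


def test_fast_path():
    # No annotation: categorized 'fast' and run by default.
    assert 1 + 1 == 2


@pytest.mark.slow
def test_slow_path():
    # Annotated 'slow': excluded by default; include everything with
    # `pytest tests -m ""` or select only slow tests with `pytest tests -m "slow"`.
    assert sum(range(10_000)) == 49_995_000
```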
@@ -2,7 +2,7 @@

## **What do I need to know to help?**

If you are looking to help with a code contribution, InvokeAI uses several different technologies under the hood: Python (Pydantic, FastAPI, diffusers) and Typescript (React, Redux Toolkit, ChakraUI, Mantine, Konva). Familiarity with StableDiffusion and image generation concepts is helpful, but not essential.

## **Get Started**

@@ -12,7 +12,7 @@ To get started, take a look at our [new contributors checklist](newContributorCh
Once you're set up, for more information, you can review the documentation specific to your area of interest:

* #### [InvokeAI Architecture](../ARCHITECTURE.md)
-* #### [Frontend Documentation](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web)
+* #### [Frontend Documentation](../frontend/index.md)
* #### [Node Documentation](../INVOCATIONS.md)
* #### [Local Development](../LOCAL_DEVELOPMENT.md)

@@ -20,15 +20,15 @@ Once you're setup, for more information, you can review the documentation specif

If you don't feel ready to make a code contribution yet, no problem! You can also help out in other ways, such as [documentation](documentation.md), [translation](translation.md) or helping support other users and triage issues as they're reported in GitHub.

There are two paths to making a development contribution:

1. Choosing an open issue to address. Open issues can be found in the [Issues](https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen) section of the InvokeAI repository. These are tagged by the issue type (bug, enhancement, etc.) along with the “good first issues” tag denoting if they are suitable for first time contributors.
   1. Additional items can be found on our [roadmap](https://github.com/orgs/invoke-ai/projects/7). The roadmap is organized in terms of priority, and contains features of varying size and complexity. If there is an inflight item you’d like to help with, reach out to the contributor assigned to the item to see how you can help.
2. Opening a new issue or feature to add. **Please make sure you have searched through existing issues before creating new ones.**

*Regardless of what you choose, please post in the [#dev-chat](https://discord.com/channels/1020123559063990373/1049495067846524939) channel of the Discord before you start development in order to confirm that the issue or feature is aligned with the current direction of the project. We value our contributors time and effort and want to ensure that no one’s time is being misspent.*

## Best Practices:
* Keep your pull requests small. Smaller pull requests are more likely to be accepted and merged
* Comments! Commenting your code helps reviewers easily understand your contribution
* Use Python and Typescript’s typing systems, and consider using an editor with [LSP](https://microsoft.github.io/language-server-protocol/) support to streamline development

@@ -38,7 +38,7 @@ There are two paths to making a development contribution:

If you need help, you can ask questions in the [#dev-chat](https://discord.com/channels/1020123559063990373/1049495067846524939) channel of the Discord.

For frontend related work, **@psychedelicious** is the best person to reach out to.

For backend related work, please reach out to **@blessedcoolant**, **@lstein**, **@StAlKeR7779** or **@psychedelicious**.
@@ -22,15 +22,15 @@ Before starting these steps, ensure you have your local environment [configured

2. Fork the [InvokeAI](https://github.com/invoke-ai/InvokeAI) repository to your GitHub profile. This means that you will have a copy of the repository under **your-GitHub-username/InvokeAI**.
3. Clone the repository to your local machine using:

   ```bash
   git clone https://github.com/your-GitHub-username/InvokeAI.git
   ```

   If you're unfamiliar with using Git through the commandline, [GitHub Desktop](https://desktop.github.com) is an easy-to-use alternative with a UI. You can do all the same steps listed here, but through the interface.

4. Create a new branch for your fix using:

   ```bash
   git checkout -b branch-name-here
   ```

5. Make the appropriate changes for the issue you are trying to address or the feature that you want to add.
6. Add the file contents of the changed files to the "snapshot" git uses to manage the state of the project, also known as the index:
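The hunk cuts off before showing the command for step 6. Staging changes is done with the standard git command; the exact placeholder text used in the repository's version of this step is not shown here, so treat the path as illustrative:

```bash
# Stage the changed files for commit (path is illustrative)
git add path/to/changed-file
```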
@@ -27,9 +27,9 @@ If you just want to use Invoke, you should use the [installer][installer link].

5. Activate the venv (you'll need to do this every time you want to run the app):

   ```sh
   source .venv/bin/activate
   ```

6. Install the repo as an [editable install][editable install link]:

@@ -37,7 +37,7 @@ If you just want to use Invoke, you should use the [installer][installer link].

   pip install -e ".[dev,test,xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu121
   ```

-   Refer to the [manual installation][manual install link]] instructions for more determining the correct install options. `xformers` is optional, but `dev` and `test` are not.
+   Refer to the [manual installation][manual install link] instructions for more determining the correct install options. `xformers` is optional, but `dev` and `test` are not.

7. Install the frontend dev toolchain:
@@ -34,11 +34,11 @@ Please reach out to @hipsterusername on [Discord](https://discord.gg/ZmtBAhwWhy)

## Contributors

-This project is a combined effort of dedicated people from across the world. [Check out the list of all these amazing people](https://invoke-ai.github.io/InvokeAI/other/CONTRIBUTORS/). We thank them for their time, hard work and effort.
+This project is a combined effort of dedicated people from across the world. [Check out the list of all these amazing people](contributors.md). We thank them for their time, hard work and effort.

## Code of Conduct

-The InvokeAI community is a welcoming place, and we want your help in maintaining that. Please review our [Code of Conduct](https://github.com/invoke-ai/InvokeAI/blob/main/CODE_OF_CONDUCT.md) to learn more - it's essential to maintaining a respectful and inclusive environment.
+The InvokeAI community is a welcoming place, and we want your help in maintaining that. Please review our [Code of Conduct](../CODE_OF_CONDUCT.md) to learn more - it's essential to maintaining a respectful and inclusive environment.

By making a contribution to this project, you certify that:
@@ -99,7 +99,6 @@ their descriptions.

| Scale Latents | Scales latents by a given factor. |
| Segment Anything Processor | Applies segment anything processing to image |
| Show Image | Displays a provided image, and passes it forward in the pipeline. |
| Step Param Easing | Experimental per-step parameter easing for denoising steps |
| String Primitive Collection | A collection of string primitive values |
| String Primitive | A string primitive value |
| Subtract Integers | Subtracts two numbers |
@@ -110,7 +110,7 @@ async def cancel_by_batch_ids(
 @session_queue_router.put(
     "/{queue_id}/cancel_by_destination",
     operation_id="cancel_by_destination",
-    responses={200: {"model": CancelByBatchIDsResult}},
+    responses={200: {"model": CancelByDestinationResult}},
 )
 async def cancel_by_destination(
     queue_id: str = Path(description="The queue id to perform this operation on"),
@@ -63,6 +63,7 @@ class Classification(str, Enum, metaclass=MetaEnum):
     - `Prototype`: The invocation is not yet stable and may be removed from the application at any time. Workflows built around this invocation may break, and we are *not* committed to supporting this invocation.
     - `Deprecated`: The invocation is deprecated and may be removed in a future version.
     - `Internal`: The invocation is not intended for use by end-users. It may be changed or removed at any time, but is exposed for users to play with.
+    - `Special`: The invocation is a special case and does not fit into any of the other classifications.
     """

     Stable = "stable"
@@ -70,6 +71,7 @@ class Classification(str, Enum, metaclass=MetaEnum):
     Prototype = "prototype"
     Deprecated = "deprecated"
     Internal = "internal"
+    Special = "special"


 class UIConfigBase(BaseModel):
@@ -1,98 +1,120 @@
-from typing import Any, Union
+from typing import Optional, Union

 import numpy as np
-import numpy.typing as npt
 import torch
+import torchvision.transforms as T
+from PIL import Image
+from torchvision.transforms.functional import resize as tv_resize

 from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
-from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, LatentsField
+from invokeai.app.invocations.fields import FieldDescriptions, ImageField, Input, InputField, LatentsField
 from invokeai.app.invocations.primitives import LatentsOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
+from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
 from invokeai.backend.util.devices import TorchDevice


+def slerp(
+    t: Union[float, np.ndarray],
+    v0: Union[torch.Tensor, np.ndarray],
+    v1: Union[torch.Tensor, np.ndarray],
+    device: torch.device,
+    DOT_THRESHOLD: float = 0.9995,
+):
+    """
+    Spherical linear interpolation
+    Args:
+        t (float/np.ndarray): Float value between 0.0 and 1.0
+        v0 (np.ndarray): Starting vector
+        v1 (np.ndarray): Final vector
+        DOT_THRESHOLD (float): Threshold for considering the two vectors as
+                               colineal. Not recommended to alter this.
+    Returns:
+        v2 (np.ndarray): Interpolation vector between v0 and v1
+    """
+    inputs_are_torch = False
+    if not isinstance(v0, np.ndarray):
+        inputs_are_torch = True
+        v0 = v0.detach().cpu().numpy()
+    if not isinstance(v1, np.ndarray):
+        inputs_are_torch = True
+        v1 = v1.detach().cpu().numpy()
+
+    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
+    if np.abs(dot) > DOT_THRESHOLD:
+        v2 = (1 - t) * v0 + t * v1
+    else:
+        theta_0 = np.arccos(dot)
+        sin_theta_0 = np.sin(theta_0)
+        theta_t = theta_0 * t
+        sin_theta_t = np.sin(theta_t)
+        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
+        s1 = sin_theta_t / sin_theta_0
+        v2 = s0 * v0 + s1 * v1
+
+    if inputs_are_torch:
+        v2 = torch.from_numpy(v2).to(device)
+
+    return v2
+
+
 @invocation(
     "lblend",
     title="Blend Latents",
-    tags=["latents", "blend"],
+    tags=["latents", "blend", "mask"],
     category="latents",
-    version="1.0.3",
+    version="1.1.0",
 )
 class BlendLatentsInvocation(BaseInvocation):
-    """Blend two latents using a given alpha. Latents must have same size."""
+    """Blend two latents using a given alpha. If a mask is provided, the second latents will be masked before blending.
+    Latents must have same size. Masking functionality added by @dwringer."""

-    latents_a: LatentsField = InputField(
-        description=FieldDescriptions.latents,
-        input=Input.Connection,
-    )
-    latents_b: LatentsField = InputField(
-        description=FieldDescriptions.latents,
-        input=Input.Connection,
-    )
-    alpha: float = InputField(default=0.5, description=FieldDescriptions.blend_alpha)
+    latents_a: LatentsField = InputField(description=FieldDescriptions.latents, input=Input.Connection)
+    latents_b: LatentsField = InputField(description=FieldDescriptions.latents, input=Input.Connection)
+    mask: Optional[ImageField] = InputField(default=None, description="Mask for blending in latents B")
+    alpha: float = InputField(ge=0, default=0.5, description=FieldDescriptions.blend_alpha)
+
+    def prep_mask_tensor(self, mask_image: Image.Image) -> torch.Tensor:
+        if mask_image.mode != "L":
+            mask_image = mask_image.convert("L")
+        mask_tensor = image_resized_to_grid_as_tensor(mask_image, normalize=False)
+        if mask_tensor.dim() == 3:
+            mask_tensor = mask_tensor.unsqueeze(0)
+        return mask_tensor
+
+    def replace_tensor_from_masked_tensor(
+        self, tensor: torch.Tensor, other_tensor: torch.Tensor, mask_tensor: torch.Tensor
+    ):
+        output = tensor.clone()
+        mask_tensor = mask_tensor.expand(output.shape)
+        if output.dtype != torch.float16:
+            output = torch.add(output, mask_tensor * torch.sub(other_tensor, tensor))
+        else:
+            output = torch.add(output, mask_tensor.half() * torch.sub(other_tensor, tensor))
+        return output

     def invoke(self, context: InvocationContext) -> LatentsOutput:
         latents_a = context.tensors.load(self.latents_a.latents_name)
         latents_b = context.tensors.load(self.latents_b.latents_name)
+        if self.mask is None:
+            mask_tensor = torch.zeros(latents_a.shape[-2:])
+        else:
+            mask_tensor = self.prep_mask_tensor(context.images.get_pil(self.mask.image_name))
+        mask_tensor = tv_resize(mask_tensor, latents_a.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False)
+
+        latents_b = self.replace_tensor_from_masked_tensor(latents_b, latents_a, mask_tensor)

         if latents_a.shape != latents_b.shape:
-            raise Exception("Latents to blend must be the same size.")
+            raise ValueError("Latents to blend must be the same size.")

         device = TorchDevice.choose_torch_device()

-        def slerp(
-            t: Union[float, npt.NDArray[Any]],  # FIXME: maybe use np.float32 here?
-            v0: Union[torch.Tensor, npt.NDArray[Any]],
-            v1: Union[torch.Tensor, npt.NDArray[Any]],
-            DOT_THRESHOLD: float = 0.9995,
-        ) -> Union[torch.Tensor, npt.NDArray[Any]]:
-            """
-            Spherical linear interpolation
-            Args:
-                t (float/np.ndarray): Float value between 0.0 and 1.0
-                v0 (np.ndarray): Starting vector
-                v1 (np.ndarray): Final vector
-                DOT_THRESHOLD (float): Threshold for considering the two vectors as
-                                       colineal. Not recommended to alter this.
-            Returns:
-                v2 (np.ndarray): Interpolation vector between v0 and v1
-            """
-            inputs_are_torch = False
-            if not isinstance(v0, np.ndarray):
-                inputs_are_torch = True
-                v0 = v0.detach().cpu().numpy()
-            if not isinstance(v1, np.ndarray):
-                inputs_are_torch = True
-                v1 = v1.detach().cpu().numpy()
-
-            dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
-            if np.abs(dot) > DOT_THRESHOLD:
-                v2 = (1 - t) * v0 + t * v1
-            else:
-                theta_0 = np.arccos(dot)
-                sin_theta_0 = np.sin(theta_0)
-                theta_t = theta_0 * t
-                sin_theta_t = np.sin(theta_t)
-                s0 = np.sin(theta_0 - theta_t) / sin_theta_0
-                s1 = sin_theta_t / sin_theta_0
-                v2 = s0 * v0 + s1 * v1
-
-            if inputs_are_torch:
-                v2_torch: torch.Tensor = torch.from_numpy(v2).to(device)
-                return v2_torch
-            else:
-                assert isinstance(v2, np.ndarray)
-                return v2
-
         # blend
-        bl = slerp(self.alpha, latents_a, latents_b)
-        assert isinstance(bl, torch.Tensor)
-        blended_latents: torch.Tensor = bl  # for type checking convenience
+        blended_latents = slerp(self.alpha, latents_a, latents_b, device)

         # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
         blended_latents = blended_latents.to("cpu")
-        TorchDevice.empty_cache()
+        torch.cuda.empty_cache()

         name = context.tensors.save(tensor=blended_latents)
-        return LatentsOutput.build(latents_name=name, latents=blended_latents, seed=self.latents_a.seed)
+        return LatentsOutput.build(latents_name=name, latents=blended_latents)
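For reference, both versions of `slerp` compute the standard spherical linear interpolation, falling back to a plain linear blend when the vectors are nearly colinear ($|\cos\theta_0| > 0.9995$):

$$
\operatorname{slerp}(v_0, v_1; t) = \frac{\sin\bigl((1-t)\,\theta_0\bigr)}{\sin\theta_0}\,v_0 + \frac{\sin(t\,\theta_0)}{\sin\theta_0}\,v_1,
\qquad
\theta_0 = \arccos\!\left(\frac{v_0 \cdot v_1}{\lVert v_0\rVert\,\lVert v_1\rVert}\right)
$$

The masked pre-blend added in the new version is an ordinary per-element lerp: `replace_tensor_from_masked_tensor` computes `tensor + mask * (other_tensor - tensor)`, so regions where the mask is 1 take their values from `other_tensor` before the slerp runs.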
invokeai/app/invocations/composition-nodes.py (new file, 1563 lines)
File diff suppressed because it is too large
@@ -250,6 +250,11 @@ class FluxConditioningField(BaseModel):
     """A conditioning tensor primitive value"""

     conditioning_name: str = Field(description="The name of conditioning tensor")
+    mask: Optional[TensorField] = Field(
+        default=None,
+        description="The mask associated with this conditioning tensor. Excluded regions should be set to False, "
+        "included regions should be set to True.",
+    )


 class SD3ConditioningField(BaseModel):
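A small sketch of a mask tensor with the stated semantics; only the True/False convention comes from the field description above, so the shape and dtype are assumptions:

```python
import torch

# Hypothetical 64x64 regional mask: left half included (True),
# right half excluded (False).
mask = torch.zeros(1, 64, 64, dtype=torch.bool)
mask[..., :32] = True
```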
@@ -30,6 +30,7 @@ from invokeai.backend.flux.controlnet.xlabs_controlnet_flux import XLabsControlN
 from invokeai.backend.flux.denoise import denoise
 from invokeai.backend.flux.extensions.inpaint_extension import InpaintExtension
 from invokeai.backend.flux.extensions.instantx_controlnet_extension import InstantXControlNetExtension
+from invokeai.backend.flux.extensions.regional_prompting_extension import RegionalPromptingExtension
 from invokeai.backend.flux.extensions.xlabs_controlnet_extension import XLabsControlNetExtension
 from invokeai.backend.flux.extensions.xlabs_ip_adapter_extension import XLabsIPAdapterExtension
 from invokeai.backend.flux.ip_adapter.xlabs_ip_adapter_flux import XlabsIpAdapterFlux
@@ -42,6 +43,7 @@ from invokeai.backend.flux.sampling_utils import (
     pack,
     unpack,
 )
+from invokeai.backend.flux.text_conditioning import FluxTextConditioning
 from invokeai.backend.lora.conversions.flux_lora_constants import FLUX_LORA_TRANSFORMER_PREFIX
 from invokeai.backend.lora.lora_model_raw import LoRAModelRaw
 from invokeai.backend.lora.lora_patcher import LoRAPatcher
@@ -56,7 +58,7 @@ from invokeai.backend.util.devices import TorchDevice
     title="FLUX Denoise",
     tags=["image", "flux"],
     category="image",
-    version="3.2.1",
+    version="3.2.2",
     classification=Classification.Prototype,
 )
 class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
@@ -87,10 +89,10 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
         input=Input.Connection,
         title="Transformer",
     )
-    positive_text_conditioning: FluxConditioningField = InputField(
+    positive_text_conditioning: FluxConditioningField | list[FluxConditioningField] = InputField(
         description=FieldDescriptions.positive_cond, input=Input.Connection
     )
-    negative_text_conditioning: FluxConditioningField | None = InputField(
+    negative_text_conditioning: FluxConditioningField | list[FluxConditioningField] | None = InputField(
         default=None,
         description="Negative conditioning tensor. Can be None if cfg_scale is 1.0.",
         input=Input.Connection,
@@ -139,36 +141,12 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
         name = context.tensors.save(tensor=latents)
         return LatentsOutput.build(latents_name=name, latents=latents, seed=None)

-    def _load_text_conditioning(
-        self, context: InvocationContext, conditioning_name: str, dtype: torch.dtype
-    ) -> Tuple[torch.Tensor, torch.Tensor]:
-        # Load the conditioning data.
-        cond_data = context.conditioning.load(conditioning_name)
-        assert len(cond_data.conditionings) == 1
-        flux_conditioning = cond_data.conditionings[0]
-        assert isinstance(flux_conditioning, FLUXConditioningInfo)
-        flux_conditioning = flux_conditioning.to(dtype=dtype)
-        t5_embeddings = flux_conditioning.t5_embeds
-        clip_embeddings = flux_conditioning.clip_embeds
-        return t5_embeddings, clip_embeddings
-
     def _run_diffusion(
         self,
         context: InvocationContext,
     ):
         inference_dtype = torch.bfloat16

-        # Load the conditioning data.
-        pos_t5_embeddings, pos_clip_embeddings = self._load_text_conditioning(
-            context, self.positive_text_conditioning.conditioning_name, inference_dtype
-        )
-        neg_t5_embeddings: torch.Tensor | None = None
-        neg_clip_embeddings: torch.Tensor | None = None
-        if self.negative_text_conditioning is not None:
-            neg_t5_embeddings, neg_clip_embeddings = self._load_text_conditioning(
-                context, self.negative_text_conditioning.conditioning_name, inference_dtype
-            )
-
         # Load the input latents, if provided.
         init_latents = context.tensors.load(self.latents.latents_name) if self.latents else None
         if init_latents is not None:
@@ -183,15 +161,45 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
             dtype=inference_dtype,
             seed=self.seed,
         )
+        b, _c, latent_h, latent_w = noise.shape
+        packed_h = latent_h // 2
+        packed_w = latent_w // 2
+
+        # Load the conditioning data.
+        pos_text_conditionings = self._load_text_conditioning(
+            context=context,
+            cond_field=self.positive_text_conditioning,
+            packed_height=packed_h,
+            packed_width=packed_w,
+            dtype=inference_dtype,
+            device=TorchDevice.choose_torch_device(),
+        )
+        neg_text_conditionings: list[FluxTextConditioning] | None = None
+        if self.negative_text_conditioning is not None:
+            neg_text_conditionings = self._load_text_conditioning(
+                context=context,
+                cond_field=self.negative_text_conditioning,
+                packed_height=packed_h,
+                packed_width=packed_w,
+                dtype=inference_dtype,
+                device=TorchDevice.choose_torch_device(),
+            )
+        pos_regional_prompting_extension = RegionalPromptingExtension.from_text_conditioning(
+            pos_text_conditionings, img_seq_len=packed_h * packed_w
+        )
+        neg_regional_prompting_extension = (
+            RegionalPromptingExtension.from_text_conditioning(neg_text_conditionings, img_seq_len=packed_h * packed_w)
+            if neg_text_conditionings
+            else None
+        )

         transformer_info = context.models.load(self.transformer.transformer)
         is_schnell = "schnell" in transformer_info.config.config_path

         # Calculate the timestep schedule.
-        image_seq_len = noise.shape[-1] * noise.shape[-2] // 4
         timesteps = get_schedule(
             num_steps=self.num_steps,
-            image_seq_len=image_seq_len,
+            image_seq_len=packed_h * packed_w,
             shift=not is_schnell,
         )
@@ -228,28 +236,17 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):

         inpaint_mask = self._prep_inpaint_mask(context, x)

-        b, _c, latent_h, latent_w = x.shape
         img_ids = generate_img_ids(h=latent_h, w=latent_w, batch_size=b, device=x.device, dtype=x.dtype)

-        pos_bs, pos_t5_seq_len, _ = pos_t5_embeddings.shape
-        pos_txt_ids = torch.zeros(
-            pos_bs, pos_t5_seq_len, 3, dtype=inference_dtype, device=TorchDevice.choose_torch_device()
-        )
-        neg_txt_ids: torch.Tensor | None = None
-        if neg_t5_embeddings is not None:
-            neg_bs, neg_t5_seq_len, _ = neg_t5_embeddings.shape
-            neg_txt_ids = torch.zeros(
-                neg_bs, neg_t5_seq_len, 3, dtype=inference_dtype, device=TorchDevice.choose_torch_device()
-            )
-
         # Pack all latent tensors.
         init_latents = pack(init_latents) if init_latents is not None else None
         inpaint_mask = pack(inpaint_mask) if inpaint_mask is not None else None
         noise = pack(noise)
         x = pack(x)

-        # Now that we have 'packed' the latent tensors, verify that we calculated the image_seq_len correctly.
-        assert image_seq_len == x.shape[1]
+        # Now that we have 'packed' the latent tensors, verify that we calculated the image_seq_len, packed_h, and
+        # packed_w correctly.
+        assert packed_h * packed_w == x.shape[1]

         # Prepare inpaint extension.
         inpaint_extension: InpaintExtension | None = None
@@ -338,12 +335,8 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
                 model=transformer,
                 img=x,
                 img_ids=img_ids,
-                txt=pos_t5_embeddings,
-                txt_ids=pos_txt_ids,
-                vec=pos_clip_embeddings,
-                neg_txt=neg_t5_embeddings,
-                neg_txt_ids=neg_txt_ids,
-                neg_vec=neg_clip_embeddings,
+                pos_regional_prompting_extension=pos_regional_prompting_extension,
+                neg_regional_prompting_extension=neg_regional_prompting_extension,
                 timesteps=timesteps,
                 step_callback=self._build_step_callback(context),
                 guidance=self.guidance,
@@ -357,6 +350,43 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
         x = unpack(x.float(), self.height, self.width)
         return x

+    def _load_text_conditioning(
+        self,
+        context: InvocationContext,
+        cond_field: FluxConditioningField | list[FluxConditioningField],
+        packed_height: int,
+        packed_width: int,
+        dtype: torch.dtype,
+        device: torch.device,
+    ) -> list[FluxTextConditioning]:
+        """Load text conditioning data from a FluxConditioningField or a list of FluxConditioningFields."""
+        # Normalize to a list of FluxConditioningFields.
+        cond_list = [cond_field] if isinstance(cond_field, FluxConditioningField) else cond_field
+
+        text_conditionings: list[FluxTextConditioning] = []
+        for cond_field in cond_list:
+            # Load the text embeddings.
+            cond_data = context.conditioning.load(cond_field.conditioning_name)
+            assert len(cond_data.conditionings) == 1
+            flux_conditioning = cond_data.conditionings[0]
+            assert isinstance(flux_conditioning, FLUXConditioningInfo)
+            flux_conditioning = flux_conditioning.to(dtype=dtype, device=device)
+            t5_embeddings = flux_conditioning.t5_embeds
+            clip_embeddings = flux_conditioning.clip_embeds
+
+            # Load the mask, if provided.
+            mask: Optional[torch.Tensor] = None
+            if cond_field.mask is not None:
+                mask = context.tensors.load(cond_field.mask.tensor_name)
+                mask = mask.to(device=device)
+            mask = RegionalPromptingExtension.preprocess_regional_prompt_mask(
+                mask, packed_height, packed_width, dtype, device
+            )
+
+            text_conditionings.append(FluxTextConditioning(t5_embeddings, clip_embeddings, mask))
+
+        return text_conditionings
+
     @classmethod
     def prep_cfg_scale(
         cls, cfg_scale: float | list[float], timesteps: list[float], cfg_scale_start_step: int, cfg_scale_end_step: int
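The packed-sequence bookkeeping introduced above is simple integer arithmetic: each 2x2 patch of the latent becomes one token, so `packed_h = latent_h // 2`, `packed_w = latent_w // 2`, and `packed_h * packed_w` equals the old `noise.shape[-1] * noise.shape[-2] // 4`. A worked example (the 8x VAE downscaling factor is an assumption about the surrounding pipeline, not something shown in this diff):

```python
height, width = 1024, 1024                         # output image size
latent_h, latent_w = height // 8, width // 8       # 128 x 128 (assumed VAE factor)
packed_h, packed_w = latent_h // 2, latent_w // 2  # 64 x 64
image_seq_len = packed_h * packed_w                # 4096 tokens
assert image_seq_len == (latent_h * latent_w) // 4
```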
@@ -1,11 +1,18 @@
 from contextlib import ExitStack
-from typing import Iterator, Literal, Tuple
+from typing import Iterator, Literal, Optional, Tuple

 import torch
 from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5Tokenizer

 from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
-from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField
+from invokeai.app.invocations.fields import (
+    FieldDescriptions,
+    FluxConditioningField,
+    Input,
+    InputField,
+    TensorField,
+    UIComponent,
+)
 from invokeai.app.invocations.model import CLIPField, T5EncoderField
 from invokeai.app.invocations.primitives import FluxConditioningOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
@@ -22,7 +29,7 @@ from invokeai.backend.stable_diffusion.diffusion.conditioning_data import Condit
     title="FLUX Text Encoding",
     tags=["prompt", "conditioning", "flux"],
     category="conditioning",
-    version="1.1.0",
+    version="1.1.1",
     classification=Classification.Prototype,
 )
 class FluxTextEncoderInvocation(BaseInvocation):
@@ -41,7 +48,10 @@ class FluxTextEncoderInvocation(BaseInvocation):
     t5_max_seq_len: Literal[256, 512] = InputField(
         description="Max sequence length for the T5 encoder. Expected to be 256 for FLUX schnell models and 512 for FLUX dev models."
     )
-    prompt: str = InputField(description="Text prompt to encode.")
+    prompt: str = InputField(description="Text prompt to encode.", ui_component=UIComponent.Textarea)
+    mask: Optional[TensorField] = InputField(
+        default=None, description="A mask defining the region that this conditioning prompt applies to."
+    )

     @torch.no_grad()
     def invoke(self, context: InvocationContext) -> FluxConditioningOutput:
@@ -54,7 +64,9 @@ class FluxTextEncoderInvocation(BaseInvocation):
         )

         conditioning_name = context.conditioning.save(conditioning_data)
-        return FluxConditioningOutput.build(conditioning_name)
+        return FluxConditioningOutput(
+            conditioning=FluxConditioningField(conditioning_name=conditioning_name, mask=self.mask)
+        )

     def _t5_encode(self, context: InvocationContext) -> torch.Tensor:
         t5_tokenizer_info = context.models.load(self.t5_encoder.tokenizer)
invokeai/app/invocations/image_panels.py (new file, 59 lines)
@@ -0,0 +1,59 @@
+from pydantic import ValidationInfo, field_validator
+
+from invokeai.app.invocations.baseinvocation import (
+    BaseInvocation,
+    BaseInvocationOutput,
+    Classification,
+    invocation,
+    invocation_output,
+)
+from invokeai.app.invocations.fields import InputField, OutputField
+from invokeai.app.services.shared.invocation_context import InvocationContext
+
+
+@invocation_output("image_panel_coordinate_output")
+class ImagePanelCoordinateOutput(BaseInvocationOutput):
+    x_left: int = OutputField(description="The left x-coordinate of the panel.")
+    y_top: int = OutputField(description="The top y-coordinate of the panel.")
+    width: int = OutputField(description="The width of the panel.")
+    height: int = OutputField(description="The height of the panel.")
+
+
+@invocation(
+    "image_panel_layout",
+    title="Image Panel Layout",
+    tags=["image", "panel", "layout"],
+    category="image",
+    version="1.0.0",
+    classification=Classification.Prototype,
+)
+class ImagePanelLayoutInvocation(BaseInvocation):
+    """Get the coordinates of a single panel in a grid. (If the full image shape cannot be divided evenly into panels,
+    then the grid may not cover the entire image.)
+    """
+
+    width: int = InputField(description="The width of the entire grid.")
+    height: int = InputField(description="The height of the entire grid.")
+    num_cols: int = InputField(ge=1, default=1, description="The number of columns in the grid.")
+    num_rows: int = InputField(ge=1, default=1, description="The number of rows in the grid.")
+    panel_col_idx: int = InputField(ge=0, default=0, description="The column index of the panel to be processed.")
+    panel_row_idx: int = InputField(ge=0, default=0, description="The row index of the panel to be processed.")
+
+    @field_validator("panel_col_idx")
+    def validate_panel_col_idx(cls, v: int, info: ValidationInfo) -> int:
+        if v < 0 or v >= info.data["num_cols"]:
+            raise ValueError(f"panel_col_idx must be between 0 and {info.data['num_cols'] - 1}")
+        return v
+
+    @field_validator("panel_row_idx")
+    def validate_panel_row_idx(cls, v: int, info: ValidationInfo) -> int:
+        if v < 0 or v >= info.data["num_rows"]:
+            raise ValueError(f"panel_row_idx must be between 0 and {info.data['num_rows'] - 1}")
+        return v
+
+    def invoke(self, context: InvocationContext) -> ImagePanelCoordinateOutput:
+        x_left = self.panel_col_idx * (self.width // self.num_cols)
+        y_top = self.panel_row_idx * (self.height // self.num_rows)
+        width = self.width // self.num_cols
+        height = self.height // self.num_rows
+        return ImagePanelCoordinateOutput(x_left=x_left, y_top=y_top, width=width, height=height)
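A quick worked example of the integer arithmetic in `invoke`, showing the caveat from the docstring (numbers chosen for illustration):

```python
# A 1000-px-wide grid split into 3 columns: each panel is 1000 // 3 = 333 px
# wide, so the columns cover only 999 px and the grid does not span the image.
width, num_cols = 1000, 3
panel_width = width // num_cols             # 333
x_left_of_col_2 = 2 * (width // num_cols)   # 666
assert panel_width * num_cols == 999
```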
@@ -1,43 +1,4 @@
|
||||
import io
|
||||
from typing import Literal, Optional
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
import PIL.Image
|
||||
from easing_functions import (
|
||||
BackEaseIn,
|
||||
BackEaseInOut,
|
||||
BackEaseOut,
|
||||
BounceEaseIn,
|
||||
BounceEaseInOut,
|
||||
BounceEaseOut,
|
||||
CircularEaseIn,
|
||||
CircularEaseInOut,
|
||||
CircularEaseOut,
|
||||
CubicEaseIn,
|
||||
CubicEaseInOut,
|
||||
CubicEaseOut,
|
||||
ElasticEaseIn,
|
||||
ElasticEaseInOut,
|
||||
ElasticEaseOut,
|
||||
ExponentialEaseIn,
|
||||
ExponentialEaseInOut,
|
||||
ExponentialEaseOut,
|
||||
LinearInOut,
|
||||
QuadEaseIn,
|
||||
QuadEaseInOut,
|
||||
QuadEaseOut,
|
||||
QuarticEaseIn,
|
||||
QuarticEaseInOut,
|
||||
QuarticEaseOut,
|
||||
QuinticEaseIn,
|
||||
QuinticEaseInOut,
|
||||
QuinticEaseOut,
|
||||
SineEaseIn,
|
||||
SineEaseInOut,
|
||||
SineEaseOut,
|
||||
)
|
||||
from matplotlib.ticker import MaxNLocator
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
|
||||
from invokeai.app.invocations.fields import InputField
|
||||
@@ -65,191 +26,3 @@ class FloatLinearRangeInvocation(BaseInvocation):
|
||||
def invoke(self, context: InvocationContext) -> FloatCollectionOutput:
|
||||
param_list = list(np.linspace(self.start, self.stop, self.steps))
|
||||
return FloatCollectionOutput(collection=param_list)
|
||||
|
||||
|
||||
EASING_FUNCTIONS_MAP = {
|
||||
"Linear": LinearInOut,
|
||||
"QuadIn": QuadEaseIn,
|
||||
"QuadOut": QuadEaseOut,
|
||||
"QuadInOut": QuadEaseInOut,
|
||||
"CubicIn": CubicEaseIn,
|
||||
"CubicOut": CubicEaseOut,
|
||||
"CubicInOut": CubicEaseInOut,
|
||||
"QuarticIn": QuarticEaseIn,
|
||||
"QuarticOut": QuarticEaseOut,
|
||||
"QuarticInOut": QuarticEaseInOut,
|
||||
"QuinticIn": QuinticEaseIn,
|
||||
"QuinticOut": QuinticEaseOut,
|
||||
"QuinticInOut": QuinticEaseInOut,
|
||||
"SineIn": SineEaseIn,
|
||||
"SineOut": SineEaseOut,
|
||||
"SineInOut": SineEaseInOut,
|
||||
"CircularIn": CircularEaseIn,
|
||||
"CircularOut": CircularEaseOut,
|
||||
"CircularInOut": CircularEaseInOut,
|
||||
"ExponentialIn": ExponentialEaseIn,
|
||||
"ExponentialOut": ExponentialEaseOut,
|
||||
"ExponentialInOut": ExponentialEaseInOut,
|
||||
"ElasticIn": ElasticEaseIn,
|
||||
"ElasticOut": ElasticEaseOut,
|
||||
"ElasticInOut": ElasticEaseInOut,
|
||||
"BackIn": BackEaseIn,
|
||||
"BackOut": BackEaseOut,
|
||||
"BackInOut": BackEaseInOut,
|
||||
"BounceIn": BounceEaseIn,
|
||||
"BounceOut": BounceEaseOut,
|
||||
"BounceInOut": BounceEaseInOut,
|
||||
}
|
||||
|
||||
EASING_FUNCTION_KEYS = Literal[tuple(EASING_FUNCTIONS_MAP.keys())]
|
||||
|
||||
|
||||
# actually I think for now could just use CollectionOutput (which is list[Any]
|
||||
@invocation(
|
||||
"step_param_easing",
|
||||
title="Step Param Easing",
|
||||
tags=["step", "easing"],
|
||||
category="step",
|
||||
version="1.0.2",
|
||||
)
|
||||
class StepParamEasingInvocation(BaseInvocation):
|
||||
"""Experimental per-step parameter easing for denoising steps"""
|
||||
|
||||
easing: EASING_FUNCTION_KEYS = InputField(default="Linear", description="The easing function to use")
|
||||
num_steps: int = InputField(default=20, description="number of denoising steps")
|
||||
start_value: float = InputField(default=0.0, description="easing starting value")
|
||||
end_value: float = InputField(default=1.0, description="easing ending value")
|
||||
start_step_percent: float = InputField(default=0.0, description="fraction of steps at which to start easing")
|
||||
end_step_percent: float = InputField(default=1.0, description="fraction of steps after which to end easing")
|
||||
# if None, then start_value is used prior to easing start
|
||||
pre_start_value: Optional[float] = InputField(default=None, description="value before easing start")
|
||||
# if None, then end value is used prior to easing end
|
||||
post_end_value: Optional[float] = InputField(default=None, description="value after easing end")
|
||||
mirror: bool = InputField(default=False, description="include mirror of easing function")
|
||||
# FIXME: add alt_mirror option (alternative to default or mirror), or remove entirely
|
||||
    # alt_mirror: bool = InputField(default=False, description="alternative mirroring by dual easing")
    show_easing_plot: bool = InputField(default=False, description="show easing plot")

    def invoke(self, context: InvocationContext) -> FloatCollectionOutput:
        log_diagnostics = False
        # convert from start_step_percent to nearest step <= (steps * start_step_percent)
        # start_step = int(np.floor(self.num_steps * self.start_step_percent))
        start_step = int(np.round(self.num_steps * self.start_step_percent))
        # convert from end_step_percent to nearest step >= (steps * end_step_percent)
        # end_step = int(np.ceil((self.num_steps - 1) * self.end_step_percent))
        end_step = int(np.round((self.num_steps - 1) * self.end_step_percent))

        # end_step = int(np.ceil(self.num_steps * self.end_step_percent))
        num_easing_steps = end_step - start_step + 1

        # num_presteps = max(start_step - 1, 0)
        num_presteps = start_step
        num_poststeps = self.num_steps - (num_presteps + num_easing_steps)
        prelist = list(num_presteps * [self.pre_start_value])
        postlist = list(num_poststeps * [self.post_end_value])

        if log_diagnostics:
            context.logger.debug("start_step: " + str(start_step))
            context.logger.debug("end_step: " + str(end_step))
            context.logger.debug("num_easing_steps: " + str(num_easing_steps))
            context.logger.debug("num_presteps: " + str(num_presteps))
            context.logger.debug("num_poststeps: " + str(num_poststeps))
            context.logger.debug("prelist size: " + str(len(prelist)))
            context.logger.debug("postlist size: " + str(len(postlist)))
            context.logger.debug("prelist: " + str(prelist))
            context.logger.debug("postlist: " + str(postlist))

        easing_class = EASING_FUNCTIONS_MAP[self.easing]
        if log_diagnostics:
            context.logger.debug("easing class: " + str(easing_class))
        easing_list = []
        if self.mirror:  # "expected" mirroring
            # if number of steps is even, squeeze duration down to (number_of_steps)/2
            # and create reverse copy of list to append
            # if number of steps is odd, squeeze duration down to ceil(number_of_steps/2)
            # and create reverse copy of list[1:end-1]
            # but if even then number_of_steps/2 === ceil(number_of_steps/2), so can just use ceil always

            base_easing_duration = int(np.ceil(num_easing_steps / 2.0))
            if log_diagnostics:
                context.logger.debug("base easing duration: " + str(base_easing_duration))
            even_num_steps = num_easing_steps % 2 == 0  # even number of steps
            easing_function = easing_class(
                start=self.start_value,
                end=self.end_value,
                duration=base_easing_duration - 1,
            )
            base_easing_vals = []
            for step_index in range(base_easing_duration):
                easing_val = easing_function.ease(step_index)
                base_easing_vals.append(easing_val)
                if log_diagnostics:
                    context.logger.debug("step_index: " + str(step_index) + ", easing_val: " + str(easing_val))
            if even_num_steps:
                mirror_easing_vals = list(reversed(base_easing_vals))
            else:
                mirror_easing_vals = list(reversed(base_easing_vals[0:-1]))
            if log_diagnostics:
                context.logger.debug("base easing vals: " + str(base_easing_vals))
                context.logger.debug("mirror easing vals: " + str(mirror_easing_vals))
            easing_list = base_easing_vals + mirror_easing_vals

        # FIXME: add alt_mirror option (alternative to default or mirror), or remove entirely
        # elif self.alt_mirror:  # function mirroring (unintuitive behavior (at least to me))
        #     # half_ease_duration = round(num_easing_steps - 1 / 2)
        #     half_ease_duration = round((num_easing_steps - 1) / 2)
        #     easing_function = easing_class(start=self.start_value,
        #                                    end=self.end_value,
        #                                    duration=half_ease_duration,
        #                                    )
        #
        #     mirror_function = easing_class(start=self.end_value,
        #                                    end=self.start_value,
        #                                    duration=half_ease_duration,
        #                                    )
        #     for step_index in range(num_easing_steps):
        #         if step_index <= half_ease_duration:
        #             step_val = easing_function.ease(step_index)
        #         else:
        #             step_val = mirror_function.ease(step_index - half_ease_duration)
        #         easing_list.append(step_val)
        #         if log_diagnostics: logger.debug(step_index, step_val)
        #

        else:  # no mirroring (default)
            easing_function = easing_class(
                start=self.start_value,
                end=self.end_value,
                duration=num_easing_steps - 1,
            )
            for step_index in range(num_easing_steps):
                step_val = easing_function.ease(step_index)
                easing_list.append(step_val)
                if log_diagnostics:
                    context.logger.debug("step_index: " + str(step_index) + ", easing_val: " + str(step_val))

        if log_diagnostics:
            context.logger.debug("prelist size: " + str(len(prelist)))
            context.logger.debug("easing_list size: " + str(len(easing_list)))
            context.logger.debug("postlist size: " + str(len(postlist)))

        param_list = prelist + easing_list + postlist

        if self.show_easing_plot:
            plt.figure()
            plt.xlabel("Step")
            plt.ylabel("Param Value")
            plt.title("Per-Step Values Based On Easing: " + self.easing)
            plt.bar(range(len(param_list)), param_list)
            # plt.plot(param_list)
            ax = plt.gca()
            ax.xaxis.set_major_locator(MaxNLocator(integer=True))
            buf = io.BytesIO()
            plt.savefig(buf, format="png")
            buf.seek(0)
            im = PIL.Image.open(buf)
            im.show()
            buf.close()

        # output array of size steps, each entry list[i] is param value for step i
        return FloatCollectionOutput(collection=param_list)
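
For reference, a minimal standalone sketch of the mirroring construction used above, with a plain linear ramp standing in for the easing-functions classes (the helper name is hypothetical; only the ceil-and-reverse bookkeeping is the point):

import numpy as np

def mirrored_easing(start: float, end: float, num_easing_steps: int) -> list[float]:
    # Squeeze the ease into ceil(n / 2) steps, then append a reversed copy.
    base_duration = int(np.ceil(num_easing_steps / 2.0))
    # Linear ramp stands in for easing_class(start=..., end=..., duration=...).ease(i).
    base_vals = [start + (end - start) * i / max(base_duration - 1, 1) for i in range(base_duration)]
    if num_easing_steps % 2 == 0:
        return base_vals + list(reversed(base_vals))
    # Odd step count: drop the peak from the mirrored half so it is not duplicated.
    return base_vals + list(reversed(base_vals[:-1]))

assert len(mirrored_easing(0.0, 1.0, 5)) == 5
assert len(mirrored_easing(0.0, 1.0, 6)) == 6
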
@@ -4,7 +4,13 @@ from typing import Optional

import torch

-from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
+from invokeai.app.invocations.baseinvocation import (
+    BaseInvocation,
+    BaseInvocationOutput,
+    Classification,
+    invocation,
+    invocation_output,
+)
from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
from invokeai.app.invocations.fields import (
    BoundingBoxField,

@@ -533,3 +539,23 @@ class BoundingBoxInvocation(BaseInvocation):


# endregion


+@invocation(
+    "image_batch",
+    title="Image Batch",
+    tags=["primitives", "image", "batch", "internal"],
+    category="primitives",
+    version="1.0.0",
+    classification=Classification.Special,
+)
+class ImageBatchInvocation(BaseInvocation):
+    """Create a batched generation, where the workflow is executed once for each image in the batch."""
+
+    images: list[ImageField] = InputField(min_length=1, description="The images to batch over", input=Input.Direct)
+
+    def __init__(self):
+        raise NotImplementedError("This class should never be executed or instantiated directly.")
+
+    def invoke(self, context: InvocationContext) -> ImageOutput:
+        raise NotImplementedError("This class should never be executed or instantiated directly.")

@@ -86,7 +86,7 @@ class ModelLoadService(ModelLoadServiceBase):

def torch_load_file(checkpoint: Path) -> AnyModel:
    scan_result = scan_file_path(checkpoint)
-    if scan_result.infected_files != 0:
+    if scan_result.infected_files != 0 or scan_result.scan_err:
        raise Exception(f"The model at {checkpoint} is potentially infected by malware. Aborting load.")
    result = torch_load(checkpoint, map_location="cpu")
    return result
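
The same scan-then-refuse pattern recurs in the model probe and checkpoint readers further down. A minimal standalone sketch of the check, assuming picklescan's scan_file_path (whose result exposes the infected_files and scan_err attributes used in this diff):

from pathlib import Path

from picklescan.scanner import scan_file_path

def assert_checkpoint_safe(checkpoint: Path) -> None:
    # Refuse to load when picklescan reports an infection *or* when the scan
    # itself failed; an unscannable file is treated as unsafe.
    scan_result = scan_file_path(checkpoint)
    if scan_result.infected_files != 0 or scan_result.scan_err:
        raise Exception(f"The model at {checkpoint} is potentially infected by malware. Aborting load.")
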
@@ -378,6 +378,9 @@ class DefaultSessionProcessor(SessionProcessorBase):
        self._poll_now()

    async def _on_queue_item_status_changed(self, event: FastAPIEvent[QueueItemStatusChangedEvent]) -> None:
+        # Make sure the cancel event is for the currently processing queue item
+        if self._queue_item and self._queue_item.item_id != event[1].item_id:
+            return
        if self._queue_item and event[1].status in ["completed", "failed", "canceled"]:
            # When the queue item is canceled via HTTP, the queue item status is set to `"canceled"` and this event is
            # emitted. We need to respond to this event and stop graph execution. This is done by setting the cancel

@@ -16,6 +16,7 @@ from pydantic import (
from pydantic_core import to_jsonable_python

from invokeai.app.invocations.baseinvocation import BaseInvocation
+from invokeai.app.invocations.fields import ImageField
from invokeai.app.services.shared.graph import Graph, GraphExecutionState, NodeNotFoundError
from invokeai.app.services.workflow_records.workflow_records_common import (
    WorkflowWithoutID,

@@ -51,11 +52,7 @@ class SessionQueueItemNotFoundError(ValueError):

# region Batch

-BatchDataType = Union[
-    StrictStr,
-    float,
-    int,
-]
+BatchDataType = Union[StrictStr, float, int, ImageField]


class NodeFieldValue(BaseModel):
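
The widened BatchDataType means a batch datum may now be an image reference as well as a primitive. A minimal typing sketch (the ImageField stand-in below is simplified; the real model lives in invokeai.app.invocations.fields):

from typing import Union

from pydantic import BaseModel, StrictStr

class ImageField(BaseModel):
    # Simplified stand-in: the real ImageField references an image by name.
    image_name: str

BatchDataType = Union[StrictStr, float, int, ImageField]

class BatchDatum(BaseModel):
    items: list[BatchDataType]

# Primitives and image references can now be mixed in one batch column.
BatchDatum(items=["a prompt", 7, 0.5, ImageField(image_name="0a1b2c.png")])
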
@@ -1,9 +1,10 @@
+import einops
import torch

from invokeai.backend.flux.extensions.regional_prompting_extension import RegionalPromptingExtension
from invokeai.backend.flux.extensions.xlabs_ip_adapter_extension import XLabsIPAdapterExtension
from invokeai.backend.flux.math import attention
-from invokeai.backend.flux.modules.layers import DoubleStreamBlock
+from invokeai.backend.flux.modules.layers import DoubleStreamBlock, SingleStreamBlock


class CustomDoubleStreamBlockProcessor:

@@ -13,7 +14,12 @@ class CustomDoubleStreamBlockProcessor:

    @staticmethod
    def _double_stream_block_forward(
-        block: DoubleStreamBlock, img: torch.Tensor, txt: torch.Tensor, vec: torch.Tensor, pe: torch.Tensor
+        block: DoubleStreamBlock,
+        img: torch.Tensor,
+        txt: torch.Tensor,
+        vec: torch.Tensor,
+        pe: torch.Tensor,
+        attn_mask: torch.Tensor | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """This function is a direct copy of DoubleStreamBlock.forward(), but it returns some of the intermediate
        values.

@@ -40,7 +46,7 @@ class CustomDoubleStreamBlockProcessor:
        k = torch.cat((txt_k, img_k), dim=2)
        v = torch.cat((txt_v, img_v), dim=2)

-        attn = attention(q, k, v, pe=pe)
+        attn = attention(q, k, v, pe=pe, attn_mask=attn_mask)
        txt_attn, img_attn = attn[:, : txt.shape[1]], attn[:, txt.shape[1] :]

        # calculate the img blocks
@@ -63,11 +69,15 @@ class CustomDoubleStreamBlockProcessor:
        vec: torch.Tensor,
        pe: torch.Tensor,
        ip_adapter_extensions: list[XLabsIPAdapterExtension],
+        regional_prompting_extension: RegionalPromptingExtension,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """A custom implementation of DoubleStreamBlock.forward() with additional features:
        - IP-Adapter support
        """
-        img, txt, img_q = CustomDoubleStreamBlockProcessor._double_stream_block_forward(block, img, txt, vec, pe)
+        attn_mask = regional_prompting_extension.get_double_stream_attn_mask(block_index)
+        img, txt, img_q = CustomDoubleStreamBlockProcessor._double_stream_block_forward(
+            block, img, txt, vec, pe, attn_mask=attn_mask
+        )

        # Apply IP-Adapter conditioning.
        for ip_adapter_extension in ip_adapter_extensions:

@@ -81,3 +91,48 @@ class CustomDoubleStreamBlockProcessor:
        )

        return img, txt
+
+
+class CustomSingleStreamBlockProcessor:
+    """A class containing a custom implementation of SingleStreamBlock.forward() with additional features (masking,
+    etc.)
+    """
+
+    @staticmethod
+    def _single_stream_block_forward(
+        block: SingleStreamBlock,
+        x: torch.Tensor,
+        vec: torch.Tensor,
+        pe: torch.Tensor,
+        attn_mask: torch.Tensor | None = None,
+    ) -> torch.Tensor:
+        """This function is a direct copy of SingleStreamBlock.forward()."""
+        mod, _ = block.modulation(vec)
+        x_mod = (1 + mod.scale) * block.pre_norm(x) + mod.shift
+        qkv, mlp = torch.split(block.linear1(x_mod), [3 * block.hidden_size, block.mlp_hidden_dim], dim=-1)
+
+        q, k, v = einops.rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=block.num_heads)
+        q, k = block.norm(q, k, v)
+
+        # compute attention
+        attn = attention(q, k, v, pe=pe, attn_mask=attn_mask)
+        # compute activation in mlp stream, cat again and run second linear layer
+        output = block.linear2(torch.cat((attn, block.mlp_act(mlp)), 2))
+        return x + mod.gate * output
+
+    @staticmethod
+    def custom_single_block_forward(
+        timestep_index: int,
+        total_num_timesteps: int,
+        block_index: int,
+        block: SingleStreamBlock,
+        img: torch.Tensor,
+        vec: torch.Tensor,
+        pe: torch.Tensor,
+        regional_prompting_extension: RegionalPromptingExtension,
+    ) -> torch.Tensor:
+        """A custom implementation of SingleStreamBlock.forward() with additional features:
+        - Masking
+        """
+        attn_mask = regional_prompting_extension.get_single_stream_attn_mask(block_index)
+        return CustomSingleStreamBlockProcessor._single_stream_block_forward(block, img, vec, pe, attn_mask=attn_mask)

@@ -7,6 +7,7 @@ from tqdm import tqdm
from invokeai.backend.flux.controlnet.controlnet_flux_output import ControlNetFluxOutput, sum_controlnet_flux_outputs
from invokeai.backend.flux.extensions.inpaint_extension import InpaintExtension
from invokeai.backend.flux.extensions.instantx_controlnet_extension import InstantXControlNetExtension
+from invokeai.backend.flux.extensions.regional_prompting_extension import RegionalPromptingExtension
from invokeai.backend.flux.extensions.xlabs_controlnet_extension import XLabsControlNetExtension
from invokeai.backend.flux.extensions.xlabs_ip_adapter_extension import XLabsIPAdapterExtension
from invokeai.backend.flux.model import Flux

@@ -18,14 +19,8 @@ def denoise(
    # model input
    img: torch.Tensor,
    img_ids: torch.Tensor,
-    # positive text conditioning
-    txt: torch.Tensor,
-    txt_ids: torch.Tensor,
-    vec: torch.Tensor,
-    # negative text conditioning
-    neg_txt: torch.Tensor | None,
-    neg_txt_ids: torch.Tensor | None,
-    neg_vec: torch.Tensor | None,
+    pos_regional_prompting_extension: RegionalPromptingExtension,
+    neg_regional_prompting_extension: RegionalPromptingExtension | None,
    # sampling parameters
    timesteps: list[float],
    step_callback: Callable[[PipelineIntermediateState], None],

@@ -61,9 +56,9 @@ def denoise(
            total_num_timesteps=total_steps,
            img=img,
            img_ids=img_ids,
-            txt=txt,
-            txt_ids=txt_ids,
-            y=vec,
+            txt=pos_regional_prompting_extension.regional_text_conditioning.t5_embeddings,
+            txt_ids=pos_regional_prompting_extension.regional_text_conditioning.t5_txt_ids,
+            y=pos_regional_prompting_extension.regional_text_conditioning.clip_embeddings,
            timesteps=t_vec,
            guidance=guidance_vec,
        )

@@ -78,9 +73,9 @@ def denoise(
        pred = model(
            img=img,
            img_ids=img_ids,
-            txt=txt,
-            txt_ids=txt_ids,
-            y=vec,
+            txt=pos_regional_prompting_extension.regional_text_conditioning.t5_embeddings,
+            txt_ids=pos_regional_prompting_extension.regional_text_conditioning.t5_txt_ids,
+            y=pos_regional_prompting_extension.regional_text_conditioning.clip_embeddings,
            timesteps=t_vec,
            guidance=guidance_vec,
            timestep_index=step_index,

@@ -88,6 +83,7 @@ def denoise(
            controlnet_double_block_residuals=merged_controlnet_residuals.double_block_residuals,
            controlnet_single_block_residuals=merged_controlnet_residuals.single_block_residuals,
            ip_adapter_extensions=pos_ip_adapter_extensions,
+            regional_prompting_extension=pos_regional_prompting_extension,
        )

        step_cfg_scale = cfg_scale[step_index]

@@ -97,15 +93,15 @@ def denoise(
            # TODO(ryand): Add option to run positive and negative predictions in a single batch for better performance
            # on systems with sufficient VRAM.

-            if neg_txt is None or neg_txt_ids is None or neg_vec is None:
+            if neg_regional_prompting_extension is None:
                raise ValueError("Negative text conditioning is required when cfg_scale is not 1.0.")

            neg_pred = model(
                img=img,
                img_ids=img_ids,
-                txt=neg_txt,
-                txt_ids=neg_txt_ids,
-                y=neg_vec,
+                txt=neg_regional_prompting_extension.regional_text_conditioning.t5_embeddings,
+                txt_ids=neg_regional_prompting_extension.regional_text_conditioning.t5_txt_ids,
+                y=neg_regional_prompting_extension.regional_text_conditioning.clip_embeddings,
                timesteps=t_vec,
                guidance=guidance_vec,
                timestep_index=step_index,

@@ -113,6 +109,7 @@ def denoise(
                controlnet_double_block_residuals=None,
                controlnet_single_block_residuals=None,
                ip_adapter_extensions=neg_ip_adapter_extensions,
+                regional_prompting_extension=neg_regional_prompting_extension,
            )
            pred = neg_pred + step_cfg_scale * (pred - neg_pred)
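
The last line of that hunk is the standard classifier-free guidance combination. As a quick standalone check of the algebra (a minimal sketch, independent of the diff):

import torch

def cfg_combine(pred_cond: torch.Tensor, pred_uncond: torch.Tensor, cfg_scale: float) -> torch.Tensor:
    # Move from the unconditional prediction toward the conditional one,
    # scaled by cfg_scale; cfg_scale == 1.0 reduces to the conditional output.
    return pred_uncond + cfg_scale * (pred_cond - pred_uncond)

cond, uncond = torch.randn(2, 16), torch.randn(2, 16)
assert torch.allclose(cfg_combine(cond, uncond, 1.0), cond)
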
invokeai/backend/flux/extensions/regional_prompting_extension.py (new file, 276 lines)
@@ -0,0 +1,276 @@
from typing import Optional

import torch
import torchvision

from invokeai.backend.flux.text_conditioning import FluxRegionalTextConditioning, FluxTextConditioning
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import Range
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.mask import to_standard_float_mask


class RegionalPromptingExtension:
    """A class for managing regional prompting with FLUX.

    This implementation is inspired by https://arxiv.org/pdf/2411.02395 (though there are significant differences).
    """

    def __init__(
        self,
        regional_text_conditioning: FluxRegionalTextConditioning,
        restricted_attn_mask: torch.Tensor | None = None,
    ):
        self.regional_text_conditioning = regional_text_conditioning
        self.restricted_attn_mask = restricted_attn_mask

    def get_double_stream_attn_mask(self, block_index: int) -> torch.Tensor | None:
        order = [self.restricted_attn_mask, None]
        return order[block_index % len(order)]

    def get_single_stream_attn_mask(self, block_index: int) -> torch.Tensor | None:
        order = [self.restricted_attn_mask, None]
        return order[block_index % len(order)]
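
Both getters apply the restricted mask only on every other block: even-indexed blocks are masked, odd-indexed blocks attend freely (presumably so the unmasked blocks can retain global context). A tiny sketch of the indexing pattern (placeholder values, outside the diff):

order = ["restricted", None]
# block_index % len(order) alternates: even blocks masked, odd blocks unmasked.
assert [order[i % len(order)] for i in range(4)] == ["restricted", None, "restricted", None]
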
    @classmethod
    def from_text_conditioning(cls, text_conditioning: list[FluxTextConditioning], img_seq_len: int):
        """Create a RegionalPromptingExtension from a list of text conditionings.

        Args:
            text_conditioning (list[FluxTextConditioning]): The text conditionings to use for regional prompting.
            img_seq_len (int): The image sequence length (i.e. packed_height * packed_width).
        """
        regional_text_conditioning = cls._concat_regional_text_conditioning(text_conditioning)
        attn_mask_with_restricted_img_self_attn = cls._prepare_restricted_attn_mask(
            regional_text_conditioning, img_seq_len
        )
        return cls(
            regional_text_conditioning=regional_text_conditioning,
            restricted_attn_mask=attn_mask_with_restricted_img_self_attn,
        )

    # Keeping _prepare_unrestricted_attn_mask for reference as an alternative masking strategy:
    #
    # @classmethod
    # def _prepare_unrestricted_attn_mask(
    #     cls,
    #     regional_text_conditioning: FluxRegionalTextConditioning,
    #     img_seq_len: int,
    # ) -> torch.Tensor:
    #     """Prepare an 'unrestricted' attention mask. In this context, 'unrestricted' means that:
    #     - img self-attention is not masked.
    #     - img regions attend to both txt within their own region and to global prompts.
    #     """
    #     device = TorchDevice.choose_torch_device()
    #
    #     # Infer txt_seq_len from the t5_embeddings tensor.
    #     txt_seq_len = regional_text_conditioning.t5_embeddings.shape[1]
    #
    #     # In the attention blocks, the txt seq and img seq are concatenated and then attention is applied.
    #     # Concatenation happens in the following order: [txt_seq, img_seq].
    #     # There are 4 portions of the attention mask to consider as we prepare it:
    #     # 1. txt attends to itself
    #     # 2. txt attends to corresponding regional img
    #     # 3. regional img attends to corresponding txt
    #     # 4. regional img attends to itself
    #
    #     # Initialize empty attention mask.
    #     regional_attention_mask = torch.zeros(
    #         (txt_seq_len + img_seq_len, txt_seq_len + img_seq_len), device=device, dtype=torch.float16
    #     )
    #
    #     for image_mask, t5_embedding_range in zip(
    #         regional_text_conditioning.image_masks, regional_text_conditioning.t5_embedding_ranges, strict=True
    #     ):
    #         # 1. txt attends to itself
    #         regional_attention_mask[
    #             t5_embedding_range.start : t5_embedding_range.end, t5_embedding_range.start : t5_embedding_range.end
    #         ] = 1.0
    #
    #         # 2. txt attends to corresponding regional img
    #         # Note that we reshape to (1, img_seq_len) to ensure broadcasting works as desired.
    #         fill_value = image_mask.view(1, img_seq_len) if image_mask is not None else 1.0
    #         regional_attention_mask[t5_embedding_range.start : t5_embedding_range.end, txt_seq_len:] = fill_value
    #
    #         # 3. regional img attends to corresponding txt
    #         # Note that we reshape to (img_seq_len, 1) to ensure broadcasting works as desired.
    #         fill_value = image_mask.view(img_seq_len, 1) if image_mask is not None else 1.0
    #         regional_attention_mask[txt_seq_len:, t5_embedding_range.start : t5_embedding_range.end] = fill_value
    #
    #     # 4. regional img attends to itself
    #     # Allow unrestricted img self attention.
    #     regional_attention_mask[txt_seq_len:, txt_seq_len:] = 1.0
    #
    #     # Convert attention mask to boolean.
    #     regional_attention_mask = regional_attention_mask > 0.5
    #
    #     return regional_attention_mask

    @classmethod
    def _prepare_restricted_attn_mask(
        cls,
        regional_text_conditioning: FluxRegionalTextConditioning,
        img_seq_len: int,
    ) -> torch.Tensor | None:
        """Prepare a 'restricted' attention mask. In this context, 'restricted' means that:
        - img self-attention is only allowed within regions.
        - img regions only attend to txt within their own region, not to global prompts.
        """
        # Identify background region. I.e. the region that is not covered by any region masks.
        background_region_mask: None | torch.Tensor = None
        for image_mask in regional_text_conditioning.image_masks:
            if image_mask is not None:
                if background_region_mask is None:
                    background_region_mask = torch.ones_like(image_mask)
                background_region_mask *= 1 - image_mask

        if background_region_mask is None:
            # There are no region masks, short-circuit and return None.
            # TODO(ryand): We could restrict txt-txt attention across multiple global prompts, but this is a rare use
            # case and would make the logic here significantly more complicated.
            return None

        device = TorchDevice.choose_torch_device()

        # Infer txt_seq_len from the t5_embeddings tensor.
        txt_seq_len = regional_text_conditioning.t5_embeddings.shape[1]

        # In the attention blocks, the txt seq and img seq are concatenated and then attention is applied.
        # Concatenation happens in the following order: [txt_seq, img_seq].
        # There are 4 portions of the attention mask to consider as we prepare it:
        # 1. txt attends to itself
        # 2. txt attends to corresponding regional img
        # 3. regional img attends to corresponding txt
        # 4. regional img attends to itself

        # Initialize empty attention mask.
        regional_attention_mask = torch.zeros(
            (txt_seq_len + img_seq_len, txt_seq_len + img_seq_len), device=device, dtype=torch.float16
        )

        for image_mask, t5_embedding_range in zip(
            regional_text_conditioning.image_masks, regional_text_conditioning.t5_embedding_ranges, strict=True
        ):
            # 1. txt attends to itself
            regional_attention_mask[
                t5_embedding_range.start : t5_embedding_range.end, t5_embedding_range.start : t5_embedding_range.end
            ] = 1.0

            if image_mask is not None:
                # 2. txt attends to corresponding regional img
                # Note that we reshape to (1, img_seq_len) to ensure broadcasting works as desired.
                regional_attention_mask[t5_embedding_range.start : t5_embedding_range.end, txt_seq_len:] = (
                    image_mask.view(1, img_seq_len)
                )

                # 3. regional img attends to corresponding txt
                # Note that we reshape to (img_seq_len, 1) to ensure broadcasting works as desired.
                regional_attention_mask[txt_seq_len:, t5_embedding_range.start : t5_embedding_range.end] = (
                    image_mask.view(img_seq_len, 1)
                )

                # 4. regional img attends to itself
                image_mask = image_mask.view(img_seq_len, 1)
                regional_attention_mask[txt_seq_len:, txt_seq_len:] += image_mask @ image_mask.T
            else:
                # We don't allow attention between non-background image regions and global prompts. This helps to
                # ensure that regions focus on their local prompts. We do, however, allow attention between background
                # regions and global prompts. If we didn't do this, then the background regions would not attend to any
                # txt embeddings, which we found experimentally to cause artifacts.

                # 2. global txt attends to background region
                # Note that we reshape to (1, img_seq_len) to ensure broadcasting works as desired.
                regional_attention_mask[t5_embedding_range.start : t5_embedding_range.end, txt_seq_len:] = (
                    background_region_mask.view(1, img_seq_len)
                )

                # 3. background region attends to global txt
                # Note that we reshape to (img_seq_len, 1) to ensure broadcasting works as desired.
                regional_attention_mask[txt_seq_len:, t5_embedding_range.start : t5_embedding_range.end] = (
                    background_region_mask.view(img_seq_len, 1)
                )

                # Allow background regions to attend to themselves.
                regional_attention_mask[txt_seq_len:, txt_seq_len:] += background_region_mask.view(img_seq_len, 1)
                regional_attention_mask[txt_seq_len:, txt_seq_len:] += background_region_mask.view(1, img_seq_len)

        # Convert attention mask to boolean.
        regional_attention_mask = regional_attention_mask > 0.5

        return regional_attention_mask
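
The four-portion layout is easier to see on a toy example. A minimal sketch (hypothetical sizes) that builds the same [txt_seq, img_seq] x [txt_seq, img_seq] boolean mask for a single region:

import torch

txt_len, img_len = 3, 4
region = torch.tensor([1.0, 1.0, 0.0, 0.0])  # region covers the first two image tokens

mask = torch.zeros(txt_len + img_len, txt_len + img_len)
mask[:txt_len, :txt_len] = 1.0                      # 1. txt attends to itself
mask[:txt_len, txt_len:] = region.view(1, img_len)  # 2. txt attends to regional img
mask[txt_len:, :txt_len] = region.view(img_len, 1)  # 3. regional img attends to txt
# 4. img self-attention only within the region (outer product of the mask with itself)
mask[txt_len:, txt_len:] += region.view(img_len, 1) @ region.view(1, img_len)

bool_mask = mask > 0.5
assert bool_mask[txt_len, txt_len]          # in-region img token attends to itself
assert not bool_mask[txt_len, txt_len + 2]  # ...but not to an out-of-region token
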
    @classmethod
    def _concat_regional_text_conditioning(
        cls,
        text_conditionings: list[FluxTextConditioning],
    ) -> FluxRegionalTextConditioning:
        """Concatenate regional text conditioning data into a single conditioning tensor (with associated masks)."""
        concat_t5_embeddings: list[torch.Tensor] = []
        concat_t5_embedding_ranges: list[Range] = []
        image_masks: list[torch.Tensor | None] = []

        # Choose global CLIP embedding.
        # Use the first global prompt's CLIP embedding as the global CLIP embedding. If there is no global prompt, use
        # the first prompt's CLIP embedding.
        global_clip_embedding: torch.Tensor = text_conditionings[0].clip_embeddings
        for text_conditioning in text_conditionings:
            if text_conditioning.mask is None:
                global_clip_embedding = text_conditioning.clip_embeddings
                break

        cur_t5_embedding_len = 0
        for text_conditioning in text_conditionings:
            concat_t5_embeddings.append(text_conditioning.t5_embeddings)

            concat_t5_embedding_ranges.append(
                Range(start=cur_t5_embedding_len, end=cur_t5_embedding_len + text_conditioning.t5_embeddings.shape[1])
            )

            image_masks.append(text_conditioning.mask)

            cur_t5_embedding_len += text_conditioning.t5_embeddings.shape[1]

        t5_embeddings = torch.cat(concat_t5_embeddings, dim=1)

        # Initialize the txt_ids tensor.
        pos_bs, pos_t5_seq_len, _ = t5_embeddings.shape
        t5_txt_ids = torch.zeros(
            pos_bs, pos_t5_seq_len, 3, dtype=t5_embeddings.dtype, device=TorchDevice.choose_torch_device()
        )

        return FluxRegionalTextConditioning(
            t5_embeddings=t5_embeddings,
            clip_embeddings=global_clip_embedding,
            t5_txt_ids=t5_txt_ids,
            image_masks=image_masks,
            t5_embedding_ranges=concat_t5_embedding_ranges,
        )

    @staticmethod
    def preprocess_regional_prompt_mask(
        mask: Optional[torch.Tensor], packed_height: int, packed_width: int, dtype: torch.dtype, device: torch.device
    ) -> torch.Tensor:
        """Preprocess a regional prompt mask to match the target height and width.
        If mask is None, returns a mask of all ones with the target height and width.
        If mask is not None, resizes the mask to the target height and width using 'nearest' interpolation.

        packed_height and packed_width are the target height and width of the mask in the 'packed' latent space.

        Returns:
            torch.Tensor: The processed mask. shape: (1, 1, packed_height * packed_width).
        """

        if mask is None:
            return torch.ones((1, 1, packed_height * packed_width), dtype=dtype, device=device)

        mask = to_standard_float_mask(mask, out_dtype=dtype)

        tf = torchvision.transforms.Resize(
            (packed_height, packed_width), interpolation=torchvision.transforms.InterpolationMode.NEAREST
        )

        # Add a batch dimension to the mask, because torchvision expects shape (batch, channels, h, w).
        mask = mask.unsqueeze(0)  # Shape: (1, h, w) -> (1, 1, h, w)
        resized_mask = tf(mask)

        # Flatten the height and width dimensions into a single image_seq_len dimension.
        return resized_mask.flatten(start_dim=2)
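
A small usage sketch of the mask preprocessing (torchvision only; the shapes mirror the docstring above):

import torch
import torchvision

packed_h, packed_w = 4, 4
mask = torch.zeros(1, 64, 64)
mask[:, :32, :] = 1.0  # region covers the top half of the image

# Nearest-neighbor resize to the packed latent grid, then flatten to (1, 1, h*w).
tf = torchvision.transforms.Resize(
    (packed_h, packed_w), interpolation=torchvision.transforms.InterpolationMode.NEAREST
)
flat = tf(mask.unsqueeze(0)).flatten(start_dim=2)
assert flat.shape == (1, 1, packed_h * packed_w)
assert flat.sum() == packed_h * packed_w / 2  # the top half survives the resize
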
@@ -41,10 +41,12 @@ def infer_xlabs_ip_adapter_params_from_state_dict(state_dict: dict[str, torch.Te
    hidden_dim = state_dict["double_blocks.0.processor.ip_adapter_double_stream_k_proj.weight"].shape[0]
    context_dim = state_dict["double_blocks.0.processor.ip_adapter_double_stream_k_proj.weight"].shape[1]
    clip_embeddings_dim = state_dict["ip_adapter_proj_model.proj.weight"].shape[1]
+    clip_extra_context_tokens = state_dict["ip_adapter_proj_model.proj.weight"].shape[0] // context_dim

    return XlabsIpAdapterParams(
        num_double_blocks=num_double_blocks,
        context_dim=context_dim,
        hidden_dim=hidden_dim,
        clip_embeddings_dim=clip_embeddings_dim,
+        clip_extra_context_tokens=clip_extra_context_tokens,
    )

@@ -31,13 +31,16 @@ class XlabsIpAdapterParams:
    hidden_dim: int

    clip_embeddings_dim: int
+    clip_extra_context_tokens: int


class XlabsIpAdapterFlux(torch.nn.Module):
    def __init__(self, params: XlabsIpAdapterParams):
        super().__init__()
        self.image_proj = ImageProjModel(
-            cross_attention_dim=params.context_dim, clip_embeddings_dim=params.clip_embeddings_dim
+            cross_attention_dim=params.context_dim,
+            clip_embeddings_dim=params.clip_embeddings_dim,
+            clip_extra_context_tokens=params.clip_extra_context_tokens,
        )
        self.ip_adapter_double_blocks = IPAdapterDoubleBlocks(
            num_double_blocks=params.num_double_blocks, context_dim=params.context_dim, hidden_dim=params.hidden_dim

@@ -5,10 +5,10 @@ from einops import rearrange
from torch import Tensor


-def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor) -> Tensor:
+def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor, attn_mask: Tensor | None = None) -> Tensor:
    q, k = apply_rope(q, k, pe)

-    x = torch.nn.functional.scaled_dot_product_attention(q, k, v)
+    x = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=attn_mask)
    x = rearrange(x, "B H L D -> B L (H D)")

    return x

@@ -24,12 +24,12 @@ def rope(pos: Tensor, dim: int, theta: int) -> Tensor:
    out = torch.einsum("...n,d->...nd", pos, omega)
    out = torch.stack([torch.cos(out), -torch.sin(out), torch.sin(out), torch.cos(out)], dim=-1)
    out = rearrange(out, "b n d (i j) -> b n d i j", i=2, j=2)
-    return out.float()
+    return out.to(dtype=pos.dtype, device=pos.device)


def apply_rope(xq: Tensor, xk: Tensor, freqs_cis: Tensor) -> tuple[Tensor, Tensor]:
-    xq_ = xq.float().reshape(*xq.shape[:-1], -1, 1, 2)
-    xk_ = xk.float().reshape(*xk.shape[:-1], -1, 1, 2)
+    xq_ = xq.view(*xq.shape[:-1], -1, 1, 2)
+    xk_ = xk.view(*xk.shape[:-1], -1, 1, 2)
    xq_out = freqs_cis[..., 0] * xq_[..., 0] + freqs_cis[..., 1] * xq_[..., 1]
    xk_out = freqs_cis[..., 0] * xk_[..., 0] + freqs_cis[..., 1] * xk_[..., 1]
-    return xq_out.reshape(*xq.shape).type_as(xq), xk_out.reshape(*xk.shape).type_as(xk)
+    return xq_out.view(*xq.shape).type_as(xq), xk_out.view(*xk.shape).type_as(xk)
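
For the masking semantics the new attn_mask argument relies on: with a boolean mask, scaled_dot_product_attention treats True as "may attend" and fills False positions with -inf before the softmax. A minimal standalone sketch:

import torch
import torch.nn.functional as F

B, H, L, D = 1, 2, 6, 8
q, k, v = (torch.randn(B, H, L, D) for _ in range(3))

# Forbid every query from attending to the last two key positions.
mask = torch.ones(L, L, dtype=torch.bool)
mask[:, -2:] = False

out_masked = F.scaled_dot_product_attention(q, k, v, attn_mask=mask)
out_unmasked = F.scaled_dot_product_attention(q, k, v)
assert not torch.allclose(out_masked, out_unmasked)
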
@@ -5,7 +5,11 @@ from dataclasses import dataclass
import torch
from torch import Tensor, nn

-from invokeai.backend.flux.custom_block_processor import CustomDoubleStreamBlockProcessor
+from invokeai.backend.flux.custom_block_processor import (
+    CustomDoubleStreamBlockProcessor,
+    CustomSingleStreamBlockProcessor,
+)
+from invokeai.backend.flux.extensions.regional_prompting_extension import RegionalPromptingExtension
from invokeai.backend.flux.extensions.xlabs_ip_adapter_extension import XLabsIPAdapterExtension
from invokeai.backend.flux.modules.layers import (
    DoubleStreamBlock,

@@ -95,6 +99,7 @@ class Flux(nn.Module):
        controlnet_double_block_residuals: list[Tensor] | None,
        controlnet_single_block_residuals: list[Tensor] | None,
        ip_adapter_extensions: list[XLabsIPAdapterExtension],
+        regional_prompting_extension: RegionalPromptingExtension,
    ) -> Tensor:
        if img.ndim != 3 or txt.ndim != 3:
            raise ValueError("Input img and txt tensors must have 3 dimensions.")

@@ -117,7 +122,6 @@ class Flux(nn.Module):
            assert len(controlnet_double_block_residuals) == len(self.double_blocks)
        for block_index, block in enumerate(self.double_blocks):
            assert isinstance(block, DoubleStreamBlock)

            img, txt = CustomDoubleStreamBlockProcessor.custom_double_block_forward(
                timestep_index=timestep_index,
                total_num_timesteps=total_num_timesteps,

@@ -128,6 +132,7 @@ class Flux(nn.Module):
                vec=vec,
                pe=pe,
                ip_adapter_extensions=ip_adapter_extensions,
+                regional_prompting_extension=regional_prompting_extension,
            )

            if controlnet_double_block_residuals is not None:

@@ -140,7 +145,17 @@ class Flux(nn.Module):
            assert len(controlnet_single_block_residuals) == len(self.single_blocks)

        for block_index, block in enumerate(self.single_blocks):
-            img = block(img, vec=vec, pe=pe)
+            assert isinstance(block, SingleStreamBlock)
+            img = CustomSingleStreamBlockProcessor.custom_single_block_forward(
+                timestep_index=timestep_index,
+                total_num_timesteps=total_num_timesteps,
+                block_index=block_index,
+                block=block,
+                img=img,
+                vec=vec,
+                pe=pe,
+                regional_prompting_extension=regional_prompting_extension,
+            )

            if controlnet_single_block_residuals is not None:
                img[:, txt.shape[1] :, ...] += controlnet_single_block_residuals[block_index]

@@ -66,10 +66,7 @@ class RMSNorm(torch.nn.Module):
        self.scale = nn.Parameter(torch.ones(dim))

    def forward(self, x: Tensor):
-        x_dtype = x.dtype
-        x = x.float()
-        rrms = torch.rsqrt(torch.mean(x**2, dim=-1, keepdim=True) + 1e-6)
-        return (x * rrms).to(dtype=x_dtype) * self.scale
+        return torch.nn.functional.rms_norm(x, self.scale.shape, self.scale, eps=1e-6)


class QKNorm(torch.nn.Module):
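
The fused replacement matches the hand-rolled RMSNorm closely; a minimal equivalence sketch (assuming PyTorch >= 2.4, where torch.nn.functional.rms_norm is available):

import torch
import torch.nn.functional as F

x = torch.randn(2, 5, 16)
scale = torch.ones(16)

# Hand-rolled version, as in the removed lines above.
rrms = torch.rsqrt(torch.mean(x.float() ** 2, dim=-1, keepdim=True) + 1e-6)
manual = (x.float() * rrms).to(dtype=x.dtype) * scale

fused = F.rms_norm(x, scale.shape, scale, eps=1e-6)
assert torch.allclose(manual, fused, atol=1e-5)
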
invokeai/backend/flux/text_conditioning.py (new file, 36 lines)
@@ -0,0 +1,36 @@
from dataclasses import dataclass

import torch

from invokeai.backend.stable_diffusion.diffusion.conditioning_data import Range


@dataclass
class FluxTextConditioning:
    t5_embeddings: torch.Tensor
    clip_embeddings: torch.Tensor
    # If mask is None, the prompt is a global prompt.
    mask: torch.Tensor | None


@dataclass
class FluxRegionalTextConditioning:
    # Concatenated text embeddings.
    # Shape: (1, concatenated_txt_seq_len, 4096)
    t5_embeddings: torch.Tensor
    # Shape: (1, concatenated_txt_seq_len, 3)
    t5_txt_ids: torch.Tensor

    # Global CLIP embeddings.
    # Shape: (1, 768)
    clip_embeddings: torch.Tensor

    # A binary mask indicating the regions of the image that the prompt should be applied to. If None, the prompt is a
    # global prompt.
    # image_masks[i] is the mask for the ith prompt.
    # image_masks[i] has shape (1, image_seq_len) and dtype torch.bool.
    image_masks: list[torch.Tensor | None]

    # List of ranges that represent the embedding ranges for each mask.
    # t5_embedding_ranges[i] contains the range of the t5 embeddings that correspond to image_masks[i].
    t5_embedding_ranges: list[Range]
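
The t5_embedding_ranges bookkeeping pairs each prompt's mask with its slice of the concatenated T5 sequence; a minimal sketch of that range arithmetic (hypothetical sizes):

import torch

# Stand-ins for two prompts' t5_embeddings tensors.
t5_parts = [torch.randn(1, 12, 4096), torch.randn(1, 20, 4096)]

ranges, cursor = [], 0
for part in t5_parts:
    ranges.append((cursor, cursor + part.shape[1]))
    cursor += part.shape[1]

t5_embeddings = torch.cat(t5_parts, dim=1)
assert t5_embeddings.shape[1] == cursor
assert ranges == [(0, 12), (12, 32)]
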
invokeai/backend/image_util/assets/CIELab_to_UPLab.icc (new binary file, not shown)
invokeai/backend/image_util/composition.py (new file, 1020 lines; diff suppressed because it is too large)
@@ -469,7 +469,7 @@ class ModelProbe(object):
        """
        # scan model
        scan_result = scan_file_path(checkpoint)
-        if scan_result.infected_files != 0:
+        if scan_result.infected_files != 0 or scan_result.scan_err:
            raise Exception(f"The model {model_name} is potentially infected by malware. Aborting import.")

@@ -485,6 +485,7 @@ MODEL_NAME_TO_PREPROCESSOR = {
    "lineart anime": "lineart_anime_image_processor",
    "lineart_anime": "lineart_anime_image_processor",
    "lineart": "lineart_image_processor",
    "soft": "hed_image_processor",
    "softedge": "hed_image_processor",
    "hed": "hed_image_processor",
    "shuffle": "content_shuffle_image_processor",

@@ -298,13 +298,12 @@ ip_adapter_sdxl = StarterModel(
    previous_names=["IP Adapter SDXL"],
)
ip_adapter_flux = StarterModel(
-    name="Standard Reference (XLabs FLUX IP-Adapter)",
+    name="Standard Reference (XLabs FLUX IP-Adapter v2)",
    base=BaseModelType.Flux,
-    source="https://huggingface.co/XLabs-AI/flux-ip-adapter/resolve/main/ip_adapter.safetensors",
+    source="https://huggingface.co/XLabs-AI/flux-ip-adapter-v2/resolve/main/ip_adapter.safetensors",
    description="References images with a more generalized/looser degree of precision.",
    type=ModelType.IPAdapter,
    dependencies=[clip_vit_l_image_encoder],
    previous_names=["XLabs FLUX IP-Adapter"],
)
# endregion
# region ControlNet

@@ -44,7 +44,7 @@ def _fast_safetensors_reader(path: str) -> Dict[str, torch.Tensor]:
    return checkpoint


-def read_checkpoint_meta(path: Union[str, Path], scan: bool = False) -> Dict[str, torch.Tensor]:
+def read_checkpoint_meta(path: Union[str, Path], scan: bool = True) -> Dict[str, torch.Tensor]:
    if str(path).endswith(".safetensors"):
        try:
            path_str = path.as_posix() if isinstance(path, Path) else path

@@ -52,16 +52,15 @@ def read_checkpoint_meta(path: Union[str, Path], scan: bool = False) -> Dict[str
        except Exception:
            # TODO: create issue for support "meta"?
            checkpoint = safetensors.torch.load_file(path, device="cpu")
-    elif str(path).endswith(".gguf"):
-        # The GGUF reader used here uses numpy memmap, so these tensors are not loaded into memory during this function
-        checkpoint = gguf_sd_loader(Path(path), compute_dtype=torch.float32)
    else:
        if scan:
            scan_result = scan_file_path(path)
-            if scan_result.infected_files != 0:
+            if scan_result.infected_files != 0 or scan_result.scan_err:
                raise Exception(f'The model file "{path}" is potentially infected by malware. Aborting import.')
+        if str(path).endswith(".gguf"):
+            # The GGUF reader used here uses numpy memmap, so these tensors are not loaded into memory during this function
+            checkpoint = gguf_sd_loader(Path(path), compute_dtype=torch.float32)
+        else:
+            checkpoint = torch.load(path, map_location=torch.device("meta"))
-        checkpoint = torch.load(path, map_location=torch.device("meta"))
    return checkpoint

@@ -1,3 +1,3 @@
# Invoke UI

-<https://invoke-ai.github.io/InvokeAI/contributing/frontend/OVERVIEW/>
+<https://invoke-ai.github.io/InvokeAI/contributing/frontend/>

@@ -58,7 +58,7 @@
    "@dagrejs/dagre": "^1.1.4",
    "@dagrejs/graphlib": "^2.2.4",
    "@fontsource-variable/inter": "^5.1.0",
-    "@invoke-ai/ui-library": "^0.0.43",
+    "@invoke-ai/ui-library": "^0.0.44",
    "@nanostores/react": "^0.7.3",
    "@reduxjs/toolkit": "2.2.3",
    "@roarr/browser-log-writer": "^1.3.0",

invokeai/frontend/web/pnpm-lock.yaml (generated, 76 lines changed)
@@ -24,8 +24,8 @@ dependencies:
|
||||
specifier: ^5.1.0
|
||||
version: 5.1.0
|
||||
'@invoke-ai/ui-library':
|
||||
specifier: ^0.0.43
|
||||
version: 0.0.43(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.1.0)(@types/react@18.3.11)(i18next@23.15.1)(react-dom@18.3.1)(react@18.3.1)
|
||||
specifier: ^0.0.44
|
||||
version: 0.0.44(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.1.0)(@types/react@18.3.11)(i18next@23.15.1)(react-dom@18.3.1)(react@18.3.1)
|
||||
'@nanostores/react':
|
||||
specifier: ^0.7.3
|
||||
version: 0.7.3(nanostores@0.11.3)(react@18.3.1)
|
||||
@@ -515,8 +515,8 @@ packages:
|
||||
resolution: {integrity: sha512-MV6D4VLRIHr4PkW4zMyqfrNS1mPlCTiCXwvYGtDFQYr+xHFfonhAuf9WjsSc0nyp2m0OdkSLnzmVKkZFLo25Tg==}
|
||||
dev: false
|
||||
|
||||
/@chakra-ui/anatomy@2.3.4:
|
||||
resolution: {integrity: sha512-fFIYN7L276gw0Q7/ikMMlZxP7mvnjRaWJ7f3Jsf9VtDOi6eAYIBRrhQe6+SZ0PGmoOkRaBc7gSE5oeIbgFFyrw==}
|
||||
/@chakra-ui/anatomy@2.3.5:
|
||||
resolution: {integrity: sha512-3im33cUOxCbISjaBlINE2u8BOwJSCdzpjCX0H+0JxK2xz26UaVA5xeI3NYHUoxDnr/QIrgfrllGxS0szYwOcyg==}
|
||||
dev: false
|
||||
|
||||
/@chakra-ui/breakpoint-utils@2.0.8:
|
||||
@@ -573,12 +573,12 @@ packages:
|
||||
react: 18.3.1
|
||||
dev: false
|
||||
|
||||
/@chakra-ui/hooks@2.4.2(react@18.3.1):
|
||||
resolution: {integrity: sha512-LRKiVE1oA7afT5tbbSKAy7Uas2xFHE6IkrQdbhWCHmkHBUtPvjQQDgwtnd4IRZPmoEfNGwoJ/MQpwOM/NRTTwA==}
|
||||
/@chakra-ui/hooks@2.4.3(react@18.3.1):
|
||||
resolution: {integrity: sha512-Sr2zsoTZw3p7HbrUy4aLpTIkE2XXUelAUgg3NGwMzrmx75bE0qVyiuuTFOuyEzGxYVV2Fe8QtcKKilm6RwzTGg==}
|
||||
peerDependencies:
|
||||
react: '>=18'
|
||||
dependencies:
|
||||
'@chakra-ui/utils': 2.2.2(react@18.3.1)
|
||||
'@chakra-ui/utils': 2.2.3(react@18.3.1)
|
||||
'@zag-js/element-size': 0.31.1
|
||||
copy-to-clipboard: 3.3.3
|
||||
framesync: 6.1.2
|
||||
@@ -596,13 +596,13 @@ packages:
|
||||
react: 18.3.1
|
||||
dev: false
|
||||
|
||||
/@chakra-ui/icons@2.2.4(@chakra-ui/react@2.10.2)(react@18.3.1):
|
||||
/@chakra-ui/icons@2.2.4(@chakra-ui/react@2.10.4)(react@18.3.1):
|
||||
resolution: {integrity: sha512-l5QdBgwrAg3Sc2BRqtNkJpfuLw/pWRDwwT58J6c4PqQT6wzXxyNa8Q0PForu1ltB5qEiFb1kxr/F/HO1EwNa6g==}
|
||||
peerDependencies:
|
||||
'@chakra-ui/react': '>=2.0.0'
|
||||
react: '>=18'
|
||||
dependencies:
|
||||
'@chakra-ui/react': 2.10.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1)
|
||||
'@chakra-ui/react': 2.10.4(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1)
|
||||
react: 18.3.1
|
||||
dev: false
|
||||
|
||||
@@ -825,8 +825,8 @@ packages:
|
||||
react: 18.3.1
|
||||
dev: false
|
||||
|
||||
/@chakra-ui/react@2.10.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1):
|
||||
resolution: {integrity: sha512-TfIHTqTlxTHYJZBtpiR5EZasPUrLYKJxdbHkdOJb5G1OQ+2c5kKl5XA7c2pMtsEptzb7KxAAIB62t3hxdfWp1w==}
|
||||
/@chakra-ui/react@2.10.4(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1):
|
||||
resolution: {integrity: sha512-XyRWnuZ1Uw7Mlj5pKUGO5/WhnIHP/EOrpy6lGZC1yWlkd0eIfIpYMZ1ALTZx4KPEdbBaes48dgiMT2ROCqLhkA==}
|
||||
peerDependencies:
|
||||
'@emotion/react': '>=11'
|
||||
'@emotion/styled': '>=11'
|
||||
@@ -834,10 +834,10 @@ packages:
|
||||
react: '>=18'
|
||||
react-dom: '>=18'
|
||||
dependencies:
|
||||
'@chakra-ui/hooks': 2.4.2(react@18.3.1)
|
||||
'@chakra-ui/styled-system': 2.11.2(react@18.3.1)
|
||||
'@chakra-ui/theme': 3.4.6(@chakra-ui/styled-system@2.11.2)(react@18.3.1)
|
||||
'@chakra-ui/utils': 2.2.2(react@18.3.1)
|
||||
'@chakra-ui/hooks': 2.4.3(react@18.3.1)
|
||||
'@chakra-ui/styled-system': 2.12.1(react@18.3.1)
|
||||
'@chakra-ui/theme': 3.4.7(@chakra-ui/styled-system@2.12.1)(react@18.3.1)
|
||||
'@chakra-ui/utils': 2.2.3(react@18.3.1)
|
||||
'@emotion/react': 11.13.3(@types/react@18.3.11)(react@18.3.1)
|
||||
'@emotion/styled': 11.13.0(@emotion/react@11.13.3)(@types/react@18.3.11)(react@18.3.1)
|
||||
'@popperjs/core': 2.11.8
|
||||
@@ -868,10 +868,10 @@ packages:
|
||||
react: 18.3.1
|
||||
dev: false
|
||||
|
||||
/@chakra-ui/styled-system@2.11.2(react@18.3.1):
|
||||
resolution: {integrity: sha512-y++z2Uop+hjfZX9mbH88F1ikazPv32asD2er56zMJBemUAzweXnHTpiCQbluEDSUDhqmghVZAdb+5L4XLbsRxA==}
|
||||
/@chakra-ui/styled-system@2.12.1(react@18.3.1):
|
||||
resolution: {integrity: sha512-DQph1nDiCPtgze7nDe0a36530ByXb5VpPosKGyWMvKocVeZJcDtYG6XM0+V5a0wKuFBXsViBBRIFUTiUesJAcg==}
|
||||
dependencies:
|
||||
'@chakra-ui/utils': 2.2.2(react@18.3.1)
|
||||
'@chakra-ui/utils': 2.2.3(react@18.3.1)
|
||||
csstype: 3.1.3
|
||||
transitivePeerDependencies:
|
||||
- react
|
||||
@@ -915,14 +915,14 @@ packages:
|
||||
color2k: 2.0.3
|
||||
dev: false
|
||||
|
||||
/@chakra-ui/theme-tools@2.2.6(@chakra-ui/styled-system@2.11.2)(react@18.3.1):
|
||||
resolution: {integrity: sha512-3UhKPyzKbV3l/bg1iQN9PBvffYp+EBOoYMUaeTUdieQRPFzo2jbYR0lNCxqv8h5aGM/k54nCHU2M/GStyi9F2A==}
|
||||
/@chakra-ui/theme-tools@2.2.7(@chakra-ui/styled-system@2.12.1)(react@18.3.1):
|
||||
resolution: {integrity: sha512-K/VJd0QcnKik7m+qZTkggqNLep6+MPUu8IP5TUpHsnSM5R/RVjsJIR7gO8IZVAIMIGLLTIhGshHxeMekqv6LcQ==}
|
||||
peerDependencies:
|
||||
'@chakra-ui/styled-system': '>=2.0.0'
|
||||
dependencies:
|
||||
'@chakra-ui/anatomy': 2.3.4
|
||||
'@chakra-ui/styled-system': 2.11.2(react@18.3.1)
|
||||
'@chakra-ui/utils': 2.2.2(react@18.3.1)
|
||||
'@chakra-ui/anatomy': 2.3.5
|
||||
'@chakra-ui/styled-system': 2.12.1(react@18.3.1)
|
||||
'@chakra-ui/utils': 2.2.3(react@18.3.1)
|
||||
color2k: 2.0.3
|
||||
transitivePeerDependencies:
|
||||
- react
|
||||
@@ -948,15 +948,15 @@ packages:
|
||||
'@chakra-ui/theme-tools': 2.1.2(@chakra-ui/styled-system@2.9.2)
|
||||
dev: false
|
||||
|
||||
/@chakra-ui/theme@3.4.6(@chakra-ui/styled-system@2.11.2)(react@18.3.1):
|
||||
resolution: {integrity: sha512-ZwFBLfiMC3URwaO31ONXoKH9k0TX0OW3UjdPF3EQkQpYyrk/fm36GkkzajjtdpWEd7rzDLRsQjPmvwNaSoNDtg==}
|
||||
/@chakra-ui/theme@3.4.7(@chakra-ui/styled-system@2.12.1)(react@18.3.1):
|
||||
resolution: {integrity: sha512-pfewthgZTFNUYeUwGvhPQO/FTIyf375cFV1AT8N1y0aJiw4KDe7YTGm7p0aFy4AwAjH2ydMgeEx/lua4tx8qyQ==}
|
||||
peerDependencies:
|
||||
'@chakra-ui/styled-system': '>=2.8.0'
|
||||
dependencies:
|
||||
'@chakra-ui/anatomy': 2.3.4
|
||||
'@chakra-ui/styled-system': 2.11.2(react@18.3.1)
|
||||
'@chakra-ui/theme-tools': 2.2.6(@chakra-ui/styled-system@2.11.2)(react@18.3.1)
|
||||
'@chakra-ui/utils': 2.2.2(react@18.3.1)
|
||||
'@chakra-ui/anatomy': 2.3.5
|
||||
'@chakra-ui/styled-system': 2.12.1(react@18.3.1)
|
||||
'@chakra-ui/theme-tools': 2.2.7(@chakra-ui/styled-system@2.12.1)(react@18.3.1)
|
||||
'@chakra-ui/utils': 2.2.3(react@18.3.1)
|
||||
transitivePeerDependencies:
|
||||
- react
|
||||
dev: false
|
||||
@@ -981,8 +981,8 @@ packages:
|
||||
lodash.mergewith: 4.6.2
|
||||
dev: false
|
||||
|
||||
/@chakra-ui/utils@2.2.2(react@18.3.1):
|
||||
resolution: {integrity: sha512-jUPLT0JzRMWxpdzH6c+t0YMJYrvc5CLericgITV3zDSXblkfx3DsYXqU11DJTSGZI9dUKzM1Wd0Wswn4eJwvFQ==}
|
||||
/@chakra-ui/utils@2.2.3(react@18.3.1):
|
||||
resolution: {integrity: sha512-cldoCQuexZ6e07/9hWHKD4l1QXXlM1Nax9tuQOBvVf/EgwNZt3nZu8zZRDFlhAOKCTQDkmpLTTu+eXXjChNQOw==}
|
||||
peerDependencies:
|
||||
react: '>=16.8.0'
|
||||
dependencies:
|
||||
@@ -1675,20 +1675,20 @@ packages:
|
||||
prettier: 3.3.3
|
||||
dev: true
|
||||
|
||||
/@invoke-ai/ui-library@0.0.43(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.1.0)(@types/react@18.3.11)(i18next@23.15.1)(react-dom@18.3.1)(react@18.3.1):
|
||||
resolution: {integrity: sha512-t3fPYyks07ue3dEBPJuTHbeDLnDckDCOrtvc07mMDbLOnlPEZ0StaeiNGH+oO8qLzAuMAlSTdswgHfzTc2MmPw==}
|
||||
/@invoke-ai/ui-library@0.0.44(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.1.0)(@types/react@18.3.11)(i18next@23.15.1)(react-dom@18.3.1)(react@18.3.1):
|
||||
resolution: {integrity: sha512-PDseHmdr8oi8cmrpx3UwIYHn4NduAJX2R0pM0pyM54xrCMPMgYiCbC/eOs8Gt4fBc2ziiPZ9UGoW4evnE3YJsg==}
|
||||
peerDependencies:
|
||||
'@fontsource-variable/inter': ^5.0.16
|
||||
react: ^18.2.0
|
||||
react-dom: ^18.2.0
|
||||
dependencies:
|
||||
'@chakra-ui/anatomy': 2.3.4
|
||||
'@chakra-ui/icons': 2.2.4(@chakra-ui/react@2.10.2)(react@18.3.1)
|
||||
'@chakra-ui/anatomy': 2.2.2
|
||||
'@chakra-ui/icons': 2.2.4(@chakra-ui/react@2.10.4)(react@18.3.1)
|
||||
'@chakra-ui/layout': 2.3.1(@chakra-ui/system@2.6.2)(react@18.3.1)
|
||||
'@chakra-ui/portal': 2.1.0(react-dom@18.3.1)(react@18.3.1)
|
||||
'@chakra-ui/react': 2.10.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1)
|
||||
'@chakra-ui/styled-system': 2.11.2(react@18.3.1)
|
||||
'@chakra-ui/theme-tools': 2.2.6(@chakra-ui/styled-system@2.11.2)(react@18.3.1)
|
||||
'@chakra-ui/react': 2.10.4(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.11)(framer-motion@11.10.0)(react-dom@18.3.1)(react@18.3.1)
|
||||
'@chakra-ui/styled-system': 2.9.2
|
||||
'@chakra-ui/theme-tools': 2.1.2(@chakra-ui/styled-system@2.9.2)
|
||||
'@emotion/react': 11.13.3(@types/react@18.3.11)(react@18.3.1)
|
||||
'@emotion/styled': 11.13.0(@emotion/react@11.13.3)(@types/react@18.3.11)(react@18.3.1)
|
||||
'@fontsource-variable/inter': 5.1.0
|
||||
|
||||
@@ -96,7 +96,9 @@
    "new": "Neu",
    "ok": "OK",
    "close": "Schließen",
-    "clipboard": "Zwischenablage"
+    "clipboard": "Zwischenablage",
+    "generating": "Generieren",
+    "loadingModel": "Lade Modell"
  },
  "gallery": {
    "galleryImageSize": "Bildgröße",

@@ -591,7 +593,15 @@
    "loraTriggerPhrases": "LoRA-Auslösephrasen",
    "installingBundle": "Bündel wird installiert",
    "triggerPhrases": "Auslösephrasen",
-    "mainModelTriggerPhrases": "Hauptmodell-Auslösephrasen"
+    "mainModelTriggerPhrases": "Hauptmodell-Auslösephrasen",
+    "noDefaultSettings": "Für dieses Modell sind keine Standardeinstellungen konfiguriert. Besuchen Sie den Modell-Manager, um Standardeinstellungen hinzuzufügen.",
+    "defaultSettingsOutOfSync": "Einige Einstellungen stimmen nicht mit den Standardeinstellungen des Modells überein:",
+    "clipLEmbed": "CLIP-L einbetten",
+    "clipGEmbed": "CLIP-G einbetten",
+    "hfTokenLabel": "HuggingFace-Token (für einige Modelle erforderlich)",
+    "hfTokenHelperText": "Für die Nutzung einiger Modelle ist ein HF-Token erforderlich. Klicken Sie hier, um Ihr Token zu erstellen oder zu erhalten.",
+    "hfForbidden": "Sie haben keinen Zugriff auf dieses HF-Modell",
+    "hfTokenInvalid": "Ungültiges oder fehlendes HF-Token"
  },
  "parameters": {
    "images": "Bilder",

@@ -632,12 +642,6 @@
    "remixImage": "Remix des Bilds erstellen",
    "imageActions": "Weitere Bildaktionen",
    "invoke": {
-      "layer": {
-        "t2iAdapterIncompatibleBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, Bbox-Breite ist {{width}}",
-        "t2iAdapterIncompatibleScaledBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, Skalierte Bbox-Breite ist {{width}}",
-        "t2iAdapterIncompatibleScaledBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, Skalierte Bbox-Höhe ist {{height}}",
-        "t2iAdapterIncompatibleBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, Bbox-Höhe ist {{height}}"
-      },
      "fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), Skalierte Bbox-Breite ist {{width}}",
      "fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), Skalierte Bbox-Höhe ist {{height}}",
      "fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), Bbox-Breite ist {{width}}",

@@ -768,7 +772,8 @@
    "deletedPrivateBoardsCannotbeRestored": "Gelöschte Boards können nicht wiederhergestellt werden. Wenn Sie „Nur Board löschen“ wählen, werden die Bilder in einen privaten, nicht kategorisierten Status für den Ersteller des Bildes versetzt.",
    "assetsWithCount_one": "{{count}} in der Sammlung",
    "assetsWithCount_other": "{{count}} in der Sammlung",
-    "deletedBoardsCannotbeRestored": "Gelöschte Ordner können nicht wiederhergestellt werden. Die Auswahl von \"Nur Ordner löschen\" verschiebt Bilder in einen unkategorisierten Zustand."
+    "deletedBoardsCannotbeRestored": "Gelöschte Ordner können nicht wiederhergestellt werden. Die Auswahl von \"Nur Ordner löschen\" verschiebt Bilder in einen unkategorisierten Zustand.",
+    "updateBoardError": "Fehler beim Aktualisieren des Ordners"
  },
  "queue": {
    "status": "Status",

@@ -840,7 +845,8 @@
    "upscaling": "Hochskalierung",
    "canvas": "Leinwand",
    "prompts_one": "Prompt",
-    "prompts_other": "Prompts"
+    "prompts_other": "Prompts",
+    "batchSize": "Stapelgröße"
  },
  "metadata": {
    "negativePrompt": "Negativ Beschreibung",

@@ -871,7 +877,9 @@
    "recallParameter": "{{label}} Abrufen",
    "parsingFailed": "Parsing Fehlgeschlagen",
    "canvasV2Metadata": "Leinwand",
-    "guidance": "Führung"
+    "guidance": "Führung",
+    "seamlessXAxis": "Nahtlose X Achse",
+    "seamlessYAxis": "Nahtlose Y Achse"
  },
  "popovers": {
    "noiseUseCPU": {

@@ -1078,6 +1086,21 @@
    },
    "patchmatchDownScaleSize": {
      "heading": "Herunterskalieren"
    },
+    "paramHeight": {
+      "heading": "Höhe",
+      "paragraphs": [
+        "Höhe des generierten Bildes. Muss ein Vielfaches von 8 sein."
+      ]
+    },
+    "paramUpscaleMethod": {
+      "heading": "Vergrößerungsmethode",
+      "paragraphs": [
+        "Methode zum Hochskalieren des Bildes für High Resolution Fix."
+      ]
+    },
+    "paramHrf": {
+      "heading": "High Resolution Fix aktivieren"
+    }
  },
  "invocationCache": {

@@ -1392,7 +1415,13 @@
    "pullBboxIntoLayerOk": "Bbox in die Ebene gezogen",
    "saveBboxToGallery": "Bbox in Galerie speichern",
    "tool": {
-      "bbox": "Bbox"
+      "bbox": "Bbox",
+      "brush": "Pinsel",
+      "eraser": "Radiergummi",
+      "colorPicker": "Farbwähler",
+      "view": "Ansicht",
+      "rectangle": "Rechteck",
+      "move": "Verschieben"
    },
    "transform": {
      "fitToBbox": "An Bbox anpassen",

@@ -1434,7 +1463,6 @@
    "deleteReferenceImage": "Referenzbild löschen",
    "referenceImage": "Referenzbild",
    "opacity": "Opazität",
-    "resetCanvas": "Leinwand zurücksetzen",
    "removeBookmark": "Lesezeichen entfernen",
    "rasterLayer": "Raster-Ebene",
    "rasterLayers_withCount_visible": "Raster-Ebenen ({{count}})",

@@ -1511,7 +1539,30 @@
    "layer_one": "Ebene",
    "layer_other": "Ebenen",
    "layer_withCount_one": "Ebene ({{count}})",
-    "layer_withCount_other": "Ebenen ({{count}})"
+    "layer_withCount_other": "Ebenen ({{count}})",
+    "fill": {
+      "fillStyle": "Füllstil",
+      "diagonal": "Diagonal",
+      "vertical": "Vertikal",
+      "fillColor": "Füllfarbe",
+      "grid": "Raster",
+      "solid": "Solide",
+      "crosshatch": "Kreuzschraffur",
+      "horizontal": "Horizontal"
+    },
+    "filter": {
+      "apply": "Anwenden",
+      "reset": "Zurücksetzen",
+      "cancel": "Abbrechen",
+      "spandrel_filter": {
+        "label": "Bild-zu-Bild Modell",
+        "description": "Ein Bild-zu-Bild Modell auf der ausgewählten Ebene ausführen.",
+        "model": "Modell"
+      },
+      "filters": "Filter",
+      "filterType": "Filtertyp",
+      "filter": "Filter"
+    }
  },
  "upsell": {
    "shareAccess": "Zugang teilen",

@@ -122,6 +122,7 @@
"goTo": "Go to",
"hotkeysLabel": "Hotkeys",
"loadingImage": "Loading Image",
"loadingModel": "Loading Model",
"imageFailedToLoad": "Unable to Load Image",
"img2img": "Image To Image",
"inpaint": "inpaint",
@@ -175,7 +176,8 @@
"reset": "Reset",
"none": "None",
"new": "New",
"generating": "Generating"
"generating": "Generating",
"warnings": "Warnings"
},
"hrf": {
"hrf": "High Resolution Fix",
@@ -262,7 +264,8 @@
"iterations_one": "Iteration",
"iterations_other": "Iterations",
"generations_one": "Generation",
"generations_other": "Generations"
"generations_other": "Generations",
"batchSize": "Batch Size"
},
"invocationCache": {
"invocationCache": "Invocation Cache",
@@ -976,6 +979,8 @@
"zoomOutNodes": "Zoom Out",
"betaDesc": "This invocation is in beta. Until it is stable, it may have breaking changes during app updates. We plan to support this invocation long-term.",
"prototypeDesc": "This invocation is a prototype. It may have breaking changes during app updates and may be removed at any time.",
"internalDesc": "This invocation is used internally by Invoke. It may have breaking changes during app updates and may be removed at any time.",
"specialDesc": "This invocation some special handling in the app. For example, Batch nodes are used to queue multiple graphs from a single workflow.",
|
||||
"imageAccessError": "Unable to find image {{image_name}}, resetting to default",
|
||||
"boardAccessError": "Unable to find board {{board_id}}, resetting to default",
|
||||
"modelAccessError": "Unable to find model {{key}}, resetting to default",
|
||||
@@ -1014,8 +1019,11 @@
|
||||
"addingImagesTo": "Adding images to",
|
||||
"invoke": "Invoke",
|
||||
"missingFieldTemplate": "Missing field template",
|
||||
"missingInputForField": "{{nodeLabel}} -> {{fieldLabel}} missing input",
|
||||
"missingInputForField": "{{nodeLabel}} -> {{fieldLabel}}: missing input",
|
||||
"missingNodeTemplate": "Missing node template",
|
||||
"collectionEmpty": "{{nodeLabel}} -> {{fieldLabel}} empty collection",
|
||||
"collectionTooFewItems": "{{nodeLabel}} -> {{fieldLabel}}: too few items, minimum {{minItems}}",
|
||||
"collectionTooManyItems": "{{nodeLabel}} -> {{fieldLabel}}: too many items, maximum {{maxItems}}",
|
||||
"noModelSelected": "No model selected",
|
||||
"noT5EncoderModelSelected": "No T5 Encoder model selected for FLUX generation",
|
||||
"noFLUXVAEModelSelected": "No VAE model selected for FLUX generation",
|
||||
@@ -1024,26 +1032,14 @@
|
||||
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), bbox height is {{height}}",
|
||||
"fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), scaled bbox width is {{width}}",
|
||||
"fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), scaled bbox height is {{height}}",
|
||||
"canvasIsFiltering": "Canvas is filtering",
|
||||
"canvasIsTransforming": "Canvas is transforming",
|
||||
"canvasIsRasterizing": "Canvas is rasterizing",
|
||||
"canvasIsCompositing": "Canvas is compositing",
|
||||
"canvasIsFiltering": "Canvas is busy (filtering)",
|
||||
"canvasIsTransforming": "Canvas is busy (transforming)",
|
||||
"canvasIsRasterizing": "Canvas is busy (rasterizing)",
|
||||
"canvasIsCompositing": "Canvas is busy (compositing)",
|
||||
"canvasIsSelectingObject": "Canvas is busy (selecting object)",
|
||||
"noPrompts": "No prompts generated",
|
||||
"noNodesInGraph": "No nodes in graph",
|
||||
"systemDisconnected": "System disconnected",
|
||||
"layer": {
|
||||
"controlAdapterNoModelSelected": "no Control Adapter model selected",
|
||||
"controlAdapterIncompatibleBaseModel": "incompatible Control Adapter base model",
|
||||
"t2iAdapterIncompatibleBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, bbox width is {{width}}",
|
||||
"t2iAdapterIncompatibleBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, bbox height is {{height}}",
|
||||
"t2iAdapterIncompatibleScaledBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, scaled bbox width is {{width}}",
|
||||
"t2iAdapterIncompatibleScaledBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, scaled bbox height is {{height}}",
|
||||
"ipAdapterNoModelSelected": "no IP adapter selected",
|
||||
"ipAdapterIncompatibleBaseModel": "incompatible IP Adapter base model",
|
||||
"ipAdapterNoImageSelected": "no IP Adapter image selected",
|
||||
"rgNoPromptsOrIPAdapters": "no text prompts or IP Adapters",
|
||||
"rgNoRegion": "no region selected"
|
||||
}
|
||||
"systemDisconnected": "System disconnected"
|
||||
},
|
||||
"maskBlur": "Mask Blur",
|
||||
"negativePromptPlaceholder": "Negative Prompt",
|
||||
@@ -1311,8 +1307,9 @@
|
||||
"controlNetBeginEnd": {
|
||||
"heading": "Begin / End Step Percentage",
|
||||
"paragraphs": [
|
||||
"The part of the of the denoising process that will have the Control Adapter applied.",
|
||||
"Generally, Control Adapters applied at the start of the process guide composition, and Control Adapters applied at the end guide details."
|
||||
"This setting determines which portion of the denoising (generation) process incorporates the guidance from this layer.",
|
||||
"• Start Step (%): Specifies when to begin applying the guidance from this layer during the generation process.",
|
||||
"• End Step (%): Specifies when to stop applying this layer's guidance and revert general guidance from the model and other settings."
|
||||
]
|
||||
},
|
||||
"controlNetControlMode": {
|
||||
@@ -1322,7 +1319,7 @@
|
||||
"controlNetProcessor": {
|
||||
"heading": "Processor",
|
||||
"paragraphs": [
|
||||
"Method of processing the input image to guide the generation process. Different processors will providedifferent effects or styles in your generated images."
|
||||
"Method of processing the input image to guide the generation process. Different processors will provide different effects or styles in your generated images."
|
||||
]
|
||||
},
|
||||
"controlNetResizeMode": {
|
||||
@@ -1330,13 +1327,15 @@
|
||||
"paragraphs": ["Method to fit Control Adapter's input image size to the output generation size."]
|
||||
},
|
||||
"ipAdapterMethod": {
|
||||
"heading": "Method",
|
||||
"paragraphs": ["Method by which to apply the current IP Adapter."]
|
||||
"heading": "Mode",
|
||||
"paragraphs": ["The mode defines how the reference image will guide the generation process."]
|
||||
},
|
||||
"controlNetWeight": {
|
||||
"heading": "Weight",
|
||||
"paragraphs": [
|
||||
"Weight of the Control Adapter. Higher weight will lead to larger impacts on the final image."
|
||||
"Adjusts how strongly the layer influences the generation process",
|
||||
"• Higher Weight (.75-2): Creates a more significant impact on the final result.",
|
||||
"• Lower Weight (0-.75): Creates a smaller impact on the final result."
|
||||
]
|
||||
},
|
||||
"dynamicPrompts": {
|
||||
@@ -1658,7 +1657,6 @@
|
||||
"newControlLayerError": "Problem Creating Control Layer",
|
||||
"newRasterLayerOk": "Created Raster Layer",
|
||||
"newRasterLayerError": "Problem Creating Raster Layer",
|
||||
"newFromImage": "New from Image",
|
||||
"pullBboxIntoLayerOk": "Bbox Pulled Into Layer",
|
||||
"pullBboxIntoLayerError": "Problem Pulling BBox Into Layer",
|
||||
"pullBboxIntoReferenceImageOk": "Bbox Pulled Into ReferenceImage",
|
||||
@@ -1671,7 +1669,7 @@
|
||||
"mergingLayers": "Merging layers",
|
||||
"clearHistory": "Clear History",
|
||||
"bboxOverlay": "Show Bbox Overlay",
|
||||
"resetCanvas": "Reset Canvas",
|
||||
"newSession": "New Session",
|
||||
"clearCaches": "Clear Caches",
|
||||
"recalculateRects": "Recalculate Rects",
|
||||
"clipToBbox": "Clip Strokes to Bbox",
|
||||
@@ -1703,8 +1701,12 @@
|
||||
"controlLayer": "Control Layer",
|
||||
"inpaintMask": "Inpaint Mask",
|
||||
"regionalGuidance": "Regional Guidance",
|
||||
"canvasAsRasterLayer": "$t(controlLayers.canvas) as $t(controlLayers.rasterLayer)",
|
||||
"canvasAsControlLayer": "$t(controlLayers.canvas) as $t(controlLayers.controlLayer)",
|
||||
"referenceImageRegional": "Reference Image (Regional)",
|
||||
"referenceImageGlobal": "Reference Image (Global)",
|
||||
"asRasterLayer": "As $t(controlLayers.rasterLayer)",
|
||||
"asRasterLayerResize": "As $t(controlLayers.rasterLayer) (Resize)",
|
||||
"asControlLayer": "As $t(controlLayers.controlLayer)",
|
||||
"asControlLayerResize": "As $t(controlLayers.controlLayer) (Resize)",
|
||||
"referenceImage": "Reference Image",
|
||||
"regionalReferenceImage": "Regional Reference Image",
|
||||
"globalReferenceImage": "Global Reference Image",
|
||||
@@ -1772,6 +1774,7 @@
|
||||
"pullBboxIntoLayer": "Pull Bbox into Layer",
|
||||
"pullBboxIntoReferenceImage": "Pull Bbox into Reference Image",
|
||||
"showProgressOnCanvas": "Show Progress on Canvas",
|
||||
"useImage": "Use Image",
|
||||
"prompt": "Prompt",
|
||||
"negativePrompt": "Negative Prompt",
|
||||
"beginEndStepPercentShort": "Begin/End %",
|
||||
@@ -1780,8 +1783,26 @@
|
||||
"newGallerySessionDesc": "This will clear the canvas and all settings except for your model selection. Generations will be sent to the gallery.",
|
||||
"newCanvasSession": "New Canvas Session",
|
||||
"newCanvasSessionDesc": "This will clear the canvas and all settings except for your model selection. Generations will be staged on the canvas.",
|
||||
"resetCanvasLayers": "Reset Canvas Layers",
|
||||
"resetGenerationSettings": "Reset Generation Settings",
|
||||
"replaceCurrent": "Replace Current",
|
||||
"controlLayerEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, or draw on the canvas to get started.",
|
||||
"referenceImageEmptyState": "<UploadButton>Upload an image</UploadButton> or drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer to get started.",
|
||||
"warnings": {
|
||||
"problemsFound": "Problems found",
|
||||
"unsupportedModel": "layer not supported for selected base model",
|
||||
"controlAdapterNoModelSelected": "no Control Layer model selected",
|
||||
"controlAdapterIncompatibleBaseModel": "incompatible Control Layer base model",
|
||||
"controlAdapterNoControl": "no control selected/drawn",
|
||||
"ipAdapterNoModelSelected": "no Reference Image model selected",
|
||||
"ipAdapterIncompatibleBaseModel": "incompatible Reference Image base model",
|
||||
"ipAdapterNoImageSelected": "no Reference Image image selected",
|
||||
"rgNoPromptsOrIPAdapters": "no text prompts or Reference Images",
|
||||
"rgNegativePromptNotSupported": "Negative Prompt not supported for selected base model",
|
||||
"rgReferenceImagesNotSupported": "regional Reference Images not supported for selected base model",
|
||||
"rgAutoNegativeNotSupported": "Auto-Negative not supported for selected base model",
|
||||
"rgNoRegion": "no region drawn"
|
||||
},
|
||||
"controlMode": {
|
||||
"controlMode": "Control Mode",
|
||||
"balanced": "Balanced (recommended)",
|
||||
@@ -1790,10 +1811,13 @@
|
||||
"megaControl": "Mega Control"
|
||||
},
|
||||
"ipAdapterMethod": {
|
||||
"ipAdapterMethod": "IP Adapter Method",
|
||||
"ipAdapterMethod": "Mode",
|
||||
"full": "Style and Composition",
|
||||
"fullDesc": "Applies visual style (colors, textures) & composition (layout, structure).",
|
||||
"style": "Style Only",
|
||||
"composition": "Composition Only"
|
||||
"styleDesc": "Applies visual style (colors, textures) without considering its layout.",
|
||||
"composition": "Composition Only",
|
||||
"compositionDesc": "Replicates layout & structure while ignoring the reference's style."
|
||||
},
|
||||
"fill": {
|
||||
"fillColor": "Fill Color",
|
||||
@@ -2109,11 +2133,73 @@
|
||||
"whatsNew": {
|
||||
"whatsNewInInvoke": "What's New in Invoke",
|
||||
"items": [
|
||||
"<StrongComponent>SD 3.5</StrongComponent>: Support for Text-to-Image in Workflows with SD 3.5 Medium and Large.",
|
||||
"<StrongComponent>Canvas</StrongComponent>: Streamlined Control Layer processing and improved default Control settings."
|
||||
"<StrongComponent>FLUX Regional Guidance (beta)</StrongComponent>: Our beta release of FLUX Regional Guidance is live for regional prompt control.",
|
||||
"<StrongComponent>Various UX Improvements</StrongComponent>: A number of small UX and Quality of Life improvements throughout the app."
|
||||
],
|
||||
"readReleaseNotes": "Read Release Notes",
|
||||
"watchRecentReleaseVideos": "Watch Recent Release Videos",
|
||||
"watchUiUpdatesOverview": "Watch UI Updates Overview"
|
||||
},
|
||||
"supportVideos": {
|
||||
"supportVideos": "Support Videos",
|
||||
"gettingStarted": "Getting Started",
|
||||
"controlCanvas": "Control Canvas",
|
||||
"watch": "Watch",
|
||||
"studioSessionsDesc1": "Check out the <StudioSessionsPlaylistLink /> for Invoke deep dives.",
|
||||
"studioSessionsDesc2": "Join our <DiscordLink /> to participate in the live sessions and ask questions. Sessions are uploaded to the playlist the following week.",
|
||||
"videos": {
|
||||
"creatingYourFirstImage": {
|
||||
"title": "Creating Your First Image",
|
||||
"description": "Introduction to creating an image from scratch using Invoke's tools."
|
||||
},
|
||||
"usingControlLayersAndReferenceGuides": {
|
||||
"title": "Using Control Layers and Reference Guides",
|
||||
"description": "Learn how to guide your image creation with control layers and reference images."
|
||||
},
|
||||
"understandingImageToImageAndDenoising": {
|
||||
"title": "Understanding Image-to-Image and Denoising",
|
||||
"description": "Overview of image-to-image transformations and denoising in Invoke."
|
||||
},
|
||||
"exploringAIModelsAndConceptAdapters": {
|
||||
"title": "Exploring AI Models and Concept Adapters",
|
||||
"description": "Dive into AI models and how to use concept adapters for creative control."
|
||||
},
|
||||
"creatingAndComposingOnInvokesControlCanvas": {
|
||||
"title": "Creating and Composing on Invoke's Control Canvas",
|
||||
"description": "Learn to compose images using Invoke's control canvas."
|
||||
},
|
||||
"upscaling": {
|
||||
"title": "Upscaling",
|
||||
"description": "How to upscale images with Invoke's tools to enhance resolution."
|
||||
},
|
||||
"howDoIGenerateAndSaveToTheGallery": {
|
||||
"title": "How Do I Generate and Save to the Gallery?",
|
||||
"description": "Steps to generate and save images to the gallery."
|
||||
},
|
||||
"howDoIEditOnTheCanvas": {
|
||||
"title": "How Do I Edit on the Canvas?",
|
||||
"description": "Guide to editing images directly on the canvas."
|
||||
},
|
||||
"howDoIDoImageToImageTransformation": {
|
||||
"title": "How Do I Do Image-to-Image Transformation?",
|
||||
"description": "Tutorial on performing image-to-image transformations in Invoke."
|
||||
},
|
||||
"howDoIUseControlNetsAndControlLayers": {
|
||||
"title": "How Do I Use Control Nets and Control Layers?",
|
||||
"description": "Learn to apply control layers and controlnets to your images."
|
||||
},
|
||||
"howDoIUseGlobalIPAdaptersAndReferenceImages": {
|
||||
"title": "How Do I Use Global IP Adapters and Reference Images?",
|
||||
"description": "Introduction to adding reference images and global IP adapters."
|
||||
},
|
||||
"howDoIUseInpaintMasks": {
|
||||
"title": "How Do I Use Inpaint Masks?",
|
||||
"description": "How to apply inpaint masks for image correction and variation."
|
||||
},
|
||||
"howDoIOutpaint": {
|
||||
"title": "How Do I Outpaint?",
|
||||
"description": "Guide to outpainting beyond the original image borders."
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
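The English strings above rely on three i18next conventions that recur throughout these locale files: `{{...}}` placeholders interpolated at lookup time, `$t(...)` references that nest one key's resolved value inside another, and plural suffixes (`_one`, `_many`, `_other`) selected from a `count` option. A minimal TypeScript sketch of how such keys resolve, assuming a hypothetical standalone i18next setup (not the app's actual bootstrap code) and a toy resource bundle copied from the strings above:

```ts
// Minimal sketch (hypothetical standalone setup, not the app's bootstrap code):
// how i18next resolves the key patterns used in these locale files.
import i18next from "i18next";

await i18next.init({
  lng: "en",
  resources: {
    en: {
      translation: {
        queue: {
          generations_one: "Generation",
          generations_other: "Generations",
        },
        controlLayers: {
          rasterLayer: "Raster Layer",
          // $t(...) nests another key's resolved value into this string.
          asRasterLayer: "As $t(controlLayers.rasterLayer)",
          layer_withCount_one: "Layer ({{count}})",
          layer_withCount_other: "Layers ({{count}})",
        },
      },
    },
  },
});

// The plural suffix is chosen from the `count` option...
i18next.t("queue.generations", { count: 1 }); // "Generation"
i18next.t("queue.generations", { count: 4 }); // "Generations"
// ...and {{count}} is then interpolated into the selected variant.
i18next.t("controlLayers.layer_withCount", { count: 4 }); // "Layers (4)"
// Nested $t(...) references resolve against the same bundle.
i18next.t("controlLayers.asRasterLayer"); // "As Raster Layer"
```

Which suffixes a language needs is not fixed per key: i18next asks `Intl.PluralRules` for the active locale's CLDR plural categories, which is why the Spanish file below defines `_many` variants that English does not use.
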
@@ -13,7 +13,7 @@
"discordLabel": "Discord",
"back": "Atrás",
"loading": "Cargando",
"postprocessing": "Postprocesado",
"postprocessing": "Postprocesamiento",
"txt2img": "De texto a imagen",
"accept": "Aceptar",
"cancel": "Cancelar",
@@ -64,7 +64,7 @@
"prevPage": "Página Anterior",
"red": "Rojo",
"alpha": "Transparencia",
"outputs": "Salidas",
"outputs": "Resultados",
"learnMore": "Aprende más",
"enabled": "Activado",
"disabled": "Desactivado",
@@ -73,7 +73,32 @@
"created": "Creado",
"save": "Guardar",
"unknownError": "Error Desconocido",
"blue": "Azul"
"blue": "Azul",
"clipboard": "Portapapeles",
"loadingImage": "Cargando la imagen",
"inpaint": "inpaint",
"ipAdapter": "Adaptador IP",
"t2iAdapter": "Adaptador T2I",
"apply": "Aplicar",
"openInViewer": "Abrir en el visor",
"off": "Apagar",
"generating": "Generando",
"ok": "De acuerdo",
"placeholderSelectAModel": "Seleccionar un modelo",
"reset": "Restablecer",
"none": "Ninguno",
"new": "Nuevo",
"dontShowMeThese": "No mostrar estos",
"loadingModel": "Cargando el modelo",
"view": "Ver",
"edit": "Editar",
"safetensors": "Safetensors",
"toResolve": "Para resolver",
"localSystem": "Sistema local",
"notInstalled": "No $t(common.installed)",
"outpaint": "outpaint",
"simple": "Sencillo",
"close": "Cerrar"
},
"gallery": {
"galleryImageSize": "Tamaño de la imagen",
@@ -85,7 +110,63 @@
"deleteImage_other": "Eliminar {{count}} Imágenes",
"deleteImagePermanent": "Las imágenes eliminadas no se pueden restaurar.",
"assets": "Activos",
"autoAssignBoardOnClick": "Asignación automática de tableros al hacer clic"
"autoAssignBoardOnClick": "Asignar automática tableros al hacer clic",
"gallery": "Galería",
"noImageSelected": "Sin imágenes seleccionadas",
"bulkDownloadRequestFailed": "Error al preparar la descarga",
"oldestFirst": "La más antigua primero",
"sideBySide": "conjuntamente",
"selectForCompare": "Seleccionar para comparar",
"alwaysShowImageSizeBadge": "Mostrar siempre las dimensiones de la imagen",
"currentlyInUse": "Esta imagen se utiliza actualmente con las siguientes funciones:",
"unableToLoad": "No se puede cargar la galería",
"selectAllOnPage": "Seleccionar todo en la página",
"selectAnImageToCompare": "Seleccione una imagen para comparar",
"bulkDownloadFailed": "Error en la descarga",
"compareHelp2": "Presione <Kbd> M </Kbd> para recorrer los modos de comparación.",
"move": "Mover",
"copy": "Copiar",
"drop": "Gota",
"displayBoardSearch": "Tablero de búsqueda",
"deleteSelection": "Borrar selección",
"downloadSelection": "Descargar selección",
"openInViewer": "Abrir en el visor",
"searchImages": "Búsqueda por metadatos",
"swapImages": "Intercambiar imágenes",
"sortDirection": "Orden de clasificación",
"showStarredImagesFirst": "Mostrar imágenes destacadas primero",
"go": "Ir",
"bulkDownloadRequested": "Preparando la descarga",
"image": "imagen",
"compareHelp4": "Presione <Kbd> Z </Kbd> o <Kbd> Esc </Kbd> para salir.",
"viewerImage": "Ver imagen",
"dropOrUpload": "$t(gallery.drop) o cargar",
"displaySearch": "Buscar imagen",
"download": "Descargar",
"exitBoardSearch": "Finalizar búsqueda",
"exitSearch": "Salir de la búsqueda de imágenes",
"featuresWillReset": "Si elimina esta imagen, dichas funciones se restablecerán inmediatamente.",
"jump": "Omitir",
"loading": "Cargando",
"newestFirst": "La más nueva primero",
"unstarImage": "Dejar de ser favorita",
"bulkDownloadRequestedDesc": "Su solicitud de descarga se está preparando. Esto puede tardar unos minutos.",
"hover": "Desplazar",
"compareHelp1": "Mantenga presionada la tecla <Kbd> Alt </Kbd> mientras hace clic en una imagen de la galería o utiliza las teclas de flecha para cambiar la imagen de comparación.",
"stretchToFit": "Estirar para encajar",
"exitCompare": "Salir de la comparación",
"starImage": "Imágenes favoritas",
"dropToUpload": "$t(gallery.drop) para cargar",
"slider": "Deslizador",
"assetsTab": "Archivos que has cargado para utilizarlos en tus proyectos.",
"imagesTab": "Imágenes que ha creado y guardado en Invoke.",
"compareImage": "Comparar imagen",
"boardsSettings": "Ajustes de los tableros",
"imagesSettings": "Configuración de imágenes de la galería",
"compareHelp3": "Presione <Kbd> C </Kbd> para intercambiar las imágenes comparadas.",
"showArchivedBoards": "Mostrar paneles archivados",
"closeViewer": "Cerrar visor",
"openViewer": "Abrir visor"
},
"modelManager": {
"modelManager": "Gestor de Modelos",
@@ -131,7 +212,13 @@
"modelDeleted": "Modelo eliminado",
"modelDeleteFailed": "Error al borrar el modelo",
"settings": "Ajustes",
"syncModels": "Sincronizar las plantillas"
"syncModels": "Sincronizar las plantillas",
"clipEmbed": "Incrustar CLIP",
"addModels": "Añadir modelos",
"advanced": "Avanzado",
"clipGEmbed": "Incrustar CLIP-G",
"cancel": "Cancelar",
"clipLEmbed": "Incrustar CLIP-L"
},
"parameters": {
"images": "Imágenes",
@@ -158,19 +245,19 @@
"useSeed": "Usar Semilla",
"useAll": "Usar Todo",
"info": "Información",
"showOptionsPanel": "Mostrar panel de opciones",
"showOptionsPanel": "Mostrar panel lateral (O o T)",
"symmetry": "Simetría",
"copyImage": "Copiar la imagen",
"general": "General",
"denoisingStrength": "Intensidad de la eliminación del ruido",
"seamlessXAxis": "Eje x",
"seamlessYAxis": "Eje y",
"seamlessXAxis": "Eje X sin juntas",
"seamlessYAxis": "Eje Y sin juntas",
"scheduler": "Programador",
"positivePromptPlaceholder": "Prompt Positivo",
"negativePromptPlaceholder": "Prompt Negativo",
"controlNetControlMode": "Modo de control",
"clipSkip": "Omitir el CLIP",
"maskBlur": "Difuminar",
"maskBlur": "Desenfoque de máscara",
"patchmatchDownScaleSize": "Reducir a escala",
"coherenceMode": "Modo"
},
@@ -202,16 +289,19 @@
"serverError": "Error en el servidor",
"canceled": "Procesando la cancelación",
"connected": "Conectado al servidor",
"uploadFailedInvalidUploadDesc": "Debe ser una sola imagen PNG o JPEG",
"parameterSet": "Conjunto de parámetros",
"parameterNotSet": "Parámetro no configurado",
"uploadFailedInvalidUploadDesc": "Deben ser imágenes PNG o JPEG.",
"parameterSet": "Parámetro recuperado",
"parameterNotSet": "Parámetro no recuperado",
"problemCopyingImage": "No se puede copiar la imagen",
"errorCopied": "Error al copiar",
"baseModelChanged": "Modelo base cambiado",
"addedToBoard": "Añadido al tablero",
"addedToBoard": "Se agregó a los activos del panel {{name}}",
"baseModelChangedCleared_one": "Borrado o desactivado {{count}} submodelo incompatible",
"baseModelChangedCleared_many": "Borrados o desactivados {{count}} submodelos incompatibles",
"baseModelChangedCleared_other": "Borrados o desactivados {{count}} submodelos incompatibles"
"baseModelChangedCleared_other": "Borrados o desactivados {{count}} submodelos incompatibles",
"addedToUncategorized": "Añadido a los activos del tablero $t(boards.uncategorized)",
"imagesWillBeAddedTo": "Las imágenes subidas se añadirán a los activos del panel {{boardName}}.",
"layerCopiedToClipboard": "Capa copiada en el portapapeles"
},
"accessibility": {
"invokeProgressBar": "Activar la barra de progreso",
@@ -226,7 +316,8 @@
"mode": "Modo",
"submitSupportTicket": "Enviar Ticket de Soporte",
"toggleRightPanel": "Activar o desactivar el panel derecho (G)",
"toggleLeftPanel": "Activar o desactivar el panel izquierdo (T)"
"toggleLeftPanel": "Activar o desactivar el panel izquierdo (T)",
"uploadImages": "Cargar imagen(es)"
},
"nodes": {
"zoomInNodes": "Acercar",
@@ -238,7 +329,8 @@
"showMinimapnodes": "Mostrar el minimapa",
"reloadNodeTemplates": "Recargar las plantillas de nodos",
"loadWorkflow": "Cargar el flujo de trabajo",
"downloadWorkflow": "Descargar el flujo de trabajo en un archivo JSON"
"downloadWorkflow": "Descargar el flujo de trabajo en un archivo JSON",
"boardAccessError": "No se puede encontrar el panel {{board_id}}, se está restableciendo al valor predeterminado"
},
"boards": {
"autoAddBoard": "Agregar panel automáticamente",
@@ -255,7 +347,7 @@
"bottomMessage": "Al eliminar este panel y las imágenes que contiene, se restablecerán las funciones que los estén utilizando actualmente.",
"deleteBoardAndImages": "Borrar el panel y las imágenes",
"loading": "Cargando...",
"deletedBoardsCannotbeRestored": "Los paneles eliminados no se pueden restaurar. Al Seleccionar 'Borrar Solo el Panel' transferirá las imágenes a un estado sin categorizar.",
"deletedBoardsCannotbeRestored": "Los paneles eliminados no se pueden restaurar. Al Seleccionar 'Borrar solo el panel' transferirá las imágenes a un estado sin categorizar.",
"move": "Mover",
"menuItemAutoAdd": "Agregar automáticamente a este panel",
"searchBoard": "Buscando paneles…",
@@ -263,29 +355,33 @@
"downloadBoard": "Descargar panel",
"deleteBoardOnly": "Borrar solo el panel",
"myBoard": "Mi panel",
"noMatching": "No hay paneles que coincidan",
"noMatching": "Sin paneles coincidentes",
"imagesWithCount_one": "{{count}} imagen",
"imagesWithCount_many": "{{count}} imágenes",
"imagesWithCount_other": "{{count}} imágenes",
"assetsWithCount_one": "{{count}} activo",
"assetsWithCount_many": "{{count}} activos",
"assetsWithCount_other": "{{count}} activos",
"hideBoards": "Ocultar Paneles",
"addPrivateBoard": "Agregar un tablero privado",
"addSharedBoard": "Agregar Panel Compartido",
"hideBoards": "Ocultar paneles",
"addPrivateBoard": "Agregar un panel privado",
"addSharedBoard": "Añadir panel compartido",
"boards": "Paneles",
"archiveBoard": "Archivar Panel",
"archiveBoard": "Archivar panel",
"archived": "Archivado",
"selectedForAutoAdd": "Seleccionado para agregar automáticamente",
"unarchiveBoard": "Desarchivar el tablero",
"noBoards": "No hay tableros {{boardType}}",
"shared": "Carpetas compartidas",
"deletedPrivateBoardsCannotbeRestored": "Los tableros eliminados no se pueden restaurar. Al elegir \"Eliminar solo tablero\", las imágenes se colocan en un estado privado y sin categoría para el creador de la imagen."
"unarchiveBoard": "Desarchivar el panel",
"noBoards": "No hay paneles {{boardType}}",
"shared": "Paneles compartidos",
"deletedPrivateBoardsCannotbeRestored": "Los paneles eliminados no se pueden restaurar. Al elegir \"Eliminar solo el panel\", las imágenes se colocan en un estado privado y sin categoría para el creador de la imagen.",
"viewBoards": "Ver paneles",
"private": "Paneles privados",
"updateBoardError": "No se pudo actualizar el panel"
},
"accordions": {
"compositing": {
"title": "Composición",
"infillTab": "Relleno"
"infillTab": "Relleno",
"coherenceTab": "Parámetros de la coherencia"
},
"generation": {
"title": "Generación"
@@ -309,7 +405,10 @@
"workflows": "Flujos de trabajo",
"models": "Modelos",
"modelsTab": "$t(ui.tabs.models) $t(common.tab)",
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)"
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
"upscaling": "Upscaling",
"gallery": "Galería",
"upscalingTab": "$t(ui.tabs.upscaling) $t(common.tab)"
}
},
"queue": {
@@ -317,12 +416,81 @@
"front": "Delante",
"batchQueuedDesc_one": "Se agregó {{count}} sesión a {{direction}} la cola",
"batchQueuedDesc_many": "Se agregaron {{count}} sesiones a {{direction}} la cola",
"batchQueuedDesc_other": "Se agregaron {{count}} sesiones a {{direction}} la cola"
"batchQueuedDesc_other": "Se agregaron {{count}} sesiones a {{direction}} la cola",
"clearQueueAlertDialog": "Al vaciar la cola se cancela inmediatamente cualquier elemento de procesamiento y se vaciará la cola por completo. Los filtros pendientes se cancelarán.",
"time": "Tiempo",
"clearFailed": "Error al vaciar la cola",
"cancelFailed": "Error al cancelar el elemento",
"resumeFailed": "Error al reanudar el proceso",
"pause": "Pausar",
"pauseTooltip": "Pausar el proceso",
"cancelBatchSucceeded": "Lote cancelado",
"pruneSucceeded": "Se purgaron {{item_count}} elementos completados de la cola",
"pruneFailed": "Error al purgar la cola",
"cancelBatchFailed": "Error al cancelar los lotes",
"pauseFailed": "Error al pausar el proceso",
"status": "Estado",
"origin": "Origen",
"destination": "Destino",
"generations_one": "Generación",
"generations_many": "Generaciones",
"generations_other": "Generaciones",
"resume": "Reanudar",
"queueEmpty": "Cola vacía",
"cancelItem": "Cancelar elemento",
"cancelBatch": "Cancelar lote",
"openQueue": "Abrir la cola",
"completed": "Completado",
"enqueueing": "Añadir lotes a la cola",
"clear": "Limpiar",
"pauseSucceeded": "Proceso pausado",
"resumeSucceeded": "Proceso reanudado",
"resumeTooltip": "Reanudar proceso",
"cancel": "Cancelar",
"cancelTooltip": "Cancelar artículo actual",
"pruneTooltip": "Purgar {{item_count}} elementos completados",
"batchQueued": "Lote en cola",
"pending": "Pendiente",
"item": "Elemento",
"total": "Total",
"in_progress": "En proceso",
"failed": "Fallido",
"completedIn": "Completado en",
"upscaling": "Upscaling",
"canvas": "Lienzo",
"generation": "Generación",
"workflows": "Flujo de trabajo",
"other": "Otro",
"queueFront": "Añadir al principio de la cola",
"gallery": "Galería",
"batchFieldValues": "Valores de procesamiento por lotes",
"session": "Sesión",
"notReady": "La cola aún no está lista",
"graphQueued": "Gráfico en cola",
"clearQueueAlertDialog2": "¿Estás seguro que deseas vaciar la cola?",
"next": "Siguiente",
"iterations_one": "Interacción",
"iterations_many": "Interacciones",
"iterations_other": "Interacciones",
"current": "Actual",
"queue": "Cola",
"queueBack": "Añadir a la cola",
"cancelSucceeded": "Elemento cancelado",
"clearTooltip": "Cancelar y limpiar todos los elementos",
"clearSucceeded": "Cola vaciada",
"canceled": "Cancelado",
"batch": "Lote",
"graphFailedToQueue": "Error al poner el gráfico en cola",
"batchFailedToQueue": "Error al poner en cola el lote",
"prompts_one": "Prompt",
"prompts_many": "Prompts",
"prompts_other": "Prompts",
"prune": "Eliminar"
},
"upsell": {
"inviteTeammates": "Invitar compañeros de equipo",
"shareAccess": "Compartir acceso",
"professionalUpsell": "Disponible en la edición profesional de Invoke. Haz clic aquí o visita invoke.com/pricing para obtener más detalles."
"professionalUpsell": "Disponible en la edición profesional de Invoke. Haga clic aquí o visite invoke.com/pricing para obtener más detalles."
},
"controlLayers": {
"layer_one": "Capa",
@@ -330,6 +498,415 @@
"layer_other": "Capas",
"layer_withCount_one": "({{count}}) capa",
"layer_withCount_many": "({{count}}) capas",
"layer_withCount_other": "({{count}}) capas"
"layer_withCount_other": "({{count}}) capas",
"copyToClipboard": "Copiar al portapapeles"
},
"whatsNew": {
"readReleaseNotes": "Leer las notas de la versión",
"watchRecentReleaseVideos": "Ver videos de versiones recientes",
"watchUiUpdatesOverview": "Descripción general de las actualizaciones de la interfaz de usuario de Watch",
"whatsNewInInvoke": "Novedades en Invoke",
"items": [
"<StrongComponent>SD 3.5</StrongComponent>: compatibilidad con SD 3.5 Medium y Large.",
"<StrongComponent>Lienzo</StrongComponent>: Se ha simplificado el procesamiento de la capa de control y se ha mejorado la configuración predeterminada del control."
]
},
"invocationCache": {
"enableFailed": "Error al activar la cache",
"cacheSize": "Tamaño de la caché",
"hits": "Accesos a la caché",
"invocationCache": "Caché",
"misses": "Errores de la caché",
"clear": "Limpiar",
"maxCacheSize": "Tamaño máximo de la caché",
"enableSucceeded": "Cache activada",
"clearFailed": "Error al borrar la cache",
"enable": "Activar",
"useCache": "Uso de la caché",
"disableSucceeded": "Caché desactivada",
"clearSucceeded": "Caché borrada",
"disable": "Desactivar",
"disableFailed": "Error al desactivar la caché"
},
"hrf": {
"hrf": "Solución de alta resolución",
"enableHrf": "Activar corrección de alta resolución",
"metadata": {
"enabled": "Corrección de alta resolución activada",
"strength": "Forzar la corrección de alta resolución",
"method": "Método de corrección de alta resolución"
},
"upscaleMethod": "Método de expansión"
},
"prompt": {
"addPromptTrigger": "Añadir activador de los avisos",
"compatibleEmbeddings": "Incrustaciones compatibles",
"noMatchingTriggers": "No hay activadores coincidentes"
},
"hotkeys": {
"hotkeys": "Atajo del teclado",
"canvas": {
"selectViewTool": {
"desc": "Selecciona la herramienta de Visualización.",
"title": "Visualización"
},
"cancelFilter": {
"title": "Cancelar el filtro",
"desc": "Cancelar el filtro pendiente."
},
"applyTransform": {
"title": "Aplicar la transformación",
"desc": "Aplicar la transformación pendiente a la capa seleccionada."
},
"applyFilter": {
"desc": "Aplicar el filtro pendiente a la capa seleccionada.",
"title": "Aplicar filtro"
},
"selectBrushTool": {
"title": "Pincel",
"desc": "Selecciona la herramienta pincel."
},
"selectBboxTool": {
"desc": "Seleccionar la herramienta de selección del marco.",
"title": "Selección del marco"
},
"selectMoveTool": {
"desc": "Selecciona la herramienta Mover.",
"title": "Mover"
},
"selectRectTool": {
"title": "Rectángulo",
"desc": "Selecciona la herramienta Rectángulo."
},
"decrementToolWidth": {
"title": "Reducir el ancho de la herramienta",
"desc": "Disminuye la anchura de la herramienta pincel o goma de borrar, según la que esté seleccionada."
},
"incrementToolWidth": {
"title": "Incrementar la anchura de la herramienta",
"desc": "Aumenta la anchura de la herramienta pincel o goma de borrar, según la que esté seleccionada."
},
"fitBboxToCanvas": {
"title": "Ajustar bordes al lienzo",
"desc": "Escala y posiciona la vista para ajustarla a los bodes."
},
"fitLayersToCanvas": {
"title": "Ajustar capas al lienzo",
"desc": "Escala y posiciona la vista para que se ajuste a todas las capas visibles."
},
"setFillToWhite": {
"title": "Establecer color en blanco",
"desc": "Establece el color actual de la herramienta en blanco."
},
"resetSelected": {
"title": "Restablecer capa",
"desc": "Restablecer la capa seleccionada. Solo se aplica a Máscara de retoque y Guía regional."
},
"setZoomTo400Percent": {
"desc": "Ajuste la aplicación del lienzo al 400%.",
"title": "Ampliar al 400%"
},
"transformSelected": {
"desc": "Transformar la capa seleccionada.",
"title": "Transformar"
},
"selectColorPickerTool": {
"title": "Selector de color",
"desc": "Seleccione la herramienta de selección de color."
},
"selectEraserTool": {
"title": "Borrador",
"desc": "Selecciona la herramienta Borrador."
},
"setZoomTo100Percent": {
"title": "Ampliar al 100%",
"desc": "Ajuste ampliar el lienzo al 100%."
},
"undo": {
"title": "Deshacer",
"desc": "Deshacer la última acción en el lienzo."
},
"nextEntity": {
"desc": "Seleccione la siguiente capa de la lista.",
"title": "Capa siguiente"
},
"redo": {
"title": "Rehacer",
"desc": "Rehacer la última acción en el lienzo."
},
"prevEntity": {
"title": "Capa anterior",
"desc": "Seleccione la capa anterior de la lista."
},
"title": "Lienzo",
"setZoomTo200Percent": {
"title": "Ampliar al 200%",
"desc": "Ajuste la ampliación del lienzo al 200%."
},
"setZoomTo800Percent": {
"title": "Ampliar al 800%",
"desc": "Ajuste la ampliación del lienzo al 800%."
},
"filterSelected": {
"desc": "Filtra la capa seleccionada. Solo se aplica a las capas Ráster y Control.",
"title": "Filtrar"
},
"cancelTransform": {
"title": "Cancelar transformación",
"desc": "Cancelar la transformación pendiente."
},
"deleteSelected": {
"title": "Borrar la capa",
"desc": "Borrar la capa seleccionada."
},
"quickSwitch": {
"desc": "Cambiar entre las dos últimas capas seleccionadas. Si una capa está seleccionada, cambia siempre entre ella y la última capa no seleccionada.",
"title": "Cambio rápido de capa"
}
},
"app": {
"selectModelsTab": {
"title": "Seleccione la pestaña Modelos",
"desc": "Selecciona la pestaña Modelos."
},
"focusPrompt": {
"desc": "Mueve el foco del cursor a la indicación positiva.",
"title": "Enfoque"
},
"toggleLeftPanel": {
"title": "Alternar panel izquierdo",
"desc": "Mostrar u ocultar el panel izquierdo."
},
"selectQueueTab": {
"title": "Seleccione la pestaña Cola",
"desc": "Seleccione la pestaña Cola."
},
"selectCanvasTab": {
"title": "Seleccione la pestaña Lienzo",
"desc": "Selecciona la pestaña Lienzo."
},
"clearQueue": {
"title": "Vaciar cola",
"desc": "Cancelar y variar todos los elementos de la cola."
},
"selectUpscalingTab": {
"title": "Selecciona la pestaña Ampliar",
"desc": "Selecciona la pestaña Aumento de escala."
},
"togglePanels": {
"desc": "Muestra u oculta los paneles izquierdo y derecho a la vez.",
"title": "Alternar paneles"
},
"toggleRightPanel": {
"title": "Alternar panel derecho",
"desc": "Mostrar u ocultar el panel derecho."
},
"invokeFront": {
"desc": "Pone en cola la solicitud de compilación y la agrega al principio de la cola.",
"title": "Invocar (frente)"
},
"cancelQueueItem": {
"title": "Cancelar",
"desc": "Cancelar el elemento de la cola que se está procesando."
},
"invoke": {
"desc": "Pone en cola la solicitud de compilación y la agrega al final de la cola.",
"title": "Invocar"
},
"title": "Aplicación",
"selectWorkflowsTab": {
"title": "Seleccione la pestaña Flujos de trabajo",
"desc": "Selecciona la pestaña Flujos de trabajo."
},
"resetPanelLayout": {
"title": "Reiniciar la posición del panel",
"desc": "Restablece los paneles izquierdo y derecho a su tamaño y disposición por defecto."
}
},
"workflows": {
"addNode": {
"title": "Añadir nodo",
"desc": "Abrir añadir nodo."
},
"selectAll": {
"title": "Seleccionar todo",
"desc": "Seleccione todos los nodos y enlaces."
},
"deleteSelection": {
"desc": "Borrar todos los nodos y enlaces seleccionados.",
"title": "Borrar"
},
"undo": {
"desc": "Deshaga la última acción.",
"title": "Deshacer"
},
"redo": {
"desc": "Rehacer la última acción.",
"title": "Rehacer"
},
"pasteSelection": {
"desc": "Pegar nodos y bordes copiados.",
"title": "Pegar"
},
"title": "Flujos de trabajo",
"copySelection": {
"desc": "Copiar nodos y bordes seleccionados.",
"title": "Copiar"
},
"pasteSelectionWithEdges": {
"desc": "Pega los nodos copiados, los enlaces y todos los enlaces conectados a los nodos copiados.",
"title": "Pegar con enlaces"
}
},
"viewer": {
"useSize": {
"title": "Usar dimensiones",
"desc": "Utiliza las dimensiones de la imagen actual como el tamaño del borde."
},
"remix": {
"title": "Remezcla",
"desc": "Recupera todos los metadatos excepto la semilla de la imagen actual."
},
"loadWorkflow": {
"desc": "Carga el flujo de trabajo guardado de la imagen actual (si tiene uno).",
"title": "Cargar flujo de trabajo"
},
"recallAll": {
"desc": "Recupera todos los metadatos de la imagen actual.",
"title": "Recuperar todos los metadatos"
},
"recallPrompts": {
"desc": "Recuerde las indicaciones positivas y negativas de la imagen actual.",
"title": "Recordatorios"
},
"recallSeed": {
"title": "Recuperar semilla",
"desc": "Recupera la semilla de la imagen actual."
},
"runPostprocessing": {
"title": "Ejecutar posprocesamiento",
"desc": "Ejecutar el posprocesamiento seleccionado en la imagen actual."
},
"toggleMetadata": {
"title": "Mostrar/ocultar los metadatos",
"desc": "Mostrar u ocultar la superposición de metadatos de la imagen actual."
},
"nextComparisonMode": {
"desc": "Desplácese por los modos de comparación.",
"title": "Siguiente comparación"
},
"title": "Visor de imágenes",
"toggleViewer": {
"title": "Mostrar/Ocultar el visor de imágenes",
"desc": "Mostrar u ocultar el visor de imágenes. Solo disponible en la pestaña Lienzo."
},
"swapImages": {
"title": "Intercambiar imágenes en la comparación",
"desc": "Intercambia las imágenes que se están comparando."
}
},
"gallery": {
"clearSelection": {
"title": "Limpiar selección",
"desc": "Borrar la selección actual, si hay alguna."
},
"galleryNavUp": {
"title": "Subir",
"desc": "Navega hacia arriba en la cuadrícula de la galería y selecciona esa imagen. Si estás en la parte superior de la página, ve a la página anterior."
},
"galleryNavLeft": {
"title": "Izquierda",
"desc": "Navegue hacia la izquierda en la rejilla de la galería, seleccionando esa imagen. Si está en la primera imagen de la fila, vaya a la fila anterior. Si está en la primera imagen de la página, vaya a la página anterior."
},
"galleryNavDown": {
"title": "Bajar",
"desc": "Navegue hacia abajo en la parrilla de la galería, seleccionando esa imagen. Si se encuentra al final de la página, vaya a la página siguiente."
},
"galleryNavRight": {
"title": "A la derecha",
"desc": "Navegue hacia la derecha en la rejilla de la galería, seleccionando esa imagen. Si está en la última imagen de la fila, vaya a la fila siguiente. Si está en la última imagen de la página, vaya a la página siguiente."
},
"galleryNavUpAlt": {
"desc": "Igual que arriba, pero selecciona la imagen de comparación, abriendo el modo de comparación si no está ya abierto.",
"title": "Arriba (Comparar imagen)"
},
"deleteSelection": {
"desc": "Borrar todas las imágenes seleccionadas. Por defecto, se le pedirá que confirme la eliminación. Si las imágenes están actualmente en uso en la aplicación, se te avisará.",
"title": "Borrar"
},
"title": "Galería",
"selectAllOnPage": {
"title": "Seleccionar todo en la página",
"desc": "Seleccionar todas las imágenes en la página actual."
}
},
"searchHotkeys": "Buscar teclas de acceso rápido",
"noHotkeysFound": "Sin teclas de acceso rápido",
"clearSearch": "Limpiar la búsqueda"
},
"metadata": {
"guidance": "Orientación",
"createdBy": "Creado por",
"noImageDetails": "Sin detalles en la imagen",
"cfgRescaleMultiplier": "$t(parameters.cfgRescaleMultiplier)",
"height": "Altura",
"imageDimensions": "Dimensiones de la imagen",
"seamlessXAxis": "Eje X sin juntas",
"seamlessYAxis": "Eje Y sin juntas",
"generationMode": "Modo de generación",
"scheduler": "Programador",
"width": "Ancho",
"Threshold": "Umbral de ruido",
"canvasV2Metadata": "Lienzo",
"metadata": "Metadatos",
"model": "Modelo",
"allPrompts": "Todas las indicaciones",
"cfgScale": "Escala CFG",
"imageDetails": "Detalles de la imagen",
"negativePrompt": "Indicación negativa",
"noMetaData": "Sin metadatos",
"parameterSet": "Parámetro {{parameter}} establecido",
"vae": "Autocodificador",
"workflow": "Flujo de trabajo",
"seed": "Semilla",
"strength": "Forzar imagen a imagen",
"recallParameters": "Parámetros de recuperación",
"recallParameter": "Recuperar {{label}}",
"steps": "Pasos",
"noRecallParameters": "Sin parámetros para recuperar",
"parsingFailed": "Error al analizar"
},
"system": {
"logLevel": {
"debug": "Depurar",
"info": "Información",
"warn": "Advertir",
"fatal": "Grave",
"error": "Error",
"trace": "Rastro",
"logLevel": "Nivel del registro"
},
"enableLogging": "Activar registro",
"logNamespaces": {
"workflows": "Flujos de trabajo",
"system": "Sistema",
"metadata": "Metadatos",
"gallery": "Galería",
"logNamespaces": "Espacios para los nombres de registro",
"generation": "Generación",
"events": "Eventos",
"canvas": "Lienzo",
"config": "Ajustes",
"models": "Modelos",
"queue": "Cola"
}
},
"newUserExperience": {
"downloadStarterModels": "Descargar modelos de inicio",
"toGetStarted": "Para empezar, introduzca un mensaje en el cuadro y haga clic en <StrongComponent>Invocar</StrongComponent> para generar su primera imagen. Seleccione una plantilla para mejorar los resultados. Puede elegir guardar sus imágenes directamente en <StrongComponent>Galería</StrongComponent> o editarlas en <StrongComponent>Lienzo</StrongComponent>.",
"importModels": "Importar modelos",
"noModelsInstalled": "Parece que no tienes ningún modelo instalado",
"gettingStartedSeries": "¿Desea más orientación? Consulte nuestra <LinkComponent>Serie de introducción</LinkComponent> para obtener consejos sobre cómo aprovechar todo el potencial de Invoke Studio.",
"toGetStartedLocal": "Para empezar, asegúrate de descargar o importar los modelos necesarios para ejecutar Invoke. A continuación, introduzca un mensaje en el cuadro y haga clic en <StrongComponent>Invocar</StrongComponent> para generar su primera imagen. Seleccione una plantilla para mejorar los resultados. Puede elegir guardar sus imágenes directamente en <StrongComponent>Galería</StrongComponent> o editarlas en el <StrongComponent>Lienzo</StrongComponent>."
}
}

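The `_one`/`_many`/`_other` triples that recur in the Spanish file come from CLDR plural categories rather than from anything Invoke-specific. A small illustrative check, assuming a runtime with full ICU data (the exact boundary for Spanish's "many" category depends on the bundled CLDR version):

```ts
// Hypothetical check, not repo code: Intl.PluralRules picks the category
// that i18next appends to the key as a suffix (_one, _many, _other, ...).
const en = new Intl.PluralRules("en");
const es = new Intl.PluralRules("es");

en.select(1); // "one"
en.select(4); // "other" -- English only ever needs _one/_other
es.select(1); // "one"
es.select(4); // "other"
// CLDR 42+ adds a "many" category to Spanish for large round numbers,
// which is why es.json carries *_many variants such as imagesWithCount_many.
es.select(1_000_000); // "many" under CLDR 42+ ICU data, else "other"
```
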
@@ -317,19 +317,6 @@
"info": "Info",
"showOptionsPanel": "Afficher le panneau latéral (O ou T)",
"invoke": {
"layer": {
"rgNoPromptsOrIPAdapters": "aucun prompts ou IP Adapters",
"t2iAdapterIncompatibleScaledBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, la largeur de la bounding box mise à l'échelle est {{width}}",
"t2iAdapterIncompatibleScaledBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, la hauteur de la bounding box mise à l'échelle est {{height}}",
"ipAdapterNoModelSelected": "aucun IP adapter sélectionné",
"ipAdapterNoImageSelected": "aucune image d'IP adapter sélectionnée",
"controlAdapterIncompatibleBaseModel": "modèle de base de Control Adapter incompatible",
"t2iAdapterIncompatibleBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, la hauteur de la bounding box est {{height}}",
"t2iAdapterIncompatibleBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, la largeur de la bounding box est {{width}}",
"ipAdapterIncompatibleBaseModel": "modèle de base d'IP adapter incompatible",
"rgNoRegion": "aucune zone sélectionnée",
"controlAdapterNoModelSelected": "aucun modèle de Control Adapter sélectionné"
},
"noPrompts": "Aucun prompts généré",
"missingInputForField": "{{nodeLabel}} -> {{fieldLabel}} entrée manquante",
"missingFieldTemplate": "Modèle de champ manquant",
@@ -1985,7 +1972,6 @@
"inpaintMask_withCount_many": "Remplir les masques",
"inpaintMask_withCount_other": "Remplir les masques",
"newImg2ImgCanvasFromImage": "Nouvelle Img2Img à partir de l'image",
"resetCanvas": "Réinitialiser la Toile",
"bboxOverlay": "Afficher la superposition des Bounding Box",
"moveToFront": "Déplacer vers le permier plan",
"moveToBack": "Déplacer vers l'arrière plan",
@@ -2034,7 +2020,6 @@
"help2": "Commencez par un point <Bold>Inclure</Bold> au sein de l'objet cible. Ajoutez d'autres points pour affiner la sélection. Moins de points produisent généralement de meilleurs résultats.",
"help3": "Inversez la sélection pour sélectionner tout sauf l'objet cible."
},
"canvasAsControlLayer": "$t(controlLayers.canvas) en tant que $t(controlLayers.controlLayer)",
"convertRegionalGuidanceTo": "Convertir $t(controlLayers.regionalGuidance) vers",
"copyRasterLayerTo": "Copier $t(controlLayers.rasterLayer) vers",
"newControlLayer": "Nouveau $t(controlLayers.controlLayer)",
@@ -2044,8 +2029,7 @@
"convertInpaintMaskTo": "Convertir $t(controlLayers.inpaintMask) vers",
"copyControlLayerTo": "Copier $t(controlLayers.controlLayer) vers",
"newInpaintMask": "Nouveau $t(controlLayers.inpaintMask)",
"newRasterLayer": "Nouveau $t(controlLayers.rasterLayer)",
"canvasAsRasterLayer": "$t(controlLayers.canvas) en tant que $t(controlLayers.rasterLayer)"
"newRasterLayer": "Nouveau $t(controlLayers.rasterLayer)"
},
"upscaling": {
"exceedsMaxSizeDetails": "La limite maximale d'agrandissement est de {{maxUpscaleDimension}}x{{maxUpscaleDimension}} pixels. Veuillez essayer une image plus petite ou réduire votre sélection d'échelle.",

@@ -94,7 +94,10 @@
|
||||
"view": "Vista",
|
||||
"close": "Chiudi",
|
||||
"clipboard": "Appunti",
|
||||
"ok": "Ok"
|
||||
"ok": "Ok",
|
||||
"generating": "Generazione",
|
||||
"loadingModel": "Caricamento del modello",
|
||||
"warnings": "Avvisi"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Dimensione dell'immagine",
|
||||
@@ -597,7 +600,18 @@
|
||||
"huggingFace": "HuggingFace",
|
||||
"huggingFaceRepoID": "HuggingFace Repository ID",
|
||||
"clipEmbed": "CLIP Embed",
|
||||
"t5Encoder": "T5 Encoder"
|
||||
"t5Encoder": "T5 Encoder",
|
||||
"hfTokenInvalidErrorMessage": "Gettone HuggingFace non valido o mancante.",
|
||||
"hfTokenRequired": "Stai tentando di scaricare un modello che richiede un gettone HuggingFace valido.",
|
||||
"hfTokenUnableToVerifyErrorMessage": "Impossibile verificare il gettone HuggingFace. Ciò è probabilmente dovuto a un errore di rete. Riprova più tardi.",
|
||||
"hfTokenHelperText": "Per utilizzare alcuni modelli è necessario un gettone HF. Fai clic qui per creare o ottenere il tuo gettone.",
|
||||
"hfTokenInvalid": "Gettone HF non valido o mancante",
|
||||
"hfTokenUnableToVerify": "Impossibile verificare il gettone HF",
|
||||
"hfTokenSaved": "Gettone HF salvato",
|
||||
"hfForbidden": "Non hai accesso a questo modello HF",
|
||||
"hfTokenLabel": "Gettone HuggingFace (richiesto per alcuni modelli)",
|
||||
"hfForbiddenErrorMessage": "Consigliamo di visitare la pagina del repository su HuggingFace.com. Il proprietario potrebbe richiedere l'accettazione dei termini per poter effettuare il download.",
|
||||
"hfTokenInvalidErrorMessage2": "Aggiornalo in "
|
||||
},
|
||||
"parameters": {
|
||||
"images": "Immagini",
|
||||
@@ -649,21 +663,8 @@
|
||||
"addingImagesTo": "Aggiungi immagini a",
|
||||
"systemDisconnected": "Sistema disconnesso",
|
||||
"missingNodeTemplate": "Modello di nodo mancante",
|
||||
"missingInputForField": "{{nodeLabel}} -> {{fieldLabel}} ingresso mancante",
|
||||
"missingInputForField": "{{nodeLabel}} -> {{fieldLabel}}: ingresso mancante",
|
||||
"missingFieldTemplate": "Modello di campo mancante",
|
||||
"layer": {
|
||||
"controlAdapterNoModelSelected": "Nessun modello di adattatore di controllo selezionato",
|
||||
"controlAdapterIncompatibleBaseModel": "Il modello base dell'adattatore di controllo non è compatibile",
|
||||
"ipAdapterNoModelSelected": "Nessun adattatore IP selezionato",
|
||||
"ipAdapterIncompatibleBaseModel": "Il modello base dell'adattatore IP non è compatibile",
|
||||
"ipAdapterNoImageSelected": "Nessuna immagine dell'adattatore IP selezionata",
|
||||
"rgNoPromptsOrIPAdapters": "Nessun prompt o adattatore IP",
|
||||
"rgNoRegion": "Nessuna regione selezionata",
|
||||
"t2iAdapterIncompatibleBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, larghezza riquadro è {{width}}",
|
||||
"t2iAdapterIncompatibleBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, altezza riquadro è {{height}}",
|
||||
"t2iAdapterIncompatibleScaledBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, larghezza del riquadro scalato {{width}}",
|
||||
"t2iAdapterIncompatibleScaledBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, altezza del riquadro scalato {{height}}"
|
||||
},
|
||||
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), altezza riquadro è {{height}}",
|
||||
"fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), larghezza riquadro è {{width}}",
|
||||
"fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), larghezza del riquadro scalato è {{width}}",
|
||||
@@ -671,10 +672,14 @@
|
||||
"noT5EncoderModelSelected": "Nessun modello di encoder T5 selezionato per la generazione con FLUX",
|
||||
"noCLIPEmbedModelSelected": "Nessun modello CLIP Embed selezionato per la generazione con FLUX",
|
||||
"noFLUXVAEModelSelected": "Nessun modello VAE selezionato per la generazione con FLUX",
|
||||
"canvasIsTransforming": "La tela sta trasformando",
|
||||
"canvasIsRasterizing": "La tela sta rasterizzando",
|
||||
"canvasIsCompositing": "La tela è in fase di composizione",
|
||||
"canvasIsFiltering": "La tela sta filtrando"
|
||||
"canvasIsTransforming": "La tela è occupata (sta trasformando)",
|
||||
"canvasIsRasterizing": "La tela è occupata (sta rasterizzando)",
|
||||
"canvasIsCompositing": "La tela è occupata (in composizione)",
|
||||
"canvasIsFiltering": "La tela è occupata (sta filtrando)",
|
||||
"collectionTooManyItems": "{{nodeLabel}} -> {{fieldLabel}}: troppi elementi, massimo {{maxItems}}",
|
||||
"canvasIsSelectingObject": "La tela è occupata (selezione dell'oggetto)",
|
||||
"collectionTooFewItems": "{{nodeLabel}} -> {{fieldLabel}}: troppi pochi elementi, minimo {{minItems}}",
|
||||
"collectionEmpty": "{{nodeLabel}} -> {{fieldLabel}} raccolta vuota"
|
||||
},
|
||||
"useCpuNoise": "Usa la CPU per generare rumore",
|
||||
"iterations": "Iterazioni",
|
||||
@@ -699,7 +704,9 @@
|
||||
"staged": "Maschera espansa",
|
||||
"optimizedImageToImage": "Immagine-a-immagine ottimizzata",
|
||||
"sendToCanvas": "Invia alla Tela",
|
||||
"coherenceMinDenoise": "Riduzione minima del rumore"
|
||||
"coherenceMinDenoise": "Min rid. rumore",
|
||||
"recallMetadata": "Richiama i metadati",
|
||||
"disabledNoRasterContent": "Disabilitato (nessun contenuto Raster)"
|
||||
},
|
||||
"settings": {
|
||||
"models": "Modelli",
|
||||
@@ -737,7 +744,8 @@
|
||||
"confirmOnNewSession": "Conferma su nuova sessione",
|
||||
"enableModelDescriptions": "Abilita le descrizioni dei modelli nei menu a discesa",
|
||||
"modelDescriptionsDisabled": "Descrizioni dei modelli nei menu a discesa disabilitate",
|
||||
"modelDescriptionsDisabledDesc": "Le descrizioni dei modelli nei menu a discesa sono state disabilitate. Abilitale nelle Impostazioni."
|
||||
"modelDescriptionsDisabledDesc": "Le descrizioni dei modelli nei menu a discesa sono state disabilitate. Abilitale nelle Impostazioni.",
|
||||
"showDetailedInvocationProgress": "Mostra dettagli avanzamento"
|
||||
},
|
||||
"toast": {
|
||||
"uploadFailed": "Caricamento fallito",
|
||||
@@ -956,7 +964,9 @@
|
||||
"saveToGallery": "Salva nella Galleria",
|
||||
"noMatchingWorkflows": "Nessun flusso di lavoro corrispondente",
|
||||
"noWorkflows": "Nessun flusso di lavoro",
|
||||
"workflowHelpText": "Hai bisogno di aiuto? Consulta la nostra guida <LinkComponent>Introduzione ai flussi di lavoro</LinkComponent>."
|
||||
"workflowHelpText": "Hai bisogno di aiuto? Consulta la nostra guida <LinkComponent>Introduzione ai flussi di lavoro</LinkComponent>.",
|
||||
"specialDesc": "Questa invocazione comporta una gestione speciale nell'applicazione. Ad esempio, i nodi Lotto vengono utilizzati per mettere in coda più grafici da un singolo flusso di lavoro.",
|
||||
"internalDesc": "Questa invocazione è utilizzata internamente da Invoke. Potrebbe subire modifiche significative durante gli aggiornamenti dell'app e potrebbe essere rimossa in qualsiasi momento."
|
||||
},
|
||||
"boards": {
|
||||
"autoAddBoard": "Aggiungi automaticamente bacheca",
|
||||
@@ -1077,7 +1087,8 @@
|
||||
"workflows": "Flussi di lavoro",
|
||||
"generation": "Generazione",
|
||||
"other": "Altro",
|
||||
"gallery": "Galleria"
|
||||
"gallery": "Galleria",
|
||||
"batchSize": "Dimensione del lotto"
|
||||
},
|
||||
"models": {
|
||||
"noMatchingModels": "Nessun modello corrispondente",
|
||||
@@ -1179,8 +1190,9 @@
"controlNetBeginEnd": {
"heading": "Percentuale passi Inizio / Fine",
"paragraphs": [
"La parte del processo di rimozione del rumore in cui verrà applicato l'adattatore di controllo.",
"In genere, gli adattatori di controllo applicati all'inizio del processo guidano la composizione, mentre quelli applicati alla fine guidano i dettagli."
"Questa impostazione determina quale parte del processo di rimozione del rumore (generazione) incorpora la guida da questo livello.",
"• Passo iniziale (%): specifica quando iniziare ad applicare la guida da questo livello durante il processo di generazione.",
"• Passo finale (%): specifica quando interrompere l'applicazione della guida di questo livello e ripristinare la guida generale dal modello e altre impostazioni."
]
},
"noiseUseCPU": {
@@ -1263,8 +1275,9 @@
},
"paramDenoisingStrength": {
"paragraphs": [
"Quanto rumore viene aggiunto all'immagine in ingresso.",
"0 risulterà in un'immagine identica, mentre 1 risulterà in un'immagine completamente nuova."
"Controlla la differenza tra l'immagine generata e il/i livello/i raster.",
"Una forza inferiore rimane più vicina ai livelli raster visibili combinati. Una forza superiore si basa maggiormente sul prompt globale.",
"Se non sono presenti livelli raster con contenuto visibile, questa impostazione viene ignorata."
],
"heading": "Forza di riduzione del rumore"
},
@@ -1276,14 +1289,16 @@
},
"infillMethod": {
"paragraphs": [
"Metodo di riempimento durante il processo di Outpainting o Inpainting."
"Metodo di riempimento durante il processo di Outpaint o Inpaint."
],
"heading": "Metodo di riempimento"
},
"controlNetWeight": {
"heading": "Peso",
"paragraphs": [
"Peso dell'adattatore di controllo. Un peso maggiore porterà a impatti maggiori sull'immagine finale."
"Regola la forza con cui il livello influenza il processo di generazione",
"• Peso maggiore (0.75-2): crea un impatto più significativo sul risultato finale.",
"• Peso inferiore (0-0.75): crea un impatto minore sul risultato finale."
]
},
"paramCFGScale": {
@@ -1444,7 +1459,7 @@
"heading": "Livello minimo di riduzione del rumore",
"paragraphs": [
"Intensità minima di riduzione rumore per la modalità di Coerenza",
"L'intensità minima di riduzione del rumore per la regione di coerenza durante l'inpainting o l'outpainting"
"L'intensità minima di riduzione del rumore per la regione di coerenza durante l'inpaint o l'outpaint"
]
},
"compositingMaskBlur": {
@@ -1460,9 +1475,9 @@
]
},
"ipAdapterMethod": {
"heading": "Metodo",
"heading": "Modalità",
"paragraphs": [
"Metodo con cui applicare l'adattatore IP corrente."
"La modalità definisce il modo in cui l'immagine di riferimento guiderà il processo di generazione."
]
},
"scale": {
@@ -1498,7 +1513,7 @@
"optimizedDenoising": {
"heading": "Immagine-a-immagine ottimizzata",
"paragraphs": [
"Abilita 'Immagine-a-immagine ottimizzata' per una scala di riduzione del rumore più graduale per le trasformazioni da immagine a immagine e di inpainting con modelli Flux. Questa impostazione migliora la capacità di controllare la quantità di modifica applicata a un'immagine, ma può essere disattivata se preferisci usare la scala di riduzione rumore standard. Questa impostazione è ancora in fase di messa a punto ed è in stato beta."
"Abilita 'Immagine-a-immagine ottimizzata' per una scala di riduzione del rumore più graduale per le trasformazioni da immagine a immagine e di inpaint con modelli Flux. Questa impostazione migliora la capacità di controllare la quantità di modifica applicata a un'immagine, ma può essere disattivata se preferisci usare la scala di riduzione rumore standard. Questa impostazione è ancora in fase di messa a punto ed è in stato beta."
]
},
"paramGuidance": {
@@ -1733,8 +1748,7 @@
"newRegionalReferenceImageError": "Problema nella creazione dell'immagine di riferimento regionale",
"newControlLayerOk": "Livello di controllo creato",
"bboxOverlay": "Mostra sovrapposizione riquadro",
"resetCanvas": "Reimposta la tela",
"outputOnlyMaskedRegions": "Solo regioni mascherate in uscita",
"outputOnlyMaskedRegions": "In uscita solo le regioni generate",
"enableAutoNegative": "Abilita Auto Negativo",
"disableAutoNegative": "Disabilita Auto Negativo",
"showHUD": "Mostra HUD",
@@ -1771,7 +1785,7 @@
"globalReferenceImage_withCount_many": "Immagini di riferimento Globali",
"globalReferenceImage_withCount_other": "Immagini di riferimento Globali",
"controlMode": {
"balanced": "Bilanciato",
"balanced": "Bilanciato (consigliato)",
"controlMode": "Modalità di controllo",
"prompt": "Prompt",
"control": "Controllo",
@@ -1782,10 +1796,13 @@
"beginEndStepPercentShort": "Inizio/Fine %",
"stagingOnCanvas": "Genera immagini nella",
"ipAdapterMethod": {
"full": "Completo",
"full": "Stile e Composizione",
"style": "Solo Stile",
"composition": "Solo Composizione",
"ipAdapterMethod": "Metodo Adattatore IP"
"ipAdapterMethod": "Modalità",
"fullDesc": "Applica lo stile visivo (colori, texture) e la composizione (disposizione, struttura).",
"styleDesc": "Applica lo stile visivo (colori, texture) senza considerare la disposizione.",
"compositionDesc": "Replica disposizione e struttura ignorando lo stile di riferimento."
},
"showingType": "Mostra {{type}}",
"dynamicGrid": "Griglia dinamica",
@@ -1881,7 +1898,10 @@
"lineart_anime_edge_detection": {
"description": "Genera una mappa dei bordi dal livello selezionato utilizzando il modello di rilevamento dei bordi Lineart Anime.",
"label": "Rilevamento bordi Lineart Anime"
}
},
"forMoreControl": "Per un maggiore controllo, fare clic su Avanzate qui sotto.",
"advanced": "Avanzate",
"processingLayerWith": "Elaborazione del livello con il filtro {{type}}."
},
"controlLayers_withCount_hidden": "Livelli di controllo ({{count}} nascosti)",
"regionalGuidance_withCount_hidden": "Guida regionale ({{count}} nascosti)",
@@ -2016,8 +2036,6 @@
"convertControlLayerTo": "Converti $t(controlLayers.controlLayer) in",
"newRasterLayer": "Nuovo $t(controlLayers.rasterLayer)",
"newRegionalGuidance": "Nuova $t(controlLayers.regionalGuidance)",
"canvasAsRasterLayer": "$t(controlLayers.canvas) come $t(controlLayers.rasterLayer)",
"canvasAsControlLayer": "$t(controlLayers.canvas) come $t(controlLayers.controlLayer)",
"convertInpaintMaskTo": "Converti $t(controlLayers.inpaintMask) in",
"copyRegionalGuidanceTo": "Copia $t(controlLayers.regionalGuidance) in",
"convertRasterLayerTo": "Converti $t(controlLayers.rasterLayer) in",
@@ -2025,7 +2043,35 @@
"newControlLayer": "Nuovo $t(controlLayers.controlLayer)",
"newInpaintMask": "Nuova $t(controlLayers.inpaintMask)",
"replaceCurrent": "Sostituisci corrente",
"mergeDown": "Unire in basso"
"mergeDown": "Unire in basso",
"mergingLayers": "Unione dei livelli",
"controlLayerEmptyState": "<UploadButton>Carica un'immagine</UploadButton>, trascina un'immagine dalla <GalleryButton>galleria</GalleryButton> su questo livello oppure disegna sulla tela per iniziare.",
"useImage": "Usa immagine",
"resetGenerationSettings": "Ripristina impostazioni di generazione",
"referenceImageEmptyState": "Per iniziare, <UploadButton>carica un'immagine</UploadButton> oppure trascina un'immagine dalla <GalleryButton>galleria</GalleryButton> su questo livello.",
"asRasterLayer": "Come $t(controlLayers.rasterLayer)",
"asRasterLayerResize": "Come $t(controlLayers.rasterLayer) (Ridimensiona)",
"asControlLayer": "Come $t(controlLayers.controlLayer)",
"asControlLayerResize": "Come $t(controlLayers.controlLayer) (Ridimensiona)",
"newSession": "Nuova sessione",
"resetCanvasLayers": "Ripristina livelli Tela",
"referenceImageRegional": "Immagine di riferimento (regionale)",
"referenceImageGlobal": "Immagine di riferimento (globale)",
"warnings": {
"controlAdapterNoModelSelected": "nessun modello selezionato per il livello di controllo",
"controlAdapterNoControl": "nessun controllo selezionato/disegnato",
"ipAdapterNoModelSelected": "nessun modello di immagine di riferimento selezionato",
"rgNoPromptsOrIPAdapters": "nessun prompt testuale o immagini di riferimento",
"rgReferenceImagesNotSupported": "Immagini di riferimento regionali non supportate per il modello base selezionato",
"rgNoRegion": "nessuna regione disegnata",
"problemsFound": "Problemi riscontrati",
"unsupportedModel": "livello non supportato per il modello base selezionato",
"controlAdapterIncompatibleBaseModel": "modello di base del livello di controllo incompatibile",
"rgNegativePromptNotSupported": "Prompt negativo non supportato per il modello base selezionato",
"ipAdapterIncompatibleBaseModel": "modello base dell'immagine di riferimento incompatibile",
"ipAdapterNoImageSelected": "nessuna immagine di riferimento selezionata",
"rgAutoNegativeNotSupported": "Auto-Negativo non supportato per il modello base selezionato"
}
},
"ui": {
"tabs": {
@@ -2057,7 +2103,9 @@
"postProcessingMissingModelWarning": "Visita <LinkComponent>Gestione modelli</LinkComponent> per installare un modello di post-elaborazione (da immagine a immagine).",
"exceedsMaxSize": "Le impostazioni di ampliamento superano il limite massimo delle dimensioni",
"exceedsMaxSizeDetails": "Il limite massimo di ampliamento è {{maxUpscaleDimension}}x{{maxUpscaleDimension}} pixel. Prova un'immagine più piccola o diminuisci la scala selezionata.",
"upscale": "Amplia"
"upscale": "Amplia",
"incompatibleBaseModel": "Architettura del modello principale non supportata per l'ampliamento",
"incompatibleBaseModelDesc": "L'ampliamento è supportato solo per i modelli di architettura SD1.5 e SDXL. Cambia il modello principale per abilitare l'ampliamento."
},
"upsell": {
"inviteTeammates": "Invita collaboratori",
@@ -2119,12 +2167,13 @@
},
"whatsNew": {
"whatsNewInInvoke": "Novità in Invoke",
"line2": "Supporto Flux esteso, ora con immagini di riferimento globali",
"line3": "Tooltip e menu contestuali migliorati",
"readReleaseNotes": "Leggi le note di rilascio",
"watchRecentReleaseVideos": "Guarda i video su questa versione",
"line1": "Strumento <ItalicComponent>Seleziona oggetto</ItalicComponent> per la selezione e la modifica precise degli oggetti",
"watchUiUpdatesOverview": "Guarda le novità dell'interfaccia"
"watchUiUpdatesOverview": "Guarda le novità dell'interfaccia",
"items": [
"<StrongComponent>FLUX Regional Guidance (beta)</StrongComponent>: la nostra versione beta di FLUX Regional Guidance è attiva per il controllo dei prompt regionali.",
"<StrongComponent>Vari miglioramenti dell'esperienza utente</StrongComponent>: numerosi piccoli miglioramenti dell'esperienza utente e della qualità della vita in tutta l'app."
]
},
"system": {
"logLevel": {
@@ -2150,5 +2199,67 @@
"logNamespaces": "Elementi del registro"
},
"enableLogging": "Abilita la registrazione"
},
"supportVideos": {
"gettingStarted": "Iniziare",
"supportVideos": "Video di supporto",
"videos": {
"usingControlLayersAndReferenceGuides": {
"title": "Utilizzo di livelli di controllo e guide di riferimento",
"description": "Scopri come guidare la creazione delle tue immagini con livelli di controllo e immagini di riferimento."
},
"creatingYourFirstImage": {
"description": "Introduzione alla creazione di un'immagine da zero utilizzando gli strumenti di Invoke.",
"title": "Creazione della tua prima immagine"
},
"understandingImageToImageAndDenoising": {
"description": "Panoramica delle trasformazioni immagine-a-immagine e della riduzione del rumore in Invoke.",
"title": "Comprendere immagine-a-immagine e riduzione del rumore"
},
"howDoIDoImageToImageTransformation": {
"description": "Tutorial su come eseguire trasformazioni da immagine a immagine in Invoke.",
"title": "Come si esegue la trasformazione da immagine-a-immagine?"
},
"howDoIUseInpaintMasks": {
"title": "Come si usano le maschere Inpaint?",
"description": "Come applicare maschere inpaint per la correzione e la variazione delle immagini."
},
"howDoIOutpaint": {
"description": "Guida all'outpainting oltre i confini dell'immagine originale.",
"title": "Come posso eseguire l'outpainting?"
},
"exploringAIModelsAndConceptAdapters": {
"description": "Approfondisci i modelli di intelligenza artificiale e scopri come utilizzare gli adattatori concettuali per il controllo creativo.",
"title": "Esplorazione dei modelli di IA e degli adattatori concettuali"
},
"upscaling": {
"title": "Ampliamento",
"description": "Come ampliare le immagini con gli strumenti di Invoke per migliorarne la risoluzione."
},
"creatingAndComposingOnInvokesControlCanvas": {
"description": "Impara a comporre immagini utilizzando la tela di controllo di Invoke.",
"title": "Creare e comporre sulla tela di controllo di Invoke"
},
"howDoIGenerateAndSaveToTheGallery": {
"description": "Passaggi per generare e salvare le immagini nella galleria.",
"title": "Come posso generare e salvare nella Galleria?"
},
"howDoIEditOnTheCanvas": {
"title": "Come posso apportare modifiche sulla tela?",
"description": "Guida alla modifica delle immagini direttamente sulla tela."
},
"howDoIUseControlNetsAndControlLayers": {
"title": "Come posso utilizzare le Reti di Controllo e i Livelli di Controllo?",
"description": "Impara ad applicare livelli di controllo e reti di controllo alle tue immagini."
},
"howDoIUseGlobalIPAdaptersAndReferenceImages": {
"title": "Come si utilizzano gli adattatori IP globali e le immagini di riferimento?",
"description": "Introduzione all'aggiunta di immagini di riferimento e adattatori IP globali."
}
},
"controlCanvas": "Tela di Controllo",
"watch": "Guarda",
"studioSessionsDesc1": "Dai un'occhiata a <StudioSessionsPlaylistLink /> per approfondimenti su Invoke.",
"studioSessionsDesc2": "Unisciti al nostro <DiscordLink /> per partecipare alle sessioni live e fare domande. Le sessioni vengono caricate sulla playlist la settimana successiva."
}
}

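The `$t(...)` references and `{{...}}` placeholders that appear throughout these locale files are i18next syntax: `$t(key)` nests another translation into the string at lookup time, and `{{name}}` is interpolated from the options passed to `t()`. A minimal sketch of how the two combine, assuming the stock i18next runtime (the resource values below are illustrative, not the real strings):

```ts
import i18next from 'i18next';

// Illustrative resources mirroring the key structure above; only the
// $t()/{{...}} mechanics matter here, not the exact wording.
await i18next.init({
  lng: 'it',
  resources: {
    it: {
      translation: {
        parameters: {
          invoke: {
            fluxRequiresDimensionsToBeMultipleOf16: 'FLUX richiede dimensioni multiple di 16',
            fluxModelIncompatibleBboxWidth:
              '$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), larghezza riquadro è {{width}}',
          },
        },
      },
    },
  },
});

// The $t(...) nesting is resolved first, then {{width}} is interpolated:
i18next.t('parameters.invoke.fluxModelIncompatibleBboxWidth', { width: 100 });
// -> "FLUX richiede dimensioni multiple di 16, larghezza riquadro è 100"
```

This is why only the shared fragment needs retranslating when the wording of a common prefix changes, as several hunks above do.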
@@ -16,7 +16,7 @@
"discordLabel": "Discord",
"nodes": "ワークフロー",
"txt2img": "txt2img",
"postprocessing": "Post Processing",
"postprocessing": "ポストプロセス",
"t2iAdapter": "T2I アダプター",
"communityLabel": "コミュニティ",
"dontAskMeAgain": "次回から確認しない",
@@ -71,8 +71,8 @@
"orderBy": "並び順:",
"enabled": "有効",
"notInstalled": "未インストール",
"positivePrompt": "プロンプト",
"negativePrompt": "除外する要素",
"positivePrompt": "ポジティブプロンプト",
"negativePrompt": "ネガティブプロンプト",
"selected": "選択済み",
"aboutDesc": "Invokeを業務で利用する場合はマークしてください:",
"beta": "ベータ",
@@ -80,7 +80,20 @@
"editor": "エディタ",
"safetensors": "Safetensors",
"tab": "タブ",
"toResolve": "解決方法"
"toResolve": "解決方法",
"openInViewer": "ビューアで開く",
"placeholderSelectAModel": "モデルを選択",
"clipboard": "クリップボード",
"apply": "適用",
"loadingImage": "画像をロード中",
"off": "オフ",
"view": "ビュー",
"edit": "編集",
"ok": "OK",
"reset": "リセット",
"none": "なし",
"new": "新規",
"close": "閉じる"
},
"gallery": {
"galleryImageSize": "画像のサイズ",
@@ -125,12 +138,114 @@
"compareHelp1": "<Kbd>Alt</Kbd> キーを押しながらギャラリー画像をクリックするか、矢印キーを使用して比較画像を変更します。",
|
||||
"compareHelp3": "<Kbd>C</Kbd>を押して、比較した画像を入れ替えます。",
|
||||
"compareHelp4": "<Kbd>[Z</Kbd>]または<Kbd>[Esc</Kbd>]を押して終了します。",
|
||||
"compareHelp2": "<Kbd>M</Kbd> キーを押して比較モードを切り替えます。"
|
||||
"compareHelp2": "<Kbd>M</Kbd> キーを押して比較モードを切り替えます。",
|
||||
"move": "移動",
|
||||
"openViewer": "ビューアを開く",
|
||||
"closeViewer": "ビューアを閉じる",
|
||||
"exitSearch": "画像検索を終了",
|
||||
"oldestFirst": "最古から",
|
||||
"showStarredImagesFirst": "スター付き画像を最初に",
|
||||
"exitBoardSearch": "ボード検索を終了",
|
||||
"showArchivedBoards": "アーカイブされたボードを表示",
|
||||
"searchImages": "メタデータで検索",
|
||||
"gallery": "ギャラリー",
|
||||
"newestFirst": "最新から",
|
||||
"jump": "ジャンプ",
|
||||
"go": "進む",
|
||||
"sortDirection": "並び替え順",
|
||||
"displayBoardSearch": "ボード検索",
|
||||
"displaySearch": "画像を検索",
|
||||
"boardsSettings": "ボード設定",
|
||||
"imagesSettings": "ギャラリー画像設定"
|
||||
},
|
||||
"hotkeys": {
|
||||
"searchHotkeys": "ホットキーを検索",
|
||||
"clearSearch": "検索をクリア",
|
||||
"noHotkeysFound": "ホットキーが見つかりません"
|
||||
"noHotkeysFound": "ホットキーが見つかりません",
|
||||
"viewer": {
|
||||
"runPostprocessing": {
|
||||
"title": "ポストプロセスを実行"
|
||||
},
|
||||
"useSize": {
|
||||
"title": "サイズを使用"
|
||||
},
|
||||
"recallPrompts": {
|
||||
"title": "プロンプトを再使用"
|
||||
},
|
||||
"recallAll": {
|
||||
"title": "全てのメタデータを再使用"
|
||||
},
|
||||
"recallSeed": {
|
||||
"title": "シード値を再使用"
|
||||
}
|
||||
},
|
||||
"canvas": {
|
||||
"redo": {
|
||||
"title": "やり直し"
|
||||
},
|
||||
"transformSelected": {
|
||||
"title": "変形"
|
||||
},
|
||||
"undo": {
|
||||
"title": "取り消し"
|
||||
},
|
||||
"selectEraserTool": {
|
||||
"title": "消しゴムツール"
|
||||
},
|
||||
"cancelTransform": {
|
||||
"title": "変形をキャンセル"
|
||||
},
|
||||
"resetSelected": {
|
||||
"title": "レイヤーをリセット"
|
||||
},
|
||||
"applyTransform": {
|
||||
"title": "変形を適用"
|
||||
},
|
||||
"selectColorPickerTool": {
|
||||
"title": "スポイトツール"
|
||||
},
|
||||
"fitBboxToCanvas": {
|
||||
"title": "バウンディングボックスをキャンバスにフィット"
|
||||
},
|
||||
"selectBrushTool": {
|
||||
"title": "ブラシツール"
|
||||
},
|
||||
"selectMoveTool": {
|
||||
"title": "移動ツール"
|
||||
},
|
||||
"selectBboxTool": {
|
||||
"title": "バウンディングボックスツール"
|
||||
},
|
||||
"title": "キャンバス",
|
||||
"fitLayersToCanvas": {
|
||||
"title": "レイヤーをキャンバスにフィット"
|
||||
}
|
||||
},
|
||||
"workflows": {
|
||||
"undo": {
|
||||
"title": "取り消し"
|
||||
},
|
||||
"redo": {
|
||||
"title": "やり直し"
|
||||
}
|
||||
},
|
||||
"app": {
|
||||
"toggleLeftPanel": {
|
||||
"title": "左パネルをトグル",
|
||||
"desc": "左パネルを表示または非表示。"
|
||||
},
|
||||
"title": "アプリケーション",
|
||||
"invoke": {
|
||||
"title": "Invoke"
|
||||
},
|
||||
"cancelQueueItem": {
|
||||
"title": "キャンセル"
|
||||
},
|
||||
"clearQueue": {
|
||||
"title": "キューをクリア"
|
||||
}
|
||||
},
|
||||
"hotkeys": "ホットキー"
|
||||
},
|
||||
"modelManager": {
|
||||
"modelManager": "モデルマネージャ",
|
||||
@@ -165,7 +280,7 @@
|
||||
"convertToDiffusers": "ディフューザーに変換",
|
||||
"alpha": "アルファ",
|
||||
"modelConverted": "モデル変換が完了しました",
|
||||
"predictionType": "予測タイプ(安定したディフュージョン 2.x モデルおよび一部の安定したディフュージョン 1.x モデル用)",
|
||||
"predictionType": "予測タイプ(SD 2.x モデルおよび一部のSD 1.x モデル用)",
|
||||
"selectModel": "モデルを選択",
|
||||
"advanced": "高度な設定",
|
||||
"modelDeleted": "モデルが削除されました",
|
||||
@@ -178,7 +293,9 @@
|
||||
"convertToDiffusersHelpText1": "このモデルは 🧨 Diffusers フォーマットに変換されます。",
|
||||
"convertToDiffusersHelpText3": "チェックポイントファイルは、InvokeAIルートフォルダ内にある場合、ディスクから削除されます。カスタムロケーションにある場合は、削除されません。",
|
||||
"convertToDiffusersHelpText4": "これは一回限りのプロセスです。コンピュータの仕様によっては、約30秒から60秒かかる可能性があります。",
|
||||
"cancel": "キャンセル"
|
||||
"cancel": "キャンセル",
|
||||
"uploadImage": "画像をアップロード",
|
||||
"addModels": "モデルを追加"
|
||||
},
|
||||
"parameters": {
|
||||
"images": "画像",
|
||||
@@ -200,7 +317,19 @@
|
||||
"info": "情報",
|
||||
"showOptionsPanel": "オプションパネルを表示",
|
||||
"iterations": "生成回数",
|
||||
"general": "基本設定"
|
||||
"general": "基本設定",
|
||||
"setToOptimalSize": "サイズをモデルに最適化",
|
||||
"invoke": {
|
||||
"addingImagesTo": "画像の追加先"
|
||||
},
|
||||
"aspect": "縦横比",
|
||||
"lockAspectRatio": "縦横比を固定",
|
||||
"scheduler": "スケジューラー",
|
||||
"sendToUpscale": "アップスケーラーに転送",
|
||||
"useSize": "サイズを使用",
|
||||
"postProcessing": "ポストプロセス (Shift + U)",
|
||||
"denoisingStrength": "ノイズ除去強度",
|
||||
"recallMetadata": "メタデータを再使用"
|
||||
},
|
||||
"settings": {
|
||||
"models": "モデル",
|
||||
@@ -213,7 +342,11 @@
|
||||
},
|
||||
"toast": {
|
||||
"uploadFailed": "アップロード失敗",
|
||||
"imageCopied": "画像をコピー"
|
||||
"imageCopied": "画像をコピー",
|
||||
"imageUploadFailed": "画像のアップロードに失敗しました",
|
||||
"uploadFailedInvalidUploadDesc": "画像はPNGかJPGである必要があります。",
|
||||
"sentToUpscale": "アップスケーラーに転送しました",
|
||||
"imageUploaded": "画像をアップロードしました"
|
||||
},
|
||||
"accessibility": {
|
||||
"invokeProgressBar": "進捗バー",
|
||||
@@ -226,7 +359,10 @@
|
||||
"resetUI": "$t(accessibility.reset) UI",
|
||||
"mode": "モード:",
|
||||
"about": "Invoke について",
|
||||
"submitSupportTicket": "サポート依頼を送信する"
|
||||
"submitSupportTicket": "サポート依頼を送信する",
|
||||
"uploadImages": "画像をアップロード",
|
||||
"toggleLeftPanel": "左パネルをトグル (T)",
|
||||
"toggleRightPanel": "右パネルをトグル (G)"
|
||||
},
|
||||
"metadata": {
|
||||
"Threshold": "ノイズ閾値",
|
||||
@@ -237,7 +373,8 @@
|
||||
"scheduler": "スケジューラー",
|
||||
"positivePrompt": "ポジティブプロンプト",
|
||||
"strength": "Image to Image 強度",
|
||||
"recallParameters": "パラメータを呼び出す"
|
||||
"recallParameters": "パラメータを再使用",
|
||||
"recallParameter": "{{label}} を再使用"
|
||||
},
|
||||
"queue": {
|
||||
"queueEmpty": "キューが空です",
|
||||
@@ -297,14 +434,22 @@
|
||||
"prune": "刈り込み",
|
||||
"prompts_other": "プロンプト",
|
||||
"iterations_other": "繰り返し",
|
||||
"generations_other": "生成"
|
||||
"generations_other": "生成",
|
||||
"canvas": "キャンバス",
|
||||
"workflows": "ワークフロー",
|
||||
"upscaling": "アップスケール",
|
||||
"generation": "生成",
|
||||
"other": "その他",
|
||||
"gallery": "ギャラリー"
|
||||
},
|
||||
"models": {
|
||||
"noMatchingModels": "一致するモデルがありません",
|
||||
"loading": "読み込み中",
|
||||
"noMatchingLoRAs": "一致するLoRAがありません",
|
||||
"noModelsAvailable": "使用可能なモデルがありません",
|
||||
"selectModel": "モデルを選択してください"
|
||||
"selectModel": "モデルを選択してください",
|
||||
"concepts": "コンセプト",
|
||||
"addLora": "LoRAを追加"
|
||||
},
|
||||
"nodes": {
|
||||
"addNode": "ノードを追加",
|
||||
@@ -339,7 +484,8 @@
|
||||
"cannotConnectOutputToOutput": "出力から出力には接続できません",
|
||||
"cannotConnectToSelf": "自身のノードには接続できません",
|
||||
"colorCodeEdges": "カラー-Code Edges",
|
||||
"loadingNodes": "ノードを読み込み中..."
|
||||
"loadingNodes": "ノードを読み込み中...",
|
||||
"scheduler": "スケジューラー"
|
||||
},
|
||||
"boards": {
|
||||
"autoAddBoard": "自動追加するボード",
|
||||
@@ -362,7 +508,18 @@
|
||||
"deleteBoardAndImages": "ボードと画像の削除",
|
||||
"deleteBoardOnly": "ボードのみ削除",
|
||||
"deletedBoardsCannotbeRestored": "削除されたボードは復元できません",
|
||||
"movingImagesToBoard_other": "{{count}} の画像をボードに移動:"
|
||||
"movingImagesToBoard_other": "{{count}} の画像をボードに移動:",
|
||||
"hideBoards": "ボードを隠す",
|
||||
"assetsWithCount_other": "{{count}} のアセット",
|
||||
"addPrivateBoard": "プライベートボードを追加",
|
||||
"addSharedBoard": "共有ボードを追加",
|
||||
"boards": "ボード",
|
||||
"private": "プライベートボード",
|
||||
"shared": "共有ボード",
|
||||
"archiveBoard": "ボードをアーカイブ",
|
||||
"archived": "アーカイブ完了",
|
||||
"unarchiveBoard": "アーカイブされていないボード",
|
||||
"imagesWithCount_other": "{{count}} の画像"
|
||||
},
|
||||
"invocationCache": {
|
||||
"invocationCache": "呼び出しキャッシュ",
|
||||
@@ -387,6 +544,33 @@
|
||||
"paragraphs": [
|
||||
"生成された画像の縦横比。"
|
||||
]
|
||||
},
|
||||
"regionalGuidanceAndReferenceImage": {
|
||||
"heading": "領域ガイダンスと領域参照画像"
|
||||
},
|
||||
"regionalReferenceImage": {
|
||||
"heading": "領域参照画像"
|
||||
},
|
||||
"paramScheduler": {
|
||||
"heading": "スケジューラー"
|
||||
},
|
||||
"regionalGuidance": {
|
||||
"heading": "領域ガイダンス"
|
||||
},
|
||||
"rasterLayer": {
|
||||
"heading": "ラスターレイヤー"
|
||||
},
|
||||
"globalReferenceImage": {
|
||||
"heading": "全域参照画像"
|
||||
},
|
||||
"paramUpscaleMethod": {
|
||||
"heading": "アップスケール手法"
|
||||
},
|
||||
"upscaleModel": {
|
||||
"heading": "アップスケールモデル"
|
||||
},
|
||||
"paramAspect": {
|
||||
"heading": "縦横比"
|
||||
}
|
||||
},
|
||||
"accordions": {
|
||||
@@ -427,5 +611,79 @@
|
||||
"tabs": {
|
||||
"queue": "キュー"
|
||||
}
|
||||
},
|
||||
"controlLayers": {
|
||||
"globalReferenceImage_withCount_other": "全域参照画像",
|
||||
"regionalReferenceImage": "領域参照画像",
|
||||
"saveLayerToAssets": "レイヤーをアセットに保存",
|
||||
"global": "全域",
|
||||
"inpaintMasks_withCount_hidden": "インペイントマスク ({{count}} hidden)",
|
||||
"opacity": "透明度",
|
||||
"canvasContextMenu": {
|
||||
"newRegionalGuidance": "新規領域ガイダンス",
|
||||
"bboxGroup": "バウンディングボックスから作成",
|
||||
"cropCanvasToBbox": "キャンバスをバウンディングボックスでクロップ",
|
||||
"newGlobalReferenceImage": "新規全域参照画像",
|
||||
"newRegionalReferenceImage": "新規領域参照画像"
|
||||
},
|
||||
"regionalGuidance": "領域ガイダンス",
|
||||
"globalReferenceImage": "全域参照画像",
|
||||
"moveForward": "前面へ移動",
|
||||
"copyInpaintMaskTo": "$t(controlLayers.inpaintMask) をコピー",
|
||||
"transform": {
|
||||
"fitToBbox": "バウンディングボックスにフィット",
|
||||
"transform": "変形",
|
||||
"apply": "適用",
|
||||
"cancel": "キャンセル",
|
||||
"reset": "リセット"
|
||||
},
|
||||
"cropLayerToBbox": "レイヤーをバウンディングボックスでクロップ",
|
||||
"convertInpaintMaskTo": "$t(controlLayers.inpaintMask)を変換",
|
||||
"regionalGuidance_withCount_other": "領域ガイダンス",
|
||||
"tool": {
|
||||
"colorPicker": "スポイト",
|
||||
"brush": "ブラシ",
|
||||
"rectangle": "矩形",
|
||||
"move": "移動",
|
||||
"eraser": "消しゴム"
|
||||
},
|
||||
"saveCanvasToGallery": "キャンバスをギャラリーに保存",
|
||||
"saveBboxToGallery": "バウンディングボックスをギャラリーへ保存",
|
||||
"moveToBack": "最背面へ移動",
|
||||
"duplicate": "複製",
|
||||
"addLayer": "レイヤーを追加",
|
||||
"rasterLayer": "ラスターレイヤー",
|
||||
"inpaintMasks_withCount_visible": "({{count}}) インペイントマスク",
|
||||
"regional": "領域",
|
||||
"rectangle": "矩形",
|
||||
"moveBackward": "背面へ移動",
|
||||
"moveToFront": "最前面へ移動",
|
||||
"mergeDown": "レイヤーを統合",
|
||||
"inpaintMask_withCount_other": "インペイントマスク",
|
||||
"canvas": "キャンバス",
|
||||
"fitBboxToLayers": "バウンディングボックスをレイヤーにフィット",
|
||||
"removeBookmark": "ブックマークを外す",
|
||||
"savedToGalleryOk": "ギャラリーに保存しました"
|
||||
},
|
||||
"stylePresets": {
|
||||
"clearTemplateSelection": "選択したテンプレートをクリア",
|
||||
"choosePromptTemplate": "プロンプトテンプレートを選択",
|
||||
"myTemplates": "自分のテンプレート",
|
||||
"flatten": "選択中のテンプレートをプロンプトに展開",
|
||||
"uploadImage": "画像をアップロード",
|
||||
"defaultTemplates": "デフォルトテンプレート",
|
||||
"createPromptTemplate": "プロンプトテンプレートを作成",
|
||||
"promptTemplateCleared": "プロンプトテンプレートをクリアしました",
|
||||
"searchByName": "名前で検索",
|
||||
"toggleViewMode": "表示モードを切り替え"
|
||||
},
|
||||
"upscaling": {
|
||||
"upscaleModel": "アップスケールモデル",
|
||||
"postProcessingModel": "ポストプロセスモデル",
|
||||
"upscale": "アップスケール"
|
||||
},
|
||||
"sdxl": {
|
||||
"denoisingStrength": "ノイズ除去強度",
|
||||
"scheduler": "スケジューラー"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -230,16 +230,7 @@
"systemDisconnected": "Systeem is niet verbonden",
"missingNodeTemplate": "Knooppuntsjabloon ontbreekt",
"missingFieldTemplate": "Veldsjabloon ontbreekt",
"addingImagesTo": "Bezig met toevoegen van afbeeldingen aan",
"layer": {
"controlAdapterNoModelSelected": "geen controle-adaptermodel geselecteerd",
"controlAdapterIncompatibleBaseModel": "niet-compatibele basismodel voor controle-adapter",
"ipAdapterIncompatibleBaseModel": "niet-compatibele basismodel voor IP-adapter",
"ipAdapterNoImageSelected": "geen afbeelding voor IP-adapter geselecteerd",
"rgNoRegion": "geen gebied geselecteerd",
"rgNoPromptsOrIPAdapters": "geen tekstprompts of IP-adapters",
"ipAdapterNoModelSelected": "geen IP-adapter geselecteerd"
}
"addingImagesTo": "Bezig met toevoegen van afbeeldingen aan"
},
"patchmatchDownScaleSize": "Verklein",
"useCpuNoise": "Gebruik CPU-ruis",

@@ -10,7 +10,24 @@
"load": "Załaduj",
"statusDisconnected": "Odłączono od serwera",
"githubLabel": "GitHub",
"discordLabel": "Discord"
"discordLabel": "Discord",
"clipboard": "Schowek",
"aboutDesc": "Wykorzystujesz Invoke do pracy? Sprawdź:",
"ai": "SI",
"areYouSure": "Czy jesteś pewien?",
"copyError": "$t(gallery.copy) Błąd",
"apply": "Zastosuj",
"copy": "Kopiuj",
"or": "albo",
"add": "Dodaj",
"off": "Wyłączony",
"accept": "Zaakceptuj",
"cancel": "Anuluj",
"advanced": "Zawansowane",
"back": "Do tyłu",
"auto": "Automatyczny",
"beta": "Beta",
"close": "Wyjdź"
},
"gallery": {
"galleryImageSize": "Rozmiar obrazów",
@@ -65,6 +82,42 @@
"uploadImage": "Wgrywanie obrazu",
"previousImage": "Poprzedni obraz",
"nextImage": "Następny obraz",
"menu": "Menu"
"menu": "Menu",
"mode": "Tryb"
},
"boards": {
"cancel": "Anuluj",
"noBoards": "Brak tablic typu {{boardType}}",
"imagesWithCount_one": "{{count}} zdjęcie",
"imagesWithCount_few": "{{count}} zdjęcia",
"imagesWithCount_many": "{{count}} zdjęcia",
"private": "Prywatne tablice",
"updateBoardError": "Błąd aktualizacji tablicy",
"uncategorized": "Nieskategoryzowane",
"selectBoard": "Wybierz tablicę",
"downloadBoard": "Pobierz tablice",
"loading": "Ładowanie...",
"move": "Przenieś",
"noMatching": "Brak pasujących tablic"
},
"accordions": {
"compositing": {
"title": "Kompozycja",
"infillTab": "Inskrypcja",
"coherenceTab": "Przebieg Koherencji"
},
"generation": {
"title": "Generowanie"
},
"image": {
"title": "Zdjęcie"
},
"advanced": {
"options": "$t(accordions.advanced.title) Opcje",
"title": "Zaawansowane"
},
"control": {
"title": "Kontrola"
}
}
}

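The `_one` / `_few` / `_many` / `_other` suffixes on keys such as `imagesWithCount` are i18next plural forms: the suffix is selected per language via `Intl.PluralRules`, which is why Polish carries `_few` and `_many` variants that Japanese (which uses only `_other`) does not. A minimal sketch, reusing the Polish values from the hunk above and assuming an `_other` fallback that the hunk does not show:

```ts
import i18next from 'i18next';

await i18next.init({
  lng: 'pl',
  resources: {
    pl: {
      translation: {
        // Values copied from the pl.json hunk above; _other is an assumed fallback.
        imagesWithCount_one: '{{count}} zdjęcie',
        imagesWithCount_few: '{{count}} zdjęcia',
        imagesWithCount_many: '{{count}} zdjęcia',
        imagesWithCount_other: '{{count}} zdjęcia',
      },
    },
  },
});

// i18next picks the suffix from Intl.PluralRules('pl') based on count:
i18next.t('imagesWithCount', { count: 1 }); // "1 zdjęcie"  (one)
i18next.t('imagesWithCount', { count: 3 }); // "3 zdjęcia"  (few)
i18next.t('imagesWithCount', { count: 5 }); // "5 zdjęcia"  (many)
```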
@@ -648,19 +648,6 @@
"missingFieldTemplate": "Отсутствует шаблон поля",
"addingImagesTo": "Добавление изображений в",
"invoke": "Создать",
"layer": {
"ipAdapterNoModelSelected": "IP адаптер не выбран",
"controlAdapterNoModelSelected": "не выбрана модель адаптера контроля",
"controlAdapterIncompatibleBaseModel": "несовместимая базовая модель адаптера контроля",
"rgNoRegion": "регион не выбран",
"rgNoPromptsOrIPAdapters": "нет текстовых запросов или IP-адаптеров",
"ipAdapterIncompatibleBaseModel": "несовместимая базовая модель IP-адаптера",
"ipAdapterNoImageSelected": "изображение IP-адаптера не выбрано",
"t2iAdapterIncompatibleScaledBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, масштабированная ширина рамки {{width}}",
"t2iAdapterIncompatibleBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, высота рамки {{height}}",
"t2iAdapterIncompatibleBboxWidth": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, ширина рамки {{width}}",
"t2iAdapterIncompatibleScaledBboxHeight": "$t(parameters.invoke.layer.t2iAdapterRequiresDimensionsToBeMultipleOf) {{multiple}}, масштабированная высота рамки {{height}}"
},
"fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), ширина рамки {{width}}",
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), высота рамки {{height}}",
"fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16), масштабированная высота рамки {{height}}",
@@ -1660,7 +1647,6 @@
"clearCaches": "Очистить кэши",
"recalculateRects": "Пересчитать прямоугольники",
"saveBboxToGallery": "Сохранить рамку в галерею",
"resetCanvas": "Сбросить холст",
"canvas": "Холст",
"global": "Глобальный",
"newGlobalReferenceImageError": "Проблема с созданием глобального эталонного изображения",

File diff suppressed because it is too large
@@ -96,7 +96,9 @@
"view": "视图",
"alpha": "透明度通道",
"openInViewer": "在查看器中打开",
"clipboard": "剪贴板"
"clipboard": "剪贴板",
"loadingModel": "加载模型",
"generating": "生成中"
},
"gallery": {
"galleryImageSize": "预览大小",
@@ -587,7 +589,27 @@
"huggingFace": "HuggingFace",
"hfTokenInvalid": "HF 令牌无效或缺失",
"hfTokenLabel": "HuggingFace 令牌(某些模型所需)",
"hfTokenHelperText": "使用某些模型需要 HF 令牌。点击这里创建或获取你的令牌。"
"hfTokenHelperText": "使用某些模型需要 HF 令牌。点击这里创建或获取你的令牌。",
"includesNModels": "包括 {{n}} 个模型及其依赖项",
"starterBundles": "启动器包",
"learnMoreAboutSupportedModels": "了解更多关于我们支持的模型的信息",
"hfForbidden": "您没有权限访问这个 HF 模型",
"hfTokenInvalidErrorMessage": "无效或缺失 HuggingFace 令牌。",
"hfTokenRequired": "您正在尝试下载一个需要有效 HuggingFace 令牌的模型。",
"hfTokenSaved": "HF 令牌已保存",
"hfForbiddenErrorMessage": "我们建议访问 HuggingFace.com 上的仓库页面。所有者可能要求您接受条款才能下载。",
"hfTokenUnableToVerifyErrorMessage": "无法验证 HuggingFace 令牌。这可能是由于网络错误导致的。请稍后再试。",
"hfTokenInvalidErrorMessage2": "在这里更新它。 ",
"hfTokenUnableToVerify": "无法验证 HF 令牌",
"skippingXDuplicates_other": "跳过 {{count}} 个重复项",
"starterBundleHelpText": "轻松安装所有用于启动基础模型所需的模型,包括主模型、ControlNets、IP适配器等。选择一个安装包时,会跳过已安装的模型。",
"installingBundle": "正在安装模型包",
"installingModel": "正在安装模型",
"installingXModels_other": "正在安装 {{count}} 个模型",
"t5Encoder": "T5 编码器",
"clipLEmbed": "CLIP-L 嵌入",
"clipGEmbed": "CLIP-G 嵌入",
"loraModels": "LoRAs(低秩适配)"
},
"parameters": {
"images": "图像",
@@ -639,15 +661,17 @@
"missingFieldTemplate": "缺失模板",
"addingImagesTo": "添加图像到",
"noPrompts": "没有已生成的提示词",
"layer": {
"ipAdapterNoModelSelected": "未选择IP adapter",
"controlAdapterNoModelSelected": "未选择Control Adapter模型",
"rgNoPromptsOrIPAdapters": "无文本提示或IP Adapters",
"controlAdapterIncompatibleBaseModel": "Control Adapter的基础模型不兼容",
"ipAdapterIncompatibleBaseModel": "IP Adapter的基础模型不兼容",
"ipAdapterNoImageSelected": "未选择IP Adapter图像",
"rgNoRegion": "未选择区域"
}
"canvasIsFiltering": "画布正在过滤",
"fluxModelIncompatibleScaledBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16),缩放后的边界框高度为 {{height}}",
"noCLIPEmbedModelSelected": "未为FLUX生成选择CLIP嵌入模型",
"noFLUXVAEModelSelected": "未为FLUX生成选择VAE模型",
"canvasIsRasterizing": "画布正在栅格化",
"canvasIsCompositing": "画布正在合成",
"fluxModelIncompatibleBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16),边界框宽度为 {{width}}",
"fluxModelIncompatibleScaledBboxWidth": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16),缩放后的边界框宽度为 {{width}}",
"noT5EncoderModelSelected": "未为FLUX生成选择T5编码器模型",
"fluxModelIncompatibleBboxHeight": "$t(parameters.invoke.fluxRequiresDimensionsToBeMultipleOf16),边界框高度为 {{height}}",
"canvasIsTransforming": "画布正在变换"
},
"patchmatchDownScaleSize": "缩小",
"clipSkip": "CLIP 跳过层",
@@ -669,7 +693,15 @@
"sendToUpscale": "发送到放大",
"processImage": "处理图像",
"infillColorValue": "填充颜色",
"coherenceMinDenoise": "最小去噪"
"coherenceMinDenoise": "最小去噪",
"sendToCanvas": "发送到画布",
"disabledNoRasterContent": "已禁用(无栅格内容)",
"optimizedImageToImage": "优化的图生图",
"guidance": "引导",
"gaussianBlur": "高斯模糊",
"recallMetadata": "调用元数据",
"boxBlur": "方框模糊",
"staged": "已分阶段处理"
},
"settings": {
|
||||
"models": "模型",
|
||||
@@ -699,7 +731,12 @@
|
||||
"enableInformationalPopovers": "启用信息弹窗",
|
||||
"reloadingIn": "重新加载中",
|
||||
"informationalPopoversDisabled": "信息提示框已禁用",
|
||||
"informationalPopoversDisabledDesc": "信息提示框已被禁用.请在设置中重新启用."
|
||||
"informationalPopoversDisabledDesc": "信息提示框已被禁用.请在设置中重新启用.",
|
||||
"enableModelDescriptions": "在下拉菜单中启用模型描述",
|
||||
"confirmOnNewSession": "新会话时确认",
|
||||
"modelDescriptionsDisabledDesc": "下拉菜单中的模型描述已被禁用。可在设置中启用。",
|
||||
"modelDescriptionsDisabled": "下拉菜单中的模型描述已禁用",
|
||||
"showDetailedInvocationProgress": "显示进度详情"
|
||||
},
|
||||
"toast": {
|
||||
"uploadFailed": "上传失败",
|
||||
@@ -740,7 +777,24 @@
|
||||
"errorCopied": "错误信息已复制",
|
||||
"modelImportCanceled": "模型导入已取消",
|
||||
"importFailed": "导入失败",
|
||||
"importSuccessful": "导入成功"
|
||||
"importSuccessful": "导入成功",
|
||||
"layerSavedToAssets": "图层已保存到资产",
|
||||
"sentToUpscale": "已发送到放大处理",
|
||||
"addedToUncategorized": "已添加到看板 $t(boards.uncategorized) 的资产中",
|
||||
"linkCopied": "链接已复制",
|
||||
"uploadFailedInvalidUploadDesc_withCount_other": "最多只能上传 {{count}} 张 PNG 或 JPEG 图像。",
|
||||
"problemSavingLayer": "无法保存图层",
|
||||
"unableToLoadImage": "无法加载图像",
|
||||
"imageNotLoadedDesc": "无法找到图像",
|
||||
"unableToLoadStylePreset": "无法加载样式预设",
|
||||
"stylePresetLoaded": "样式预设已加载",
|
||||
"problemCopyingLayer": "无法复制图层",
|
||||
"sentToCanvas": "已发送到画布",
|
||||
"unableToLoadImageMetadata": "无法加载图像元数据",
|
||||
"imageSaved": "图像已保存",
|
||||
"imageSavingFailed": "图像保存失败",
|
||||
"layerCopiedToClipboard": "图层已复制到剪贴板",
|
||||
"imagesWillBeAddedTo": "上传的图像将添加到看板 {{boardName}} 的资产中。"
|
||||
},
|
||||
"accessibility": {
|
||||
"invokeProgressBar": "Invoke 进度条",
|
||||
@@ -890,7 +944,12 @@
|
||||
"clearWorkflow": "清除工作流",
|
||||
"imageAccessError": "无法找到图像 {{image_name}},正在恢复默认设置",
|
||||
"boardAccessError": "无法找到面板 {{board_id}},正在恢复默认设置",
|
||||
"modelAccessError": "无法找到模型 {{key}},正在恢复默认设置"
|
||||
"modelAccessError": "无法找到模型 {{key}},正在恢复默认设置",
|
||||
"noWorkflows": "无工作流程",
|
||||
"workflowHelpText": "需要帮助?请查看我们的《<LinkComponent>工作流程入门指南</LinkComponent>》。",
|
||||
"noMatchingWorkflows": "无匹配的工作流程",
|
||||
"saveToGallery": "保存到图库",
|
||||
"singleFieldType": "{{name}}(单一模型)"
|
||||
},
|
||||
"queue": {
|
||||
"status": "状态",
|
||||
@@ -1221,7 +1280,8 @@
"heading": "去噪强度",
"paragraphs": [
"为输入图像添加的噪声量。",
"输入 0 会导致结果图像和输入完全相同,输入 1 则会生成全新的图像。"
"输入 0 会导致结果图像和输入完全相同,输入 1 则会生成全新的图像。",
"当没有具有可见内容的栅格图层时,此设置将被忽略。"
]
},
"paramSeed": {
@@ -1410,7 +1470,8 @@
"paragraphs": [
"控制提示对生成过程的影响程度.",
"与生成CFG Scale相似."
]
],
"heading": "CFG比例"
},
"structure": {
"heading": "结构",
@@ -1441,6 +1502,62 @@
"paragraphs": [
"比例控制决定了输出图像的大小,它是基于输入图像分辨率的倍数来计算的.例如对一张1024x1024的图像进行2倍上采样,将会得到一张2048x2048的输出图像."
]
},
"globalReferenceImage": {
"heading": "全局参考图像",
"paragraphs": [
"应用参考图像以影响整个生成过程。"
]
},
"rasterLayer": {
"paragraphs": [
"画布的基于像素的内容,用于图像生成过程。"
],
"heading": "栅格图层"
},
"regionalGuidanceAndReferenceImage": {
"paragraphs": [
"对于区域引导,使用画笔引导全局提示中的元素应出现的位置。",
"对于区域参考图像,使用画笔将参考图像应用到特定区域。"
],
"heading": "区域引导与区域参考图像"
},
"regionalReferenceImage": {
"heading": "区域参考图像",
"paragraphs": [
"使用画笔将参考图像应用到特定区域。"
]
},
"optimizedDenoising": {
"heading": "优化的图生图",
"paragraphs": [
"启用‘优化的图生图’功能,可在使用 Flux 模型进行图生图和图像修复转换时提供更平滑的降噪强度调节。此设置可以提高对图像变化程度的控制能力,但如果您更倾向于使用标准的降噪强度调节方式,也可以关闭此功能。该设置仍在优化中,目前处于测试阶段。"
]
},
"inpainting": {
"paragraphs": [
"控制由降噪强度引导的修改区域。"
],
"heading": "图像重绘"
},
"regionalGuidance": {
"heading": "区域引导",
"paragraphs": [
"使用画笔引导全局提示中的元素应出现的位置。"
]
},
"fluxDevLicense": {
"heading": "非商业许可",
"paragraphs": [
"FLUX.1 [dev] 模型受 FLUX [dev] 非商业许可协议的约束。如需在 Invoke 中将此模型类型用于商业目的,请访问我们的网站了解更多信息。"
]
},
"paramGuidance": {
"paragraphs": [
"控制提示对生成过程的影响程度。",
"较高的引导值可能导致过度饱和,而过高或过低的引导值可能导致生成结果失真。引导仅适用于FLUX DEV模型。"
],
"heading": "引导"
}
},
"invocationCache": {
@@ -1503,7 +1620,18 @@
"convertGraph": "转换图表",
|
||||
"loadWorkflow": "$t(common.load) 工作流",
|
||||
"loadFromGraph": "从图表加载工作流",
|
||||
"autoLayout": "自动布局"
|
||||
"autoLayout": "自动布局",
|
||||
"edit": "编辑",
|
||||
"copyShareLinkForWorkflow": "复制工作流程的分享链接",
|
||||
"delete": "删除",
|
||||
"download": "下载",
|
||||
"defaultWorkflows": "默认工作流程",
|
||||
"userWorkflows": "用户工作流程",
|
||||
"projectWorkflows": "项目工作流程",
|
||||
"copyShareLink": "复制分享链接",
|
||||
"chooseWorkflowFromLibrary": "从库中选择工作流程",
|
||||
"uploadAndSaveWorkflow": "上传到库",
|
||||
"deleteWorkflow2": "您确定要删除此工作流程吗?此操作无法撤销。"
|
||||
},
|
||||
"accordions": {
|
||||
"compositing": {
|
||||
@@ -1542,7 +1670,108 @@
"addPositivePrompt": "添加 $t(controlLayers.prompt)",
"addNegativePrompt": "添加 $t(controlLayers.negativePrompt)",
"rectangle": "矩形",
"opacity": "透明度"
"opacity": "透明度",
"canvas": "画布",
"fitBboxToLayers": "将边界框适配到图层",
"cropLayerToBbox": "将图层裁剪到边界框",
"saveBboxToGallery": "将边界框保存到图库",
"savedToGalleryOk": "已保存到图库",
"saveLayerToAssets": "将图层保存到资产",
"removeBookmark": "移除书签",
"regional": "区域",
"saveCanvasToGallery": "将画布保存到图库",
"global": "全局",
"bookmark": "添加书签以快速切换",
"regionalReferenceImage": "局部参考图像",
"mergingLayers": "正在合并图层",
"newControlLayerError": "创建控制层时出现问题",
"pullBboxIntoReferenceImageError": "将边界框导入参考图像时出现问题",
"mergeVisibleOk": "已合并图层",
"maskFill": "遮罩填充",
"newCanvasFromImage": "从图像创建新画布",
"pullBboxIntoReferenceImageOk": "边界框已导入到参考图像",
"globalReferenceImage_withCount_other": "全局参考图像",
"addInpaintMask": "添加 $t(controlLayers.inpaintMask)",
"referenceImage": "参考图像",
"globalReferenceImage": "全局参考图像",
"newRegionalGuidance": "新建 $t(controlLayers.regionalGuidance)",
"savedToGalleryError": "保存到图库时出错",
"copyRasterLayerTo": "复制 $t(controlLayers.rasterLayer) 到",
"clearHistory": "清除历史记录",
"inpaintMask": "修复遮罩",
"regionalGuidance_withCount_visible": "区域引导({{count}} 个)",
"inpaintMasks_withCount_hidden": "修复遮罩({{count}} 个已隐藏)",
"enableAutoNegative": "启用自动负面提示",
"disableAutoNegative": "禁用自动负面提示",
"deleteReferenceImage": "删除参考图像",
"sendToCanvas": "发送到画布",
"controlLayers_withCount_visible": "控制图层({{count}} 个)",
"rasterLayers_withCount_visible": "栅格图层({{count}} 个)",
"convertRegionalGuidanceTo": "将 $t(controlLayers.regionalGuidance) 转换为",
"newInpaintMask": "新建 $t(controlLayers.inpaintMask)",
"regionIsEmpty": "选定区域为空",
"mergeVisible": "合并可见图层",
"showHUD": "显示 HUD(抬头显示)",
"newLayerFromImage": "从图像创建新图层",
"layer_other": "图层",
"transparency": "透明度",
"addRasterLayer": "添加 $t(controlLayers.rasterLayer)",
"newRasterLayerOk": "已创建栅格层",
"newRasterLayerError": "创建栅格层时出现问题",
"inpaintMasks_withCount_visible": "修复遮罩({{count}} 个)",
"convertRasterLayerTo": "将 $t(controlLayers.rasterLayer) 转换为",
"copyControlLayerTo": "复制 $t(controlLayers.controlLayer) 到",
"copyInpaintMaskTo": "复制 $t(controlLayers.inpaintMask) 到",
"copyRegionalGuidanceTo": "复制 $t(controlLayers.regionalGuidance) 到",
"newRasterLayer": "新建 $t(controlLayers.rasterLayer)",
"newControlLayer": "新建 $t(controlLayers.controlLayer)",
"newImg2ImgCanvasFromImage": "从图像创建新的图生图",
"rasterLayer": "栅格层",
"controlLayer": "控制层",
"outputOnlyMaskedRegions": "仅输出生成的区域",
"addControlLayer": "添加 $t(controlLayers.controlLayer)",
"newGlobalReferenceImageOk": "已创建全局参考图像",
"newGlobalReferenceImageError": "创建全局参考图像时出现问题",
"newRegionalReferenceImageOk": "已创建局部参考图像",
"newControlLayerOk": "已创建控制层",
"mergeVisibleError": "合并图层时出错",
"bboxOverlay": "显示边界框覆盖层",
"clipToBbox": "将Clip限制到边界框",
"width": "宽度",
"addGlobalReferenceImage": "添加 $t(controlLayers.globalReferenceImage)",
"inpaintMask_withCount_other": "修复遮罩",
"regionalGuidance_withCount_other": "区域引导",
"newRegionalReferenceImageError": "创建局部参考图像时出现问题",
"pullBboxIntoLayerError": "将边界框导入图层时出现问题",
"pullBboxIntoLayerOk": "边界框已导入到图层",
"sendToCanvasDesc": "按下“Invoke”按钮会将您的工作进度暂存到画布上。",
"sendToGallery": "发送到图库",
"sendToGalleryDesc": "按下“Invoke”键会生成并保存一张唯一的图像到您的图库中。",
"rasterLayer_withCount_other": "栅格图层",
"mergeDown": "向下合并",
"clearCaches": "清除缓存",
"recalculateRects": "重新计算矩形",
"duplicate": "复制",
"regionalGuidance_withCount_hidden": "区域引导({{count}} 个已隐藏)",
"convertControlLayerTo": "将 $t(controlLayers.controlLayer) 转换为",
"convertInpaintMaskTo": "将 $t(controlLayers.inpaintMask) 转换为",
"viewProgressInViewer": "在 <Btn>图像查看器</Btn> 中查看进度和输出结果。",
"viewProgressOnCanvas": "在 <Btn>画布</Btn> 上查看进度和暂存的输出内容。",
"sendingToGallery": "将生成内容发送到图库",
"copyToClipboard": "复制到剪贴板",
"controlLayer_withCount_other": "控制图层",
"sendingToCanvas": "在画布上准备生成",
"addReferenceImage": "添加 $t(controlLayers.referenceImage)",
"addRegionalGuidance": "添加 $t(controlLayers.regionalGuidance)",
"controlLayers_withCount_hidden": "控制图层({{count}} 个已隐藏)",
"rasterLayers_withCount_hidden": "栅格图层({{count}} 个已隐藏)",
"globalReferenceImages_withCount_hidden": "全局参考图像({{count}} 个已隐藏)",
"globalReferenceImages_withCount_visible": "全局参考图像({{count}} 个)",
"layer_withCount_other": "图层({{count}} 个)",
"enableTransparencyEffect": "启用透明效果",
"disableTransparencyEffect": "禁用透明效果",
"hidingType": "隐藏 {{type}}",
"showingType": "显示 {{type}}"
},
"ui": {
"tabs": {

@@ -27,6 +27,7 @@ import { ClearQueueConfirmationsAlertDialog } from 'features/queue/components/Cl
import { DeleteStylePresetDialog } from 'features/stylePresets/components/DeleteStylePresetDialog';
import { StylePresetModal } from 'features/stylePresets/components/StylePresetForm/StylePresetModal';
import RefreshAfterResetModal from 'features/system/components/SettingsModal/RefreshAfterResetModal';
import { VideosModal } from 'features/system/components/VideosModal/VideosModal';
import { configChanged } from 'features/system/store/configSlice';
import { selectLanguage } from 'features/system/store/systemSelectors';
import { AppContent } from 'features/ui/components/AppContent';
@@ -108,6 +109,7 @@ const App = ({ config = DEFAULT_CONFIG, studioInitAction }: Props) => {
<NewCanvasSessionDialog />
<ImageContextMenu />
<FullscreenDropzone />
<VideosModal />
</ErrorBoundary>
);
};

@@ -1,3 +1,4 @@
import { useStore } from '@nanostores/react';
import { useAppStore } from 'app/store/storeHooks';
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
import { withResultAsync } from 'common/util/result';
@@ -9,6 +10,7 @@ import { imageDTOToImageObject } from 'features/controlLayers/store/util';
import { $imageViewer } from 'features/gallery/components/ImageViewer/useImageViewer';
import { sentImageToCanvas } from 'features/gallery/store/actions';
import { parseAndRecallAllMetadata } from 'features/metadata/util/handlers';
import { $hasTemplates } from 'features/nodes/store/nodesSlice';
import { $isWorkflowListMenuIsOpen } from 'features/nodes/store/workflowListMenu';
import { $isStylePresetsMenuOpen, activeStylePresetIdChanged } from 'features/stylePresets/store/stylePresetSlice';
import { toast } from 'features/toast/toast';
@@ -51,6 +53,7 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
const { t } = useTranslation();
// Use a ref to ensure that we only perform the action once
const didInit = useRef(false);
const didParseOpenAPISchema = useStore($hasTemplates);
const store = useAppStore();
const { getAndLoadWorkflow } = useGetAndLoadLibraryWorkflow();

@@ -174,7 +177,7 @@
);

useEffect(() => {
if (didInit.current || !action) {
if (didInit.current || !action || !didParseOpenAPISchema) {
return;
}

@@ -187,22 +190,29 @@
case 'selectStylePreset':
handleSelectStylePreset(action.data.stylePresetId);
break;

case 'sendToCanvas':
handleSendToCanvas(action.data.imageName);
break;

case 'useAllParameters':
handleUseAllMetadata(action.data.imageName);
break;

case 'goToDestination':
handleGoToDestination(action.data.destination);
break;

default:
break;
}
}, [
handleSendToCanvas,
handleUseAllMetadata,
action,
handleLoadWorkflow,
handleSelectStylePreset,
handleGoToDestination,
handleLoadWorkflow,
didParseOpenAPISchema,
]);
};

@@ -4,7 +4,7 @@ import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'
import { buildAdHocPostProcessingGraph } from 'features/nodes/util/graph/buildAdHocPostProcessingGraph';
import { toast } from 'features/toast/toast';
import { t } from 'i18next';
import { queueApi } from 'services/api/endpoints/queue';
import { enqueueMutationFixedCacheKeyOptions, queueApi } from 'services/api/endpoints/queue';
import type { BatchConfig, ImageDTO } from 'services/api/types';
import type { JsonObject } from 'type-fest';

@@ -32,9 +32,7 @@ export const addAdHocPostProcessingRequestedListener = (startAppListening: AppSt

try {
const req = dispatch(
queueApi.endpoints.enqueueBatch.initiate(enqueueBatchArg, {
fixedCacheKey: 'enqueueBatch',
})
queueApi.endpoints.enqueueBatch.initiate(enqueueBatchArg, enqueueMutationFixedCacheKeyOptions)
);

const enqueueResult = await req.unwrap();

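This hunk and the listener hunks that follow all make the same change: the inlined `{ fixedCacheKey: 'enqueueBatch' }` literal passed to `enqueueBatch.initiate()` is replaced by a shared `enqueueMutationFixedCacheKeyOptions` constant imported from `services/api/endpoints/queue`. The export itself is outside this diff; a plausible shape, inferred from the literal it replaces, is:

```ts
// Sketch only - the real export lives in services/api/endpoints/queue and is
// not shown in this diff; its shape is inferred from the inlined literal that
// each call site previously passed.
export const enqueueMutationFixedCacheKeyOptions = {
  fixedCacheKey: 'enqueueBatch',
} as const;
```

With RTK Query, mutations dispatched with the same `fixedCacheKey` share one cache entry, so every enqueue path reports a single consistent in-flight state; centralizing the key in one constant keeps the call sites from drifting apart.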
@@ -13,7 +13,7 @@ import { buildSDXLGraph } from 'features/nodes/util/graph/generation/buildSDXLGr
import type { Graph } from 'features/nodes/util/graph/generation/Graph';
import { toast } from 'features/toast/toast';
import { serializeError } from 'serialize-error';
import { queueApi } from 'services/api/endpoints/queue';
import { enqueueMutationFixedCacheKeyOptions, queueApi } from 'services/api/endpoints/queue';
import type { Invocation } from 'services/api/types';
import { assert, AssertionError } from 'tsafe';
import type { JsonObject } from 'type-fest';
@@ -91,9 +91,7 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)
}

const req = dispatch(
queueApi.endpoints.enqueueBatch.initiate(prepareBatchResult.value, {
fixedCacheKey: 'enqueueBatch',
})
queueApi.endpoints.enqueueBatch.initiate(prepareBatchResult.value, enqueueMutationFixedCacheKeyOptions)
);
req.reset();

@@ -1,10 +1,15 @@
+import { logger } from 'app/logging/logger';
 import { enqueueRequested } from 'app/store/actions';
 import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
 import { selectNodesSlice } from 'features/nodes/store/selectors';
+import { isImageFieldCollectionInputInstance } from 'features/nodes/types/field';
+import { isInvocationNode } from 'features/nodes/types/invocation';
 import { buildNodesGraph } from 'features/nodes/util/graph/buildNodesGraph';
 import { buildWorkflowWithValidation } from 'features/nodes/util/workflow/buildWorkflow';
-import { queueApi } from 'services/api/endpoints/queue';
-import type { BatchConfig } from 'services/api/types';
+import { enqueueMutationFixedCacheKeyOptions, queueApi } from 'services/api/endpoints/queue';
+import type { Batch, BatchConfig } from 'services/api/types';
+
+const log = logger('workflows');

 export const addEnqueueRequestedNodes = (startAppListening: AppStartListening) => {
   startAppListening({
@@ -26,6 +31,33 @@ export const addEnqueueRequestedNodes = (startAppListening: AppStartListening) =
         delete builtWorkflow.id;
       }

+      const data: Batch['data'] = [];
+
+      // Skip edges from batch nodes - these should not be in the graph, they exist only in the UI
+      const imageBatchNodes = nodes.nodes.filter(isInvocationNode).filter((node) => node.data.type === 'image_batch');
+      for (const node of imageBatchNodes) {
+        const images = node.data.inputs['images'];
+        if (!isImageFieldCollectionInputInstance(images)) {
+          log.warn({ nodeId: node.id }, 'Image batch images field is not an image collection');
+          break;
+        }
+        const edgesFromImageBatch = nodes.edges.filter((e) => e.source === node.id && e.sourceHandle === 'image');
+        const batchDataCollectionItem: NonNullable<Batch['data']>[number] = [];
+        for (const edge of edgesFromImageBatch) {
+          if (!edge.targetHandle) {
+            break;
+          }
+          batchDataCollectionItem.push({
+            node_path: edge.target,
+            field_name: edge.targetHandle,
+            items: images.value,
+          });
+        }
+        if (batchDataCollectionItem.length > 0) {
+          data.push(batchDataCollectionItem);
+        }
+      }
+
       const batchConfig: BatchConfig = {
         batch: {
           graph,
@@ -33,15 +65,12 @@ export const addEnqueueRequestedNodes = (startAppListening: AppStartListening) =
           runs: state.params.iterations,
           origin: 'workflows',
           destination: 'gallery',
+          data,
         },
         prepend: action.payload.prepend,
       };

-      const req = dispatch(
-        queueApi.endpoints.enqueueBatch.initiate(batchConfig, {
-          fixedCacheKey: 'enqueueBatch',
-        })
-      );
+      const req = dispatch(queueApi.endpoints.enqueueBatch.initiate(batchConfig, enqueueMutationFixedCacheKeyOptions));
       try {
         await req.unwrap();
       } finally {
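Note: the new block converts UI-only image_batch nodes into queue batch data rather than graph nodes. An illustrative payload for one image_batch node feeding two downstream image fields (the node IDs are hypothetical; the zipping behavior is inferred from the Batch['data'] shape used above):

const exampleData: Batch['data'] = [
  [
    // Entries within one inner array are zipped: the n-th queue item
    // receives the n-th image on both target fields.
    { node_path: 'i2l', field_name: 'image', items: images.value },
    { node_path: 'canny', field_name: 'image', items: images.value },
  ],
];

The queue then expands each run into one queue item per element of items.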
@@ -2,7 +2,7 @@ import { enqueueRequested } from 'app/store/actions';
 import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
 import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
 import { buildMultidiffusionUpscaleGraph } from 'features/nodes/util/graph/buildMultidiffusionUpscaleGraph';
-import { queueApi } from 'services/api/endpoints/queue';
+import { enqueueMutationFixedCacheKeyOptions, queueApi } from 'services/api/endpoints/queue';

 export const addEnqueueRequestedUpscale = (startAppListening: AppStartListening) => {
   startAppListening({
@@ -16,11 +16,7 @@ export const addEnqueueRequestedUpscale = (startAppListening: AppStartListening)

       const batchConfig = prepareLinearUIBatch(state, g, prepend, noise, posCond, 'upscaling', 'gallery');

-      const req = dispatch(
-        queueApi.endpoints.enqueueBatch.initiate(batchConfig, {
-          fixedCacheKey: 'enqueueBatch',
-        })
-      );
+      const req = dispatch(queueApi.endpoints.enqueueBatch.initiate(batchConfig, enqueueMutationFixedCacheKeyOptions));
       try {
         await req.unwrap();
       } finally {

@@ -25,9 +25,7 @@ export type AppFeature =
   | 'invocationCache'
   | 'bulkDownload'
   | 'starterModels'
-  | 'hfToken'
-  | 'invocationProgressAlert';
-
+  | 'hfToken';
 /**
  * A disable-able Stable Diffusion feature
  */

@@ -2,6 +2,7 @@ import { deepClone } from 'common/util/deepClone';
 import { merge } from 'lodash-es';
 import { ClickScrollPlugin, OverlayScrollbars } from 'overlayscrollbars';
 import type { UseOverlayScrollbarsParams } from 'overlayscrollbars-react';
+import type { CSSProperties } from 'react';

 OverlayScrollbars.plugin(ClickScrollPlugin);

@@ -27,3 +28,8 @@ export const getOverlayScrollbarsParams = (
   merge(params, { options: { overflow: { y: overflowY, x: overflowX } } });
   return params;
 };
+
+export const overlayScrollbarsStyles: CSSProperties = {
+  height: '100%',
+  width: '100%',
+};
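Note: a hypothetical consumer pairing the new overlayScrollbarsStyles export with the existing params helper (hook usage per overlayscrollbars-react; the wrapper component and the argument values are illustrative):

import { useOverlayScrollbars } from 'overlayscrollbars-react';
import { useEffect, useRef, type PropsWithChildren } from 'react';

const ScrollableBox = (props: PropsWithChildren) => {
  const rootRef = useRef<HTMLDivElement>(null);
  // getOverlayScrollbarsParams builds the options object merged above.
  const [initialize] = useOverlayScrollbars(getOverlayScrollbarsParams('scroll', 'hidden'));
  useEffect(() => {
    if (rootRef.current) {
      initialize(rootRef.current);
    }
  }, [initialize]);
  return (
    <div ref={rootRef} style={overlayScrollbarsStyles}>
      {props.children}
    </div>
  );
};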
@@ -0,0 +1,42 @@
+import { MenuItem } from '@invoke-ai/ui-library';
+import { useAppDispatch } from 'app/store/storeHooks';
+import {
+  useNewCanvasSession,
+  useNewGallerySession,
+} from 'features/controlLayers/components/NewSessionConfirmationAlertDialog';
+import { canvasReset } from 'features/controlLayers/store/actions';
+import { paramsReset } from 'features/controlLayers/store/paramsSlice';
+import { memo, useCallback } from 'react';
+import { useTranslation } from 'react-i18next';
+import { PiArrowsCounterClockwiseBold, PiFilePlusBold } from 'react-icons/pi';
+
+export const SessionMenuItems = memo(() => {
+  const { t } = useTranslation();
+  const dispatch = useAppDispatch();
+  const { newGallerySessionWithDialog } = useNewGallerySession();
+  const { newCanvasSessionWithDialog } = useNewCanvasSession();
+  const resetCanvasLayers = useCallback(() => {
+    dispatch(canvasReset());
+  }, [dispatch]);
+  const resetGenerationSettings = useCallback(() => {
+    dispatch(paramsReset());
+  }, [dispatch]);
+  return (
+    <>
+      <MenuItem icon={<PiFilePlusBold />} onClick={newGallerySessionWithDialog}>
+        {t('controlLayers.newGallerySession')}
+      </MenuItem>
+      <MenuItem icon={<PiFilePlusBold />} onClick={newCanvasSessionWithDialog}>
+        {t('controlLayers.newCanvasSession')}
+      </MenuItem>
+      <MenuItem icon={<PiArrowsCounterClockwiseBold />} onClick={resetCanvasLayers}>
+        {t('controlLayers.resetCanvasLayers')}
+      </MenuItem>
+      <MenuItem icon={<PiArrowsCounterClockwiseBold />} onClick={resetGenerationSettings}>
+        {t('controlLayers.resetGenerationSettings')}
+      </MenuItem>
+    </>
+  );
+});
+
+SessionMenuItems.displayName = 'SessionMenuItems';
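Note: SessionMenuItems renders bare MenuItem elements, so it must be mounted inside an existing menu. A hypothetical mounting site (the surrounding menu and trigger icon are illustrative, not part of this diff):

<Menu>
  <MenuButton as={IconButton} aria-label="Session menu" icon={<PiDotsThreeBold />} />
  <MenuList>
    <SessionMenuItems />
  </MenuList>
</Menu>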
@@ -46,7 +46,7 @@ const REGION_TARGETS: Record<FocusRegionName, Set<HTMLElement>> = {
 /**
  * The currently-focused region or `null` if no region is focused.
  */
-const $focusedRegion = atom<FocusRegionName | null>(null);
+export const $focusedRegion = atom<FocusRegionName | null>(null);

 /**
  * A map of focus regions to atoms that indicate if that region is focused.
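Note: exporting the atom lets any component subscribe to it directly. A minimal consumer, using standard @nanostores/react subscription (the component itself is illustrative):

const FocusedRegionDebug = () => {
  const focusedRegion = useStore($focusedRegion); // FocusRegionName | null
  return <span>{focusedRegion ?? 'none'}</span>;
};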
@@ -1,6 +1,6 @@
 import { useAppDispatch } from 'app/store/storeHooks';
-import { useClearQueue } from 'features/queue/components/ClearQueueConfirmationAlertDialog';
 import { useCancelCurrentQueueItem } from 'features/queue/hooks/useCancelCurrentQueueItem';
+import { useClearQueue } from 'features/queue/hooks/useClearQueue';
 import { useInvoke } from 'features/queue/hooks/useInvoke';
 import { useRegisteredHotkeys } from 'features/system/components/HotkeysModal/useHotkeyData';
 import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';

@@ -141,11 +141,9 @@ export const useImageUploadButton = ({ onUpload, isDisabled, allowMultiple }: Us
 };

 const sx = {
-  borderColor: 'error.500',
-  borderStyle: 'solid',
-  borderWidth: 0,
-  borderRadius: 'base',
   '&[data-error=true]': {
+    borderColor: 'error.500',
+    borderStyle: 'solid',
     borderWidth: 1,
   },
 } satisfies SystemStyleObject;
@@ -164,7 +162,34 @@ export const UploadImageButton = ({
     <>
       <IconButton
         aria-label="Upload image"
-        variant="ghost"
+        variant="outline"
+        sx={sx}
+        data-error={isError}
         icon={<PiUploadBold />}
         isLoading={uploadApi.request.isLoading}
         {...rest}
         {...uploadApi.getUploadButtonProps()}
       />
       <input {...uploadApi.getUploadInputProps()} />
     </>
   );
 };
+
+export const UploadMultipleImageButton = ({
+  isDisabled = false,
+  onUpload,
+  isError = false,
+  ...rest
+}: {
+  onUpload?: (imageDTOs: ImageDTO[]) => void;
+  isError?: boolean;
+} & SetOptional<IconButtonProps, 'aria-label'>) => {
+  const uploadApi = useImageUploadButton({ isDisabled, allowMultiple: true, onUpload });
+  return (
+    <>
+      <IconButton
+        aria-label="Upload image"
+        variant="outline"
+        sx={sx}
+        data-error={isError}
+        icon={<PiUploadBold />}
@@ -1,331 +0,0 @@
-import { useStore } from '@nanostores/react';
-import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
-import { $true } from 'app/store/nanostores/util';
-import { useAppSelector } from 'app/store/storeHooks';
-import { useCanvasManagerSafe } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
-import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice';
-import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
-import { selectDynamicPromptsSlice } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
-import { getShouldProcessPrompt } from 'features/dynamicPrompts/util/getShouldProcessPrompt';
-import { $templates } from 'features/nodes/store/nodesSlice';
-import { selectNodesSlice } from 'features/nodes/store/selectors';
-import type { Templates } from 'features/nodes/store/types';
-import { selectWorkflowSettingsSlice } from 'features/nodes/store/workflowSettingsSlice';
-import { isInvocationNode } from 'features/nodes/types/invocation';
-import { selectUpscaleSlice } from 'features/parameters/store/upscaleSlice';
-import { selectConfigSlice } from 'features/system/store/configSlice';
-import { selectSystemSlice } from 'features/system/store/systemSlice';
-import { selectActiveTab } from 'features/ui/store/uiSelectors';
-import i18n from 'i18next';
-import { forEach, upperFirst } from 'lodash-es';
-import { useMemo } from 'react';
-import { getConnectedEdges } from 'reactflow';
-import { $isConnected } from 'services/events/stores';
-
-const LAYER_TYPE_TO_TKEY = {
-  reference_image: 'controlLayers.referenceImage',
-  inpaint_mask: 'controlLayers.inpaintMask',
-  regional_guidance: 'controlLayers.regionalGuidance',
-  raster_layer: 'controlLayers.rasterLayer',
-  control_layer: 'controlLayers.controlLayer',
-} as const;
-
-const createSelector = (
-  templates: Templates,
-  isConnected: boolean,
-  canvasIsFiltering: boolean,
-  canvasIsTransforming: boolean,
-  canvasIsRasterizing: boolean,
-  canvasIsCompositing: boolean
-) =>
-  createMemoizedSelector(
-    [
-      selectSystemSlice,
-      selectNodesSlice,
-      selectWorkflowSettingsSlice,
-      selectDynamicPromptsSlice,
-      selectCanvasSlice,
-      selectParamsSlice,
-      selectUpscaleSlice,
-      selectConfigSlice,
-      selectActiveTab,
-    ],
-    (system, nodes, workflowSettings, dynamicPrompts, canvas, params, upscale, config, activeTabName) => {
-      const { bbox } = canvas;
-      const { model, positivePrompt } = params;
-
-      const reasons: { prefix?: string; content: string }[] = [];
-
-      // Cannot generate if not connected
-      if (!isConnected) {
-        reasons.push({ content: i18n.t('parameters.invoke.systemDisconnected') });
-      }
-
-      if (activeTabName === 'workflows') {
-        if (workflowSettings.shouldValidateGraph) {
-          if (!nodes.nodes.length) {
-            reasons.push({ content: i18n.t('parameters.invoke.noNodesInGraph') });
-          }
-
-          nodes.nodes.forEach((node) => {
-            if (!isInvocationNode(node)) {
-              return;
-            }
-
-            const nodeTemplate = templates[node.data.type];
-
-            if (!nodeTemplate) {
-              // Node type not found
-              reasons.push({ content: i18n.t('parameters.invoke.missingNodeTemplate') });
-              return;
-            }
-
-            const connectedEdges = getConnectedEdges([node], nodes.edges);
-
-            forEach(node.data.inputs, (field) => {
-              const fieldTemplate = nodeTemplate.inputs[field.name];
-              const hasConnection = connectedEdges.some(
-                (edge) => edge.target === node.id && edge.targetHandle === field.name
-              );
-
-              if (!fieldTemplate) {
-                reasons.push({ content: i18n.t('parameters.invoke.missingFieldTemplate') });
-                return;
-              }
-
-              if (fieldTemplate.required && field.value === undefined && !hasConnection) {
-                reasons.push({
-                  content: i18n.t('parameters.invoke.missingInputForField', {
-                    nodeLabel: node.data.label || nodeTemplate.title,
-                    fieldLabel: field.label || fieldTemplate.title,
-                  }),
-                });
-                return;
-              }
-            });
-          });
-        }
-      } else if (activeTabName === 'upscaling') {
-        if (!upscale.upscaleInitialImage) {
-          reasons.push({ content: i18n.t('upscaling.missingUpscaleInitialImage') });
-        } else if (config.maxUpscaleDimension) {
-          const { width, height } = upscale.upscaleInitialImage;
-          const { scale } = upscale;
-
-          const maxPixels = config.maxUpscaleDimension ** 2;
-          const upscaledPixels = width * scale * height * scale;
-
-          if (upscaledPixels > maxPixels) {
-            reasons.push({ content: i18n.t('upscaling.exceedsMaxSize') });
-          }
-        }
-        if (model && !['sd-1', 'sdxl'].includes(model.base)) {
-          // When we are using an unsupported model, do not add the other warnings
-          reasons.push({ content: i18n.t('upscaling.incompatibleBaseModel') });
-        } else {
-          // Using a compatible model, add all warnings
-          if (!model) {
-            reasons.push({ content: i18n.t('parameters.invoke.noModelSelected') });
-          }
-          if (!upscale.upscaleModel) {
-            reasons.push({ content: i18n.t('upscaling.missingUpscaleModel') });
-          }
-          if (!upscale.tileControlnetModel) {
-            reasons.push({ content: i18n.t('upscaling.missingTileControlNetModel') });
-          }
-        }
-      } else {
-        if (canvasIsFiltering) {
-          reasons.push({ content: i18n.t('parameters.invoke.canvasIsFiltering') });
-        }
-        if (canvasIsTransforming) {
-          reasons.push({ content: i18n.t('parameters.invoke.canvasIsTransforming') });
-        }
-        if (canvasIsRasterizing) {
-          reasons.push({ content: i18n.t('parameters.invoke.canvasIsRasterizing') });
-        }
-        if (canvasIsCompositing) {
-          reasons.push({ content: i18n.t('parameters.invoke.canvasIsCompositing') });
-        }
-
-        if (dynamicPrompts.prompts.length === 0 && getShouldProcessPrompt(positivePrompt)) {
-          reasons.push({ content: i18n.t('parameters.invoke.noPrompts') });
-        }
-
-        if (!model) {
-          reasons.push({ content: i18n.t('parameters.invoke.noModelSelected') });
-        }
-
-        if (model?.base === 'flux') {
-          if (!params.t5EncoderModel) {
-            reasons.push({ content: i18n.t('parameters.invoke.noT5EncoderModelSelected') });
-          }
-          if (!params.clipEmbedModel) {
-            reasons.push({ content: i18n.t('parameters.invoke.noCLIPEmbedModelSelected') });
-          }
-          if (!params.fluxVAE) {
-            reasons.push({ content: i18n.t('parameters.invoke.noFLUXVAEModelSelected') });
-          }
-          if (bbox.scaleMethod === 'none') {
-            if (bbox.rect.width % 16 !== 0) {
-              reasons.push({
-                content: i18n.t('parameters.invoke.fluxModelIncompatibleBboxWidth', { width: bbox.rect.width }),
-              });
-            }
-            if (bbox.rect.height % 16 !== 0) {
-              reasons.push({
-                content: i18n.t('parameters.invoke.fluxModelIncompatibleBboxHeight', { height: bbox.rect.height }),
-              });
-            }
-          } else {
-            if (bbox.scaledSize.width % 16 !== 0) {
-              reasons.push({
-                content: i18n.t('parameters.invoke.fluxModelIncompatibleScaledBboxWidth', {
-                  width: bbox.scaledSize.width,
-                }),
-              });
-            }
-            if (bbox.scaledSize.height % 16 !== 0) {
-              reasons.push({
-                content: i18n.t('parameters.invoke.fluxModelIncompatibleScaledBboxHeight', {
-                  height: bbox.scaledSize.height,
-                }),
-              });
-            }
-          }
-        }
-
-        canvas.controlLayers.entities
-          .filter((controlLayer) => controlLayer.isEnabled)
-          .forEach((controlLayer, i) => {
-            const layerLiteral = i18n.t('controlLayers.layer_one');
-            const layerNumber = i + 1;
-            const layerType = i18n.t(LAYER_TYPE_TO_TKEY['control_layer']);
-            const prefix = `${layerLiteral} #${layerNumber} (${layerType})`;
-            const problems: string[] = [];
-            // Must have model
-            if (!controlLayer.controlAdapter.model) {
-              problems.push(i18n.t('parameters.invoke.layer.controlAdapterNoModelSelected'));
-            }
-            // Model base must match
-            if (controlLayer.controlAdapter.model?.base !== model?.base) {
-              problems.push(i18n.t('parameters.invoke.layer.controlAdapterIncompatibleBaseModel'));
-            }
-            if (problems.length) {
-              const content = upperFirst(problems.join(', '));
-              reasons.push({ prefix, content });
-            }
-          });
-
-        canvas.referenceImages.entities
-          .filter((entity) => entity.isEnabled)
-          .forEach((entity, i) => {
-            const layerLiteral = i18n.t('controlLayers.layer_one');
-            const layerNumber = i + 1;
-            const layerType = i18n.t(LAYER_TYPE_TO_TKEY[entity.type]);
-            const prefix = `${layerLiteral} #${layerNumber} (${layerType})`;
-            const problems: string[] = [];
-
-            // Must have model
-            if (!entity.ipAdapter.model) {
-              problems.push(i18n.t('parameters.invoke.layer.ipAdapterNoModelSelected'));
-            }
-            // Model base must match
-            if (entity.ipAdapter.model?.base !== model?.base) {
-              problems.push(i18n.t('parameters.invoke.layer.ipAdapterIncompatibleBaseModel'));
-            }
-            // Must have an image
-            if (!entity.ipAdapter.image) {
-              problems.push(i18n.t('parameters.invoke.layer.ipAdapterNoImageSelected'));
-            }
-
-            if (problems.length) {
-              const content = upperFirst(problems.join(', '));
-              reasons.push({ prefix, content });
-            }
-          });
-
-        canvas.regionalGuidance.entities
-          .filter((entity) => entity.isEnabled)
-          .forEach((entity, i) => {
-            const layerLiteral = i18n.t('controlLayers.layer_one');
-            const layerNumber = i + 1;
-            const layerType = i18n.t(LAYER_TYPE_TO_TKEY[entity.type]);
-            const prefix = `${layerLiteral} #${layerNumber} (${layerType})`;
-            const problems: string[] = [];
-            // Must have a region
-            if (entity.objects.length === 0) {
-              problems.push(i18n.t('parameters.invoke.layer.rgNoRegion'));
-            }
-            // Must have at least 1 prompt or IP Adapter
-            if (
-              entity.positivePrompt === null &&
-              entity.negativePrompt === null &&
-              entity.referenceImages.length === 0
-            ) {
-              problems.push(i18n.t('parameters.invoke.layer.rgNoPromptsOrIPAdapters'));
-            }
-            entity.referenceImages.forEach(({ ipAdapter }) => {
-              // Must have model
-              if (!ipAdapter.model) {
-                problems.push(i18n.t('parameters.invoke.layer.ipAdapterNoModelSelected'));
-              }
-              // Model base must match
-              if (ipAdapter.model?.base !== model?.base) {
-                problems.push(i18n.t('parameters.invoke.layer.ipAdapterIncompatibleBaseModel'));
-              }
-              // Must have an image
-              if (!ipAdapter.image) {
-                problems.push(i18n.t('parameters.invoke.layer.ipAdapterNoImageSelected'));
-              }
-            });
-
-            if (problems.length) {
-              const content = upperFirst(problems.join(', '));
-              reasons.push({ prefix, content });
-            }
-          });
-
-        canvas.rasterLayers.entities
-          .filter((entity) => entity.isEnabled)
-          .forEach((entity, i) => {
-            const layerLiteral = i18n.t('controlLayers.layer_one');
-            const layerNumber = i + 1;
-            const layerType = i18n.t(LAYER_TYPE_TO_TKEY[entity.type]);
-            const prefix = `${layerLiteral} #${layerNumber} (${layerType})`;
-            const problems: string[] = [];
-
-            if (problems.length) {
-              const content = upperFirst(problems.join(', '));
-              reasons.push({ prefix, content });
-            }
-          });
-      }
-
-      return { isReady: !reasons.length, reasons };
-    }
-  );
-
-export const useIsReadyToEnqueue = () => {
-  const templates = useStore($templates);
-  const isConnected = useStore($isConnected);
-  const canvasManager = useCanvasManagerSafe();
-  const canvasIsFiltering = useStore(canvasManager?.stateApi.$isFiltering ?? $true);
-  const canvasIsTransforming = useStore(canvasManager?.stateApi.$isTransforming ?? $true);
-  const canvasIsRasterizing = useStore(canvasManager?.stateApi.$isRasterizing ?? $true);
-  const canvasIsCompositing = useStore(canvasManager?.compositor.$isBusy ?? $true);
-  const selector = useMemo(
-    () =>
-      createSelector(
-        templates,
-        isConnected,
-        canvasIsFiltering,
-        canvasIsTransforming,
-        canvasIsRasterizing,
-        canvasIsCompositing
-      ),
-    [templates, isConnected, canvasIsFiltering, canvasIsTransforming, canvasIsRasterizing, canvasIsCompositing]
-  );
-  const value = useAppSelector(selector);
-  return value;
-};
@@ -63,7 +63,7 @@ export const CanvasAddEntityButtons = memo(() => {
           justifyContent="flex-start"
           leftIcon={<PiPlusBold />}
           onClick={addRegionalGuidance}
-          isDisabled={isFLUX || isSD3}
+          isDisabled={isSD3}
         >
           {t('controlLayers.regionalGuidance')}
         </Button>

@@ -1,13 +1,14 @@
 import { Alert, AlertDescription, AlertIcon, AlertTitle } from '@invoke-ai/ui-library';
 import { useStore } from '@nanostores/react';
 import { useAppSelector } from 'app/store/storeHooks';
-import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
+import { useDeferredModelLoadingInvocationProgressMessage } from 'features/controlLayers/hooks/useDeferredModelLoadingInvocationProgressMessage';
+import { selectIsLocal } from 'features/system/store/configSlice';
 import { selectSystemShouldShowInvocationProgressDetail } from 'features/system/store/systemSlice';
 import { memo } from 'react';
 import { useTranslation } from 'react-i18next';
 import { $invocationProgressMessage } from 'services/events/stores';

-const CanvasAlertsInvocationProgressContent = memo(() => {
+const CanvasAlertsInvocationProgressContentLocal = memo(() => {
   const { t } = useTranslation();
   const invocationProgressMessage = useStore($invocationProgressMessage);

@@ -23,23 +24,38 @@ const CanvasAlertsInvocationProgressContent = memo(() => {
     </Alert>
   );
 });
-CanvasAlertsInvocationProgressContent.displayName = 'CanvasAlertsInvocationProgressContent';
+CanvasAlertsInvocationProgressContentLocal.displayName = 'CanvasAlertsInvocationProgressContentLocal';

-export const CanvasAlertsInvocationProgress = memo(() => {
-  const isProgressMessageAlertEnabled = useFeatureStatus('invocationProgressAlert');
-  const shouldShowInvocationProgressDetail = useAppSelector(selectSystemShouldShowInvocationProgressDetail);
+const CanvasAlertsInvocationProgressContentCommercial = memo(() => {
+  const message = useDeferredModelLoadingInvocationProgressMessage();

-  // The alert is disabled at the system level
-  if (!isProgressMessageAlertEnabled) {
+  if (!message) {
     return null;
   }

-  // The alert is disabled at the user level
+  return (
+    <Alert status="loading" borderRadius="base" fontSize="sm" shadow="md" w="fit-content">
+      <AlertIcon />
+      <AlertDescription>{message}</AlertDescription>
+    </Alert>
+  );
+});
+CanvasAlertsInvocationProgressContentCommercial.displayName = 'CanvasAlertsInvocationProgressContentCommercial';
+
+export const CanvasAlertsInvocationProgress = memo(() => {
+  const shouldShowInvocationProgressDetail = useAppSelector(selectSystemShouldShowInvocationProgressDetail);
+  const isLocal = useAppSelector(selectIsLocal);
+
+  if (!isLocal) {
+    return <CanvasAlertsInvocationProgressContentCommercial />;
+  }
+
+  // OSS user setting
   if (!shouldShowInvocationProgressDetail) {
     return null;
   }

-  return <CanvasAlertsInvocationProgressContent />;
+  return <CanvasAlertsInvocationProgressContentLocal />;
 });

 CanvasAlertsInvocationProgress.displayName = 'CanvasAlertsInvocationProgress';
|
||||
import { useStore } from '@nanostores/react';
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import { CanvasEntityStateGate } from 'features/controlLayers/contexts/CanvasEntityStateGate';
|
||||
import { useEntityAdapterSafe } from 'features/controlLayers/contexts/EntityAdapterContext';
|
||||
import { useEntityTitle } from 'features/controlLayers/hooks/useEntityTitle';
|
||||
import { useEntityTypeIsHidden } from 'features/controlLayers/hooks/useEntityTypeIsHidden';
|
||||
@@ -29,17 +30,23 @@ type AlertData = {
|
||||
title: string;
|
||||
};
|
||||
|
||||
const buildSelectIsEnabled = (entityIdentifier: CanvasEntityIdentifier) =>
|
||||
createSelector(
|
||||
selectCanvasSlice,
|
||||
(canvas) => selectEntityOrThrow(canvas, entityIdentifier, 'CanvasAlertsSelectedEntityStatusContent').isEnabled
|
||||
);
|
||||
|
||||
const buildSelectIsLocked = (entityIdentifier: CanvasEntityIdentifier) =>
|
||||
createSelector(
|
||||
selectCanvasSlice,
|
||||
(canvas) => selectEntityOrThrow(canvas, entityIdentifier, 'CanvasAlertsSelectedEntityStatusContent').isLocked
|
||||
);
|
||||
|
||||
const CanvasAlertsSelectedEntityStatusContent = memo(({ entityIdentifier, adapter }: ContentProps) => {
|
||||
const { t } = useTranslation();
|
||||
const title = useEntityTitle(entityIdentifier);
|
||||
const selectIsEnabled = useMemo(
|
||||
() => createSelector(selectCanvasSlice, (canvas) => selectEntityOrThrow(canvas, entityIdentifier).isEnabled),
|
||||
[entityIdentifier]
|
||||
);
|
||||
const selectIsLocked = useMemo(
|
||||
() => createSelector(selectCanvasSlice, (canvas) => selectEntityOrThrow(canvas, entityIdentifier).isLocked),
|
||||
[entityIdentifier]
|
||||
);
|
||||
const selectIsEnabled = useMemo(() => buildSelectIsEnabled(entityIdentifier), [entityIdentifier]);
|
||||
const selectIsLocked = useMemo(() => buildSelectIsLocked(entityIdentifier), [entityIdentifier]);
|
||||
const isEnabled = useAppSelector(selectIsEnabled);
|
||||
const isLocked = useAppSelector(selectIsLocked);
|
||||
const isHidden = useEntityTypeIsHidden(entityIdentifier.type);
|
||||
@@ -115,7 +122,11 @@ export const CanvasAlertsSelectedEntityStatus = memo(() => {
|
||||
return null;
|
||||
}
|
||||
|
||||
return <CanvasAlertsSelectedEntityStatusContent entityIdentifier={selectedEntityIdentifier} adapter={adapter} />;
|
||||
return (
|
||||
<CanvasEntityStateGate entityIdentifier={selectedEntityIdentifier}>
|
||||
<CanvasAlertsSelectedEntityStatusContent entityIdentifier={selectedEntityIdentifier} adapter={adapter} />
|
||||
</CanvasEntityStateGate>
|
||||
);
|
||||
});
|
||||
|
||||
CanvasAlertsSelectedEntityStatus.displayName = 'CanvasAlertsSelectedEntityStatus';
|
||||
|
||||
@@ -5,6 +5,7 @@ import { InpaintMaskMenuItems } from 'features/controlLayers/components/InpaintM
|
||||
import { IPAdapterMenuItems } from 'features/controlLayers/components/IPAdapter/IPAdapterMenuItems';
|
||||
import { RasterLayerMenuItems } from 'features/controlLayers/components/RasterLayer/RasterLayerMenuItems';
|
||||
import { RegionalGuidanceMenuItems } from 'features/controlLayers/components/RegionalGuidance/RegionalGuidanceMenuItems';
|
||||
import { CanvasEntityStateGate } from 'features/controlLayers/contexts/CanvasEntityStateGate';
|
||||
import {
|
||||
EntityIdentifierContext,
|
||||
useEntityIdentifierContext,
|
||||
@@ -40,6 +41,15 @@ const CanvasContextMenuSelectedEntityMenuItemsContent = memo(() => {
|
||||
|
||||
CanvasContextMenuSelectedEntityMenuItemsContent.displayName = 'CanvasContextMenuSelectedEntityMenuItemsContent';
|
||||
|
||||
const CanvasContextMenuSelectedEntityMenuGroup = memo((props: PropsWithChildren) => {
|
||||
const entityIdentifier = useEntityIdentifierContext();
|
||||
const title = useEntityTypeString(entityIdentifier.type);
|
||||
|
||||
return <MenuGroup title={title}>{props.children}</MenuGroup>;
|
||||
});
|
||||
|
||||
CanvasContextMenuSelectedEntityMenuGroup.displayName = 'CanvasContextMenuSelectedEntityMenuGroup';
|
||||
|
||||
export const CanvasContextMenuSelectedEntityMenuItems = memo(() => {
|
||||
const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
|
||||
|
||||
@@ -49,20 +59,13 @@ export const CanvasContextMenuSelectedEntityMenuItems = memo(() => {
|
||||
|
||||
return (
|
||||
<EntityIdentifierContext.Provider value={selectedEntityIdentifier}>
|
||||
<CanvasContextMenuSelectedEntityMenuGroup>
|
||||
<CanvasContextMenuSelectedEntityMenuItemsContent />
|
||||
</CanvasContextMenuSelectedEntityMenuGroup>
|
||||
<CanvasEntityStateGate entityIdentifier={selectedEntityIdentifier}>
|
||||
<CanvasContextMenuSelectedEntityMenuGroup>
|
||||
<CanvasContextMenuSelectedEntityMenuItemsContent />
|
||||
</CanvasContextMenuSelectedEntityMenuGroup>
|
||||
</CanvasEntityStateGate>
|
||||
</EntityIdentifierContext.Provider>
|
||||
);
|
||||
});
|
||||
|
||||
CanvasContextMenuSelectedEntityMenuItems.displayName = 'CanvasContextMenuSelectedEntityMenuItems';
|
||||
|
||||
const CanvasContextMenuSelectedEntityMenuGroup = memo((props: PropsWithChildren) => {
|
||||
const entityIdentifier = useEntityIdentifierContext();
|
||||
const title = useEntityTypeString(entityIdentifier.type);
|
||||
|
||||
return <MenuGroup title={title}>{props.children}</MenuGroup>;
|
||||
});
|
||||
|
||||
CanvasContextMenuSelectedEntityMenuGroup.displayName = 'CanvasContextMenuSelectedEntityMenuGroup';
|
||||
|
||||
@@ -49,7 +49,7 @@ export const EntityListGlobalActionBarAddLayerMenu = memo(() => {
|
||||
<MenuItem icon={<PiPlusBold />} onClick={addInpaintMask}>
|
||||
{t('controlLayers.inpaintMask')}
|
||||
</MenuItem>
|
||||
<MenuItem icon={<PiPlusBold />} onClick={addRegionalGuidance} isDisabled={isFLUX || isSD3}>
|
||||
<MenuItem icon={<PiPlusBold />} onClick={addRegionalGuidance} isDisabled={isSD3}>
|
||||
{t('controlLayers.regionalGuidance')}
|
||||
</MenuItem>
|
||||
<MenuItem icon={<PiPlusBold />} onClick={addRegionalReferenceImage} isDisabled={isFLUX || isSD3}>
|
||||
|
||||
@@ -7,6 +7,7 @@ import { CanvasEntitySettingsWrapper } from 'features/controlLayers/components/c
 import { CanvasEntityEditableTitle } from 'features/controlLayers/components/common/CanvasEntityTitleEdit';
 import { ControlLayerBadges } from 'features/controlLayers/components/ControlLayer/ControlLayerBadges';
 import { ControlLayerSettings } from 'features/controlLayers/components/ControlLayer/ControlLayerSettings';
+import { CanvasEntityStateGate } from 'features/controlLayers/contexts/CanvasEntityStateGate';
 import { ControlLayerAdapterGate } from 'features/controlLayers/contexts/EntityAdapterContext';
 import { EntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
 import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
@@ -36,24 +37,26 @@ export const ControlLayer = memo(({ id }: Props) => {
   return (
     <EntityIdentifierContext.Provider value={entityIdentifier}>
       <ControlLayerAdapterGate>
-        <CanvasEntityContainer>
-          <CanvasEntityHeader>
-            <CanvasEntityPreviewImage />
-            <CanvasEntityEditableTitle />
-            <Spacer />
-            <ControlLayerBadges />
-            <CanvasEntityHeaderCommonActions />
-          </CanvasEntityHeader>
-          <CanvasEntitySettingsWrapper>
-            <ControlLayerSettings />
-          </CanvasEntitySettingsWrapper>
-          <DndDropTarget
-            dndTarget={replaceCanvasEntityObjectsWithImageDndTarget}
-            dndTargetData={dndTargetData}
-            label={t('controlLayers.replaceLayer')}
-            isDisabled={isBusy}
-          />
-        </CanvasEntityContainer>
+        <CanvasEntityStateGate entityIdentifier={entityIdentifier}>
+          <CanvasEntityContainer>
+            <CanvasEntityHeader>
+              <CanvasEntityPreviewImage />
+              <CanvasEntityEditableTitle />
+              <Spacer />
+              <ControlLayerBadges />
+              <CanvasEntityHeaderCommonActions />
+            </CanvasEntityHeader>
+            <CanvasEntitySettingsWrapper>
+              <ControlLayerSettings />
+            </CanvasEntitySettingsWrapper>
+            <DndDropTarget
+              dndTarget={replaceCanvasEntityObjectsWithImageDndTarget}
+              dndTargetData={dndTargetData}
+              label={t('controlLayers.replaceLayer')}
+              isDisabled={isBusy}
+            />
+          </CanvasEntityContainer>
+        </CanvasEntityStateGate>
       </ControlLayerAdapterGate>
     </EntityIdentifierContext.Provider>
   );

@@ -1,26 +1,47 @@
 import { Badge } from '@invoke-ai/ui-library';
+import { createSelector } from '@reduxjs/toolkit';
 import { useAppSelector } from 'app/store/storeHooks';
+import { CanvasEntityStateGate } from 'features/controlLayers/contexts/CanvasEntityStateGate';
 import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
 import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
-import { memo } from 'react';
+import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
+import { memo, useMemo } from 'react';
 import { useTranslation } from 'react-i18next';

-export const ControlLayerBadges = memo(() => {
-  const entityIdentifier = useEntityIdentifierContext('control_layer');
-  const { t } = useTranslation();
-  const withTransparencyEffect = useAppSelector(
-    (s) => selectEntityOrThrow(selectCanvasSlice(s), entityIdentifier).withTransparencyEffect
-  );
+const buildSelectWithTransparencyEffect = (entityIdentifier: CanvasEntityIdentifier<'control_layer'>) =>
+  createSelector(
+    selectCanvasSlice,
+    (canvas) => selectEntityOrThrow(canvas, entityIdentifier, 'ControlLayerBadgesContent').withTransparencyEffect
+  );
+
+const ControlLayerBadgesContent = memo(() => {
+  const entityIdentifier = useEntityIdentifierContext('control_layer');
+  const { t } = useTranslation();
+  const selectWithTransparencyEffect = useMemo(
+    () => buildSelectWithTransparencyEffect(entityIdentifier),
+    [entityIdentifier]
+  );
+  const withTransparencyEffect = useAppSelector(selectWithTransparencyEffect);
+
+  if (!withTransparencyEffect) {
+    return null;
+  }

   return (
-    <>
-      {withTransparencyEffect && (
-        <Badge color="base.300" bg="transparent" borderWidth={1} userSelect="none">
-          {t('controlLayers.transparency')}
-        </Badge>
-      )}
-    </>
+    <Badge color="base.300" bg="transparent" borderWidth={1} userSelect="none">
+      {t('controlLayers.transparency')}
+    </Badge>
   );
 });

+ControlLayerBadgesContent.displayName = 'ControlLayerBadgesContent';
+
+export const ControlLayerBadges = memo(() => {
+  const entityIdentifier = useEntityIdentifierContext('control_layer');
+  return (
+    <CanvasEntityStateGate entityIdentifier={entityIdentifier}>
+      <ControlLayerBadgesContent />
+    </CanvasEntityStateGate>
+  );
+});
 ControlLayerBadges.displayName = 'ControlLayerBadges';
@@ -28,24 +28,18 @@ import { useTranslation } from 'react-i18next';
 import { PiBoundingBoxBold, PiShootingStarFill, PiUploadBold } from 'react-icons/pi';
 import type { ControlNetModelConfig, ImageDTO, T2IAdapterModelConfig } from 'services/api/types';

-const useControlLayerControlAdapter = (entityIdentifier: CanvasEntityIdentifier<'control_layer'>) => {
-  const selectControlAdapter = useMemo(
-    () =>
-      createMemoizedAppSelector(selectCanvasSlice, (canvas) => {
-        const layer = selectEntityOrThrow(canvas, entityIdentifier);
-        return layer.controlAdapter;
-      }),
-    [entityIdentifier]
-  );
-  const controlAdapter = useAppSelector(selectControlAdapter);
-  return controlAdapter;
-};
+const buildSelectControlAdapter = (entityIdentifier: CanvasEntityIdentifier<'control_layer'>) =>
+  createMemoizedAppSelector(selectCanvasSlice, (canvas) => {
+    const layer = selectEntityOrThrow(canvas, entityIdentifier, 'ControlLayerControlAdapter');
+    return layer.controlAdapter;
+  });

 export const ControlLayerControlAdapter = memo(() => {
   const { t } = useTranslation();
   const { dispatch, getState } = useAppStore();
   const entityIdentifier = useEntityIdentifierContext('control_layer');
-  const controlAdapter = useControlLayerControlAdapter(entityIdentifier);
+  const selectControlAdapter = useMemo(() => buildSelectControlAdapter(entityIdentifier), [entityIdentifier]);
+  const controlAdapter = useAppSelector(selectControlAdapter);
   const filter = useEntityFilter(entityIdentifier);
   const isFLUX = useAppSelector(selectIsFLUX);
   const adapter = useEntityAdapterContext('control_layer');

@@ -5,21 +5,25 @@ import { useEntityIdentifierContext } from 'features/controlLayers/contexts/Enti
 import { useEntityIsLocked } from 'features/controlLayers/hooks/useEntityIsLocked';
 import { controlLayerWithTransparencyEffectToggled } from 'features/controlLayers/store/canvasSlice';
 import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
+import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
 import { memo, useCallback, useMemo } from 'react';
 import { useTranslation } from 'react-i18next';
 import { PiDropHalfBold } from 'react-icons/pi';

+const buildSelectWithTransparencyEffect = (entityIdentifier: CanvasEntityIdentifier<'control_layer'>) =>
+  createSelector(
+    selectCanvasSlice,
+    (canvas) =>
+      selectEntityOrThrow(canvas, entityIdentifier, 'ControlLayerMenuItemsTransparencyEffect').withTransparencyEffect
+  );
+
 export const ControlLayerMenuItemsTransparencyEffect = memo(() => {
   const { t } = useTranslation();
   const dispatch = useAppDispatch();
   const entityIdentifier = useEntityIdentifierContext('control_layer');
   const isLocked = useEntityIsLocked(entityIdentifier);
   const selectWithTransparencyEffect = useMemo(
-    () =>
-      createSelector(selectCanvasSlice, (canvas) => {
-        const entity = selectEntityOrThrow(canvas, entityIdentifier);
-        return entity.withTransparencyEffect;
-      }),
+    () => buildSelectWithTransparencyEffect(entityIdentifier),
     [entityIdentifier]
   );
   const withTransparencyEffect = useAppSelector(selectWithTransparencyEffect);
@@ -4,6 +4,7 @@ import { CanvasEntityHeader } from 'features/controlLayers/components/common/Can
 import { CanvasEntityHeaderCommonActions } from 'features/controlLayers/components/common/CanvasEntityHeaderCommonActions';
 import { CanvasEntityEditableTitle } from 'features/controlLayers/components/common/CanvasEntityTitleEdit';
 import { IPAdapterSettings } from 'features/controlLayers/components/IPAdapter/IPAdapterSettings';
+import { CanvasEntityStateGate } from 'features/controlLayers/contexts/CanvasEntityStateGate';
 import { EntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
 import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
 import { memo, useMemo } from 'react';
@@ -17,14 +18,16 @@ export const IPAdapter = memo(({ id }: Props) => {

   return (
     <EntityIdentifierContext.Provider value={entityIdentifier}>
-      <CanvasEntityContainer>
-        <CanvasEntityHeader ps={4} py={5}>
-          <CanvasEntityEditableTitle />
-          <Spacer />
-          <CanvasEntityHeaderCommonActions />
-        </CanvasEntityHeader>
-        <IPAdapterSettings />
-      </CanvasEntityContainer>
+      <CanvasEntityStateGate entityIdentifier={entityIdentifier}>
+        <CanvasEntityContainer>
+          <CanvasEntityHeader ps={4} py={5}>
+            <CanvasEntityEditableTitle />
+            <Spacer />
+            <CanvasEntityHeaderCommonActions />
+          </CanvasEntityHeader>
+          <IPAdapterSettings />
+        </CanvasEntityContainer>
+      </CanvasEntityStateGate>
     </EntityIdentifierContext.Provider>
   );
 });
@@ -1,8 +1,10 @@
 import type { ComboboxOnChange } from '@invoke-ai/ui-library';
 import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library';
+import { useAppSelector } from 'app/store/storeHooks';
 import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
 import type { IPMethodV2 } from 'features/controlLayers/store/types';
 import { isIPMethodV2 } from 'features/controlLayers/store/types';
+import { selectSystemShouldEnableModelDescriptions } from 'features/system/store/systemSlice';
 import { memo, useCallback, useMemo } from 'react';
 import { useTranslation } from 'react-i18next';
 import { assert } from 'tsafe';
@@ -14,13 +16,27 @@ type Props = {

 export const IPAdapterMethod = memo(({ method, onChange }: Props) => {
   const { t } = useTranslation();
+  const shouldShowModelDescriptions = useAppSelector(selectSystemShouldEnableModelDescriptions);

   const options: { label: string; value: IPMethodV2 }[] = useMemo(
     () => [
-      { label: t('controlLayers.ipAdapterMethod.full'), value: 'full' },
-      { label: t('controlLayers.ipAdapterMethod.style'), value: 'style' },
-      { label: t('controlLayers.ipAdapterMethod.composition'), value: 'composition' },
+      {
+        label: t('controlLayers.ipAdapterMethod.full'),
+        value: 'full',
+        description: shouldShowModelDescriptions ? t('controlLayers.ipAdapterMethod.fullDesc') : undefined,
+      },
+      {
+        label: t('controlLayers.ipAdapterMethod.style'),
+        value: 'style',
+        description: shouldShowModelDescriptions ? t('controlLayers.ipAdapterMethod.styleDesc') : undefined,
+      },
+      {
+        label: t('controlLayers.ipAdapterMethod.composition'),
+        value: 'composition',
+        description: shouldShowModelDescriptions ? t('controlLayers.ipAdapterMethod.compositionDesc') : undefined,
+      },
     ],
-    [t]
+    [t, shouldShowModelDescriptions]
   );
   const _onChange = useCallback<ComboboxOnChange>(
     (v) => {
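Note: the options now carry an optional description, shown only when the user enables model descriptions. The inferred shape of the option object consumed by Combobox (the canonical type lives in @invoke-ai/ui-library; this sketch is an assumption from usage):

type ComboboxOptionSketch = {
  label: string;
  value: IPMethodV2;
  description?: string; // rendered as secondary text when defined
};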
@@ -5,6 +5,7 @@ import { BeginEndStepPct } from 'features/controlLayers/components/common/BeginE
 import { CanvasEntitySettingsWrapper } from 'features/controlLayers/components/common/CanvasEntitySettingsWrapper';
 import { Weight } from 'features/controlLayers/components/common/Weight';
 import { IPAdapterMethod } from 'features/controlLayers/components/IPAdapter/IPAdapterMethod';
+import { IPAdapterSettingsEmptyState } from 'features/controlLayers/components/IPAdapter/IPAdapterSettingsEmptyState';
 import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
 import { usePullBboxIntoGlobalReferenceImage } from 'features/controlLayers/hooks/saveCanvasHooks';
 import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
@@ -17,8 +18,8 @@ import {
   referenceImageIPAdapterWeightChanged,
 } from 'features/controlLayers/store/canvasSlice';
 import { selectIsFLUX } from 'features/controlLayers/store/paramsSlice';
-import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
-import type { CLIPVisionModelV2, IPMethodV2 } from 'features/controlLayers/store/types';
+import { selectCanvasSlice, selectEntity, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
+import type { CanvasEntityIdentifier, CLIPVisionModelV2, IPMethodV2 } from 'features/controlLayers/store/types';
 import type { SetGlobalReferenceImageDndTargetData } from 'features/dnd/dnd';
 import { setGlobalReferenceImageDndTarget } from 'features/dnd/dnd';
 import { memo, useCallback, useMemo } from 'react';
@@ -29,14 +30,17 @@ import type { ImageDTO, IPAdapterModelConfig } from 'services/api/types';
 import { IPAdapterImagePreview } from './IPAdapterImagePreview';
 import { IPAdapterModel } from './IPAdapterModel';

-export const IPAdapterSettings = memo(() => {
+const buildSelectIPAdapter = (entityIdentifier: CanvasEntityIdentifier<'reference_image'>) =>
+  createSelector(
+    selectCanvasSlice,
+    (canvas) => selectEntityOrThrow(canvas, entityIdentifier, 'IPAdapterSettings').ipAdapter
+  );
+
+const IPAdapterSettingsContent = memo(() => {
   const { t } = useTranslation();
   const dispatch = useAppDispatch();
   const entityIdentifier = useEntityIdentifierContext('reference_image');
-  const selectIPAdapter = useMemo(
-    () => createSelector(selectCanvasSlice, (s) => selectEntityOrThrow(s, entityIdentifier).ipAdapter),
-    [entityIdentifier]
-  );
+  const selectIPAdapter = useMemo(() => buildSelectIPAdapter(entityIdentifier), [entityIdentifier]);
   const ipAdapter = useAppSelector(selectIPAdapter);

   const onChangeBeginEndStepPct = useCallback(
@@ -131,4 +135,25 @@ export const IPAdapterSettings = memo(() => {
   );
 });

+IPAdapterSettingsContent.displayName = 'IPAdapterSettingsContent';
+
+const buildSelectIPAdapterHasImage = (entityIdentifier: CanvasEntityIdentifier<'reference_image'>) =>
+  createSelector(selectCanvasSlice, (canvas) => {
+    const referenceImage = selectEntity(canvas, entityIdentifier);
+    return !!referenceImage && referenceImage.ipAdapter.image !== null;
+  });
+
+export const IPAdapterSettings = memo(() => {
+  const entityIdentifier = useEntityIdentifierContext('reference_image');
+
+  const selectIPAdapterHasImage = useMemo(() => buildSelectIPAdapterHasImage(entityIdentifier), [entityIdentifier]);
+  const hasImage = useAppSelector(selectIPAdapterHasImage);
+
+  if (!hasImage) {
+    return <IPAdapterSettingsEmptyState />;
+  }
+
+  return <IPAdapterSettingsContent />;
+});
+
+IPAdapterSettings.displayName = 'IPAdapterSettings';
@@ -0,0 +1,64 @@
+import { Button, Flex, Text } from '@invoke-ai/ui-library';
+import { useAppDispatch } from 'app/store/storeHooks';
+import { useImageUploadButton } from 'common/hooks/useImageUploadButton';
+import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
+import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
+import type { SetGlobalReferenceImageDndTargetData } from 'features/dnd/dnd';
+import { setGlobalReferenceImageDndTarget } from 'features/dnd/dnd';
+import { DndDropTarget } from 'features/dnd/DndDropTarget';
+import { setGlobalReferenceImage } from 'features/imageActions/actions';
+import { activeTabCanvasRightPanelChanged } from 'features/ui/store/uiSlice';
+import { memo, useCallback, useMemo } from 'react';
+import { Trans, useTranslation } from 'react-i18next';
+import type { ImageDTO } from 'services/api/types';
+
+export const IPAdapterSettingsEmptyState = memo(() => {
+  const { t } = useTranslation();
+  const entityIdentifier = useEntityIdentifierContext('reference_image');
+  const dispatch = useAppDispatch();
+  const isBusy = useCanvasIsBusy();
+  const onUpload = useCallback(
+    (imageDTO: ImageDTO) => {
+      setGlobalReferenceImage({ imageDTO, entityIdentifier, dispatch });
+    },
+    [dispatch, entityIdentifier]
+  );
+  const uploadApi = useImageUploadButton({ onUpload, allowMultiple: false });
+  const onClickGalleryButton = useCallback(() => {
+    dispatch(activeTabCanvasRightPanelChanged('gallery'));
+  }, [dispatch]);
+
+  const dndTargetData = useMemo<SetGlobalReferenceImageDndTargetData>(
+    () => setGlobalReferenceImageDndTarget.getData({ entityIdentifier }),
+    [entityIdentifier]
+  );
+
+  const components = useMemo(
+    () => ({
+      UploadButton: (
+        <Button isDisabled={isBusy} size="sm" variant="link" color="base.300" {...uploadApi.getUploadButtonProps()} />
+      ),
+      GalleryButton: (
+        <Button onClick={onClickGalleryButton} isDisabled={isBusy} size="sm" variant="link" color="base.300" />
+      ),
+    }),
+    [isBusy, onClickGalleryButton, uploadApi]
+  );
+
+  return (
+    <Flex flexDir="column" gap={3} position="relative" w="full" p={4}>
+      <Text textAlign="center" color="base.300">
+        <Trans i18nKey="controlLayers.referenceImageEmptyState" components={components} />
+      </Text>
+      <input {...uploadApi.getUploadInputProps()} />
+      <DndDropTarget
+        dndTarget={setGlobalReferenceImageDndTarget}
+        dndTargetData={dndTargetData}
+        label={t('controlLayers.useImage')}
+        isDisabled={isBusy}
+      />
+    </Flex>
+  );
+});
+
+IPAdapterSettingsEmptyState.displayName = 'IPAdapterSettingsEmptyState';
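Note: the <Trans> call splices the two buttons into a translated sentence; react-i18next replaces each named element in the string with the matching entry of the components map, so the buttons inherit the surrounding copy as their children. A hypothetical locale entry (the key is real; the English copy is illustrative):

// en.json (sketch)
// "referenceImageEmptyState": "Drag an image here, <UploadButton>upload one</UploadButton>, or pick one from the <GalleryButton>gallery</GalleryButton>."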
@@ -4,6 +4,7 @@ import { CanvasEntityHeader } from 'features/controlLayers/components/common/Can
 import { CanvasEntityHeaderCommonActions } from 'features/controlLayers/components/common/CanvasEntityHeaderCommonActions';
 import { CanvasEntityPreviewImage } from 'features/controlLayers/components/common/CanvasEntityPreviewImage';
 import { CanvasEntityEditableTitle } from 'features/controlLayers/components/common/CanvasEntityTitleEdit';
+import { CanvasEntityStateGate } from 'features/controlLayers/contexts/CanvasEntityStateGate';
 import { InpaintMaskAdapterGate } from 'features/controlLayers/contexts/EntityAdapterContext';
 import { EntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
 import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
@@ -19,14 +20,16 @@ export const InpaintMask = memo(({ id }: Props) => {
   return (
     <EntityIdentifierContext.Provider value={entityIdentifier}>
       <InpaintMaskAdapterGate>
-        <CanvasEntityContainer>
-          <CanvasEntityHeader>
-            <CanvasEntityPreviewImage />
-            <CanvasEntityEditableTitle />
-            <Spacer />
-            <CanvasEntityHeaderCommonActions />
-          </CanvasEntityHeader>
-        </CanvasEntityContainer>
+        <CanvasEntityStateGate entityIdentifier={entityIdentifier}>
+          <CanvasEntityContainer>
+            <CanvasEntityHeader>
+              <CanvasEntityPreviewImage />
+              <CanvasEntityEditableTitle />
+              <Spacer />
+              <CanvasEntityHeaderCommonActions />
+            </CanvasEntityHeader>
+          </CanvasEntityContainer>
+        </CanvasEntityStateGate>
       </InpaintMaskAdapterGate>
     </EntityIdentifierContext.Provider>
   );

@@ -4,6 +4,7 @@ import { CanvasEntityHeader } from 'features/controlLayers/components/common/Can
 import { CanvasEntityHeaderCommonActions } from 'features/controlLayers/components/common/CanvasEntityHeaderCommonActions';
 import { CanvasEntityPreviewImage } from 'features/controlLayers/components/common/CanvasEntityPreviewImage';
 import { CanvasEntityEditableTitle } from 'features/controlLayers/components/common/CanvasEntityTitleEdit';
+import { CanvasEntityStateGate } from 'features/controlLayers/contexts/CanvasEntityStateGate';
 import { RasterLayerAdapterGate } from 'features/controlLayers/contexts/EntityAdapterContext';
 import { EntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
 import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
@@ -30,20 +31,22 @@ export const RasterLayer = memo(({ id }: Props) => {
   return (
     <EntityIdentifierContext.Provider value={entityIdentifier}>
       <RasterLayerAdapterGate>
-        <CanvasEntityContainer>
-          <CanvasEntityHeader>
-            <CanvasEntityPreviewImage />
-            <CanvasEntityEditableTitle />
-            <Spacer />
-            <CanvasEntityHeaderCommonActions />
-          </CanvasEntityHeader>
-          <DndDropTarget
-            dndTarget={replaceCanvasEntityObjectsWithImageDndTarget}
-            dndTargetData={dndTargetData}
-            label={t('controlLayers.replaceLayer')}
-            isDisabled={isBusy}
-          />
-        </CanvasEntityContainer>
+        <CanvasEntityStateGate entityIdentifier={entityIdentifier}>
+          <CanvasEntityContainer>
+            <CanvasEntityHeader>
+              <CanvasEntityPreviewImage />
+              <CanvasEntityEditableTitle />
+              <Spacer />
+              <CanvasEntityHeaderCommonActions />
+            </CanvasEntityHeader>
+            <DndDropTarget
+              dndTarget={replaceCanvasEntityObjectsWithImageDndTarget}
+              dndTargetData={dndTargetData}
+              label={t('controlLayers.replaceLayer')}
+              isDisabled={isBusy}
+            />
+          </CanvasEntityContainer>
+        </CanvasEntityStateGate>
       </RasterLayerAdapterGate>
     </EntityIdentifierContext.Provider>
   );

@@ -6,6 +6,7 @@ import { CanvasEntityPreviewImage } from 'features/controlLayers/components/comm
 import { CanvasEntityEditableTitle } from 'features/controlLayers/components/common/CanvasEntityTitleEdit';
 import { RegionalGuidanceBadges } from 'features/controlLayers/components/RegionalGuidance/RegionalGuidanceBadges';
 import { RegionalGuidanceSettings } from 'features/controlLayers/components/RegionalGuidance/RegionalGuidanceSettings';
+import { CanvasEntityStateGate } from 'features/controlLayers/contexts/CanvasEntityStateGate';
 import { RegionalGuidanceAdapterGate } from 'features/controlLayers/contexts/EntityAdapterContext';
 import { EntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
 import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
@@ -24,16 +25,18 @@ export const RegionalGuidance = memo(({ id }: Props) => {
   return (
     <EntityIdentifierContext.Provider value={entityIdentifier}>
       <RegionalGuidanceAdapterGate>
-        <CanvasEntityContainer>
-          <CanvasEntityHeader>
-            <CanvasEntityPreviewImage />
-            <CanvasEntityEditableTitle />
-            <Spacer />
-            <RegionalGuidanceBadges />
-            <CanvasEntityHeaderCommonActions />
-          </CanvasEntityHeader>
-          <RegionalGuidanceSettings />
-        </CanvasEntityContainer>
+        <CanvasEntityStateGate entityIdentifier={entityIdentifier}>
+          <CanvasEntityContainer>
+            <CanvasEntityHeader>
+              <CanvasEntityPreviewImage />
+              <CanvasEntityEditableTitle />
+              <Spacer />
+              <RegionalGuidanceBadges />
+              <CanvasEntityHeaderCommonActions />
+            </CanvasEntityHeader>
+            <RegionalGuidanceSettings />
+          </CanvasEntityContainer>
+        </CanvasEntityStateGate>
      </RegionalGuidanceAdapterGate>
    </EntityIdentifierContext.Provider>
  );
@@ -10,7 +10,11 @@ export const RegionalGuidanceBadges = memo(() => {
|
||||
const entityIdentifier = useEntityIdentifierContext('regional_guidance');
|
||||
const { t } = useTranslation();
|
||||
const selectAutoNegative = useMemo(
|
||||
() => createSelector(selectCanvasSlice, (canvas) => selectEntityOrThrow(canvas, entityIdentifier).autoNegative),
|
||||
() =>
|
||||
createSelector(
|
||||
selectCanvasSlice,
|
||||
(canvas) => selectEntityOrThrow(canvas, entityIdentifier, 'RegionalGuidanceBadges').autoNegative
|
||||
),
|
||||
[entityIdentifier]
|
||||
);
|
||||
const autoNegative = useAppSelector(selectAutoNegative);
|
||||
|
||||
@@ -1,27 +1,28 @@
-import { IconButton, Tooltip } from '@invoke-ai/ui-library';
+import type { IconButtonProps } from '@invoke-ai/ui-library';
+import { IconButton } from '@invoke-ai/ui-library';
 import { memo } from 'react';
 import { useTranslation } from 'react-i18next';
-import { PiTrashSimpleFill } from 'react-icons/pi';
+import { PiXBold } from 'react-icons/pi';
 
-type Props = {
+type Props = Omit<IconButtonProps, 'aria-label'> & {
   onDelete: () => void;
 };
 
-export const RegionalGuidanceDeletePromptButton = memo(({ onDelete }: Props) => {
+export const RegionalGuidanceDeletePromptButton = memo(({ onDelete, ...rest }: Props) => {
   const { t } = useTranslation();
   return (
-    <Tooltip label={t('controlLayers.deletePrompt')}>
-      <IconButton
-        variant="link"
-        aria-label={t('controlLayers.deletePrompt')}
-        icon={<PiTrashSimpleFill />}
-        onClick={onDelete}
-        flexGrow={0}
-        size="sm"
-        p={0}
-        colorScheme="error"
-      />
-    </Tooltip>
+    <IconButton
+      tooltip={t('common.delete')}
+      variant="link"
+      aria-label={t('common.delete')}
+      icon={<PiXBold />}
+      onClick={onDelete}
+      flexGrow={0}
+      size="sm"
+      p={0}
+      colorScheme="error"
+      {...rest}
+    />
   );
 });
 
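The Props change above is a standard prop-forwarding pattern: accept every IconButton prop except the ones the component owns, and spread the rest through. A standalone sketch of the same idea using a plain <button>, so it type-checks without the UI library (names here are illustrative):

import { memo } from 'react';
import type { ButtonHTMLAttributes } from 'react';

// Callers may freely style and position the button, but cannot clobber the
// props the component reserves for itself.
type DeleteButtonProps = Omit<ButtonHTMLAttributes<HTMLButtonElement>, 'onClick' | 'aria-label'> & {
  onDelete: () => void;
};

const DeleteButton = memo(({ onDelete, ...rest }: DeleteButtonProps) => (
  <button type="button" aria-label="Delete" onClick={onDelete} {...rest} />
));
DeleteButton.displayName = 'DeleteButton';
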
@@ -6,6 +6,7 @@ import { Weight } from 'features/controlLayers/components/common/Weight';
 import { IPAdapterImagePreview } from 'features/controlLayers/components/IPAdapter/IPAdapterImagePreview';
 import { IPAdapterMethod } from 'features/controlLayers/components/IPAdapter/IPAdapterMethod';
 import { IPAdapterModel } from 'features/controlLayers/components/IPAdapter/IPAdapterModel';
+import { RegionalGuidanceIPAdapterSettingsEmptyState } from 'features/controlLayers/components/RegionalGuidance/RegionalGuidanceIPAdapterSettingsEmptyState';
 import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
 import { usePullBboxIntoRegionalGuidanceReferenceImage } from 'features/controlLayers/hooks/saveCanvasHooks';
 import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
@@ -19,12 +20,12 @@ import {
   rgIPAdapterWeightChanged,
 } from 'features/controlLayers/store/canvasSlice';
 import { selectCanvasSlice, selectRegionalGuidanceReferenceImage } from 'features/controlLayers/store/selectors';
-import type { CLIPVisionModelV2, IPMethodV2 } from 'features/controlLayers/store/types';
+import type { CanvasEntityIdentifier, CLIPVisionModelV2, IPMethodV2 } from 'features/controlLayers/store/types';
 import type { SetRegionalGuidanceReferenceImageDndTargetData } from 'features/dnd/dnd';
 import { setRegionalGuidanceReferenceImageDndTarget } from 'features/dnd/dnd';
 import { memo, useCallback, useMemo } from 'react';
 import { useTranslation } from 'react-i18next';
-import { PiBoundingBoxBold, PiTrashSimpleFill } from 'react-icons/pi';
+import { PiBoundingBoxBold, PiXBold } from 'react-icons/pi';
 import type { ImageDTO, IPAdapterModelConfig } from 'services/api/types';
 import { assert } from 'tsafe';
 
@@ -32,7 +33,7 @@ type Props = {
   referenceImageId: string;
 };
 
-export const RegionalGuidanceIPAdapterSettings = memo(({ referenceImageId }: Props) => {
+const RegionalGuidanceIPAdapterSettingsContent = memo(({ referenceImageId }: Props) => {
   const entityIdentifier = useEntityIdentifierContext('regional_guidance');
   const { t } = useTranslation();
   const dispatch = useAppDispatch();
@@ -115,7 +116,7 @@ export const RegionalGuidanceIPAdapterSettings = memo(({ referenceImageId }: Pro
           size="sm"
           variant="link"
           alignSelf="stretch"
-          icon={<PiTrashSimpleFill />}
+          icon={<PiXBold />}
           tooltip={t('controlLayers.deleteReferenceImage')}
           aria-label={t('controlLayers.deleteReferenceImage')}
           onClick={onDeleteIPAdapter}
@@ -161,4 +162,31 @@ export const RegionalGuidanceIPAdapterSettings = memo(({ referenceImageId }: Pro
   );
 });
 
+RegionalGuidanceIPAdapterSettingsContent.displayName = 'RegionalGuidanceIPAdapterSettingsContent';
+
+const buildSelectIPAdapterHasImage = (
+  entityIdentifier: CanvasEntityIdentifier<'regional_guidance'>,
+  referenceImageId: string
+) =>
+  createSelector(selectCanvasSlice, (canvas) => {
+    const referenceImage = selectRegionalGuidanceReferenceImage(canvas, entityIdentifier, referenceImageId);
+    return !!referenceImage && referenceImage.ipAdapter.image !== null;
+  });
+
+export const RegionalGuidanceIPAdapterSettings = memo(({ referenceImageId }: Props) => {
+  const entityIdentifier = useEntityIdentifierContext('regional_guidance');
+
+  const selectIPAdapterHasImage = useMemo(
+    () => buildSelectIPAdapterHasImage(entityIdentifier, referenceImageId),
+    [entityIdentifier, referenceImageId]
+  );
+  const hasImage = useAppSelector(selectIPAdapterHasImage);
+
+  if (!hasImage) {
+    return <RegionalGuidanceIPAdapterSettingsEmptyState referenceImageId={referenceImageId} />;
+  }
+
+  return <RegionalGuidanceIPAdapterSettingsContent referenceImageId={referenceImageId} />;
+});
+
 RegionalGuidanceIPAdapterSettings.displayName = 'RegionalGuidanceIPAdapterSettings';

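The refactor above is a gate/content split: the exported component subscribes to a single boolean (does the reference image exist?) and mounts either the empty state or the full settings, so the heavier content component and its hooks only run when an image is present. A condensed sketch of the shape; the declare'd pieces stand in for the real modules shown in this diff:

import type { FC } from 'react';
import { memo, useMemo } from 'react';
import { useAppSelector } from 'app/store/storeHooks';

// Stand-ins for the concrete pieces in the diff above.
declare const buildSelectHasImage: (referenceImageId: string) => (state: unknown) => boolean;
declare const EmptyState: FC<{ referenceImageId: string }>;
declare const Content: FC<{ referenceImageId: string }>;

const Settings = memo(({ referenceImageId }: { referenceImageId: string }) => {
  const selectHasImage = useMemo(() => buildSelectHasImage(referenceImageId), [referenceImageId]);
  const hasImage = useAppSelector(selectHasImage);
  if (!hasImage) {
    return <EmptyState referenceImageId={referenceImageId} />;
  }
  return <Content referenceImageId={referenceImageId} />;
});
Settings.displayName = 'Settings';
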
@@ -0,0 +1,99 @@
+import { Button, Flex, IconButton, Spacer, Text } from '@invoke-ai/ui-library';
+import { useAppDispatch } from 'app/store/storeHooks';
+import { useImageUploadButton } from 'common/hooks/useImageUploadButton';
+import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
+import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
+import { rgIPAdapterDeleted } from 'features/controlLayers/store/canvasSlice';
+import type { SetRegionalGuidanceReferenceImageDndTargetData } from 'features/dnd/dnd';
+import { setRegionalGuidanceReferenceImageDndTarget } from 'features/dnd/dnd';
+import { DndDropTarget } from 'features/dnd/DndDropTarget';
+import { setRegionalGuidanceReferenceImage } from 'features/imageActions/actions';
+import { activeTabCanvasRightPanelChanged } from 'features/ui/store/uiSlice';
+import { memo, useCallback, useMemo } from 'react';
+import { Trans, useTranslation } from 'react-i18next';
+import { PiXBold } from 'react-icons/pi';
+import type { ImageDTO } from 'services/api/types';
+
+type Props = {
+  referenceImageId: string;
+};
+
+export const RegionalGuidanceIPAdapterSettingsEmptyState = memo(({ referenceImageId }: Props) => {
+  const { t } = useTranslation();
+  const entityIdentifier = useEntityIdentifierContext('regional_guidance');
+  const dispatch = useAppDispatch();
+  const isBusy = useCanvasIsBusy();
+  const onUpload = useCallback(
+    (imageDTO: ImageDTO) => {
+      setRegionalGuidanceReferenceImage({ imageDTO, entityIdentifier, referenceImageId, dispatch });
+    },
+    [dispatch, entityIdentifier, referenceImageId]
+  );
+  const uploadApi = useImageUploadButton({ onUpload, allowMultiple: false });
+  const onClickGalleryButton = useCallback(() => {
+    dispatch(activeTabCanvasRightPanelChanged('gallery'));
+  }, [dispatch]);
+  const onDeleteIPAdapter = useCallback(() => {
+    dispatch(rgIPAdapterDeleted({ entityIdentifier, referenceImageId }));
+  }, [dispatch, entityIdentifier, referenceImageId]);
+
+  const dndTargetData = useMemo<SetRegionalGuidanceReferenceImageDndTargetData>(
+    () =>
+      setRegionalGuidanceReferenceImageDndTarget.getData({
+        entityIdentifier,
+        referenceImageId,
+      }),
+    [entityIdentifier, referenceImageId]
+  );
+
+  return (
+    <Flex flexDir="column" gap={2} position="relative" w="full">
+      <Flex alignItems="center" gap={2}>
+        <Text fontWeight="semibold" color="base.400">
+          {t('controlLayers.referenceImage')}
+        </Text>
+        <Spacer />
+        <IconButton
+          size="sm"
+          variant="link"
+          alignSelf="stretch"
+          icon={<PiXBold />}
+          tooltip={t('controlLayers.deleteReferenceImage')}
+          aria-label={t('controlLayers.deleteReferenceImage')}
+          onClick={onDeleteIPAdapter}
+          colorScheme="error"
+        />
+      </Flex>
+      <Flex alignItems="center" gap={2} p={4}>
+        <Text textAlign="center" color="base.300">
+          <Trans
+            i18nKey="controlLayers.referenceImageEmptyState"
+            components={{
+              UploadButton: (
+                <Button
+                  isDisabled={isBusy}
+                  size="sm"
+                  variant="link"
+                  color="base.300"
+                  {...uploadApi.getUploadButtonProps()}
+                />
+              ),
+              GalleryButton: (
+                <Button onClick={onClickGalleryButton} isDisabled={isBusy} size="sm" variant="link" color="base.300" />
+              ),
+            }}
+          />
+        </Text>
+      </Flex>
+      <input {...uploadApi.getUploadInputProps()} />
+      <DndDropTarget
+        dndTarget={setRegionalGuidanceReferenceImageDndTarget}
+        dndTargetData={dndTargetData}
+        label={t('controlLayers.useImage')}
+        isDisabled={isBusy}
+      />
+    </Flex>
+  );
+});
+
+RegionalGuidanceIPAdapterSettingsEmptyState.displayName = 'RegionalGuidanceIPAdapterSettingsEmptyState';

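The <Trans> usage above presumably pairs with a translation string containing named tags; react-i18next replaces each tag with the matching element from the components map, so the buttons keep their handlers while translators control the surrounding copy. A minimal sketch (the key and English string here are invented):

import { Trans } from 'react-i18next';

// en.json (hypothetical):
// "emptyState": "Drop an image here, <UploadButton>upload one</UploadButton>, or pick one from the <GalleryButton>gallery</GalleryButton>."
export const EmptyStateCopy = () => (
  <Trans
    i18nKey="emptyState"
    components={{
      UploadButton: <button type="button" />,
      GalleryButton: <button type="button" />,
    }}
  />
);
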
@@ -13,7 +13,11 @@ export const RegionalGuidanceIPAdapters = memo(() => {
   const selectIPAdapterIds = useMemo(
     () =>
       createMemoizedSelector(selectCanvasSlice, (canvas) => {
-        const ipAdapterIds = selectEntityOrThrow(canvas, entityIdentifier).referenceImages.map(({ id }) => id);
+        const ipAdapterIds = selectEntityOrThrow(
+          canvas,
+          entityIdentifier,
+          'RegionalGuidanceIPAdapters'
+        ).referenceImages.map(({ id }) => id);
         if (ipAdapterIds.length === 0) {
           return EMPTY_ARRAY;
         }

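Returning EMPTY_ARRAY instead of a fresh [] here is a referential-stability trick: the selector rebuilds the mapped array on every run, and an always-new empty array would fail the equality check and re-render every subscriber. A module-level constant keeps "no items" referentially stable:

export const EMPTY_ARRAY: readonly never[] = [];

// Selector-style helper: map when there are items, otherwise return the shared
// constant so prev === next holds for the empty case.
const selectIds = (items: { id: string }[]): readonly string[] =>
  items.length === 0 ? EMPTY_ARRAY : items.map(({ id }) => id);
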
@@ -13,7 +13,11 @@ export const RegionalGuidanceMenuItemsAutoNegative = memo(() => {
   const { t } = useTranslation();
   const dispatch = useAppDispatch();
   const selectAutoNegative = useMemo(
-    () => createSelector(selectCanvasSlice, (canvas) => selectEntityOrThrow(canvas, entityIdentifier).autoNegative),
+    () =>
+      createSelector(
+        selectCanvasSlice,
+        (canvas) => selectEntityOrThrow(canvas, entityIdentifier, 'RegionalGuidanceMenuItemsAutoNegative').autoNegative
+      ),
     [entityIdentifier]
   );
   const autoNegative = useAppSelector(selectAutoNegative);

@@ -20,7 +20,10 @@ export const RegionalGuidanceNegativePrompt = memo(() => {
   const entityIdentifier = useEntityIdentifierContext('regional_guidance');
   const selectPrompt = useMemo(
     () =>
-      createSelector(selectCanvasSlice, (canvas) => selectEntityOrThrow(canvas, entityIdentifier).negativePrompt ?? ''),
+      createSelector(
+        selectCanvasSlice,
+        (canvas) => selectEntityOrThrow(canvas, entityIdentifier, 'RegionalGuidanceNegativePrompt').negativePrompt ?? ''
+      ),
     [entityIdentifier]
   );
   const prompt = useAppSelector(selectPrompt);

@@ -20,7 +20,10 @@ export const RegionalGuidancePositivePrompt = memo(() => {
   const entityIdentifier = useEntityIdentifierContext('regional_guidance');
   const selectPrompt = useMemo(
     () =>
-      createSelector(selectCanvasSlice, (canvas) => selectEntityOrThrow(canvas, entityIdentifier).positivePrompt ?? ''),
+      createSelector(
+        selectCanvasSlice,
+        (canvas) => selectEntityOrThrow(canvas, entityIdentifier, 'RegionalGuidancePositivePrompt').positivePrompt ?? ''
+      ),
     [entityIdentifier]
   );
   const prompt = useAppSelector(selectPrompt);

@@ -5,26 +5,25 @@ import { CanvasEntitySettingsWrapper } from 'features/controlLayers/components/c
 import { RegionalGuidanceAddPromptsIPAdapterButtons } from 'features/controlLayers/components/RegionalGuidance/RegionalGuidanceAddPromptsIPAdapterButtons';
 import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
 import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
+import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
 import { memo, useMemo } from 'react';
 
 import { RegionalGuidanceIPAdapters } from './RegionalGuidanceIPAdapters';
 import { RegionalGuidanceNegativePrompt } from './RegionalGuidanceNegativePrompt';
 import { RegionalGuidancePositivePrompt } from './RegionalGuidancePositivePrompt';
 
+const buildSelectFlags = (entityIdentifier: CanvasEntityIdentifier<'regional_guidance'>) =>
+  createMemoizedSelector(selectCanvasSlice, (canvas) => {
+    const entity = selectEntityOrThrow(canvas, entityIdentifier, 'RegionalGuidanceSettings');
+    return {
+      hasPositivePrompt: entity.positivePrompt !== null,
+      hasNegativePrompt: entity.negativePrompt !== null,
+      hasIPAdapters: entity.referenceImages.length > 0,
+    };
+  });
 export const RegionalGuidanceSettings = memo(() => {
   const entityIdentifier = useEntityIdentifierContext('regional_guidance');
-  const selectFlags = useMemo(
-    () =>
-      createMemoizedSelector(selectCanvasSlice, (canvas) => {
-        const entity = selectEntityOrThrow(canvas, entityIdentifier);
-        return {
-          hasPositivePrompt: entity.positivePrompt !== null,
-          hasNegativePrompt: entity.negativePrompt !== null,
-          hasIPAdapters: entity.referenceImages.length > 0,
-        };
-      }),
-    [entityIdentifier]
-  );
+  const selectFlags = useMemo(() => buildSelectFlags(entityIdentifier), [entityIdentifier]);
   const flags = useAppSelector(selectFlags);
 
   return (

@@ -5,6 +5,7 @@ import { StagingAreaToolbarDiscardSelectedButton } from 'features/controlLayers/
 import { StagingAreaToolbarImageCountButton } from 'features/controlLayers/components/StagingArea/StagingAreaToolbarImageCountButton';
 import { StagingAreaToolbarNextButton } from 'features/controlLayers/components/StagingArea/StagingAreaToolbarNextButton';
 import { StagingAreaToolbarPrevButton } from 'features/controlLayers/components/StagingArea/StagingAreaToolbarPrevButton';
+import { StagingAreaToolbarSaveAsMenu } from 'features/controlLayers/components/StagingArea/StagingAreaToolbarSaveAsMenu';
 import { StagingAreaToolbarSaveSelectedToGalleryButton } from 'features/controlLayers/components/StagingArea/StagingAreaToolbarSaveSelectedToGalleryButton';
 import { StagingAreaToolbarToggleShowResultsButton } from 'features/controlLayers/components/StagingArea/StagingAreaToolbarToggleShowResultsButton';
 import { memo } from 'react';
@@ -21,6 +22,7 @@ export const StagingAreaToolbar = memo(() => {
         <StagingAreaToolbarAcceptButton />
         <StagingAreaToolbarToggleShowResultsButton />
         <StagingAreaToolbarSaveSelectedToGalleryButton />
+        <StagingAreaToolbarSaveAsMenu />
         <StagingAreaToolbarDiscardSelectedButton />
         <StagingAreaToolbarDiscardAllButton />
       </ButtonGroup>

@@ -0,0 +1,136 @@
+import { IconButton, Menu, MenuButton, MenuItem, MenuList } from '@invoke-ai/ui-library';
+import { useAppStore } from 'app/store/nanostores/store';
+import { useAppSelector } from 'app/store/storeHooks';
+import { NewLayerIcon } from 'features/controlLayers/components/common/icons';
+import { selectSelectedImage } from 'features/controlLayers/store/canvasStagingAreaSlice';
+import { createNewCanvasEntityFromImage } from 'features/imageActions/actions';
+import { toast } from 'features/toast/toast';
+import { memo, useCallback } from 'react';
+import { useTranslation } from 'react-i18next';
+import { PiDotsThreeBold } from 'react-icons/pi';
+import { imageDTOToFile, uploadImage } from 'services/api/endpoints/images';
+
+const uploadImageArg = { image_category: 'general', is_intermediate: true, silent: true } as const;
+
+export const StagingAreaToolbarSaveAsMenu = memo(() => {
+  const { t } = useTranslation();
+  const selectedImage = useAppSelector(selectSelectedImage);
+  const store = useAppStore();
+
+  const onClickNewRasterLayerFromImage = useCallback(async () => {
+    if (!selectedImage) {
+      return;
+    }
+    const { dispatch, getState } = store;
+    const file = await imageDTOToFile(selectedImage.imageDTO);
+    const imageDTO = await uploadImage({ file, ...uploadImageArg });
+    createNewCanvasEntityFromImage({
+      imageDTO,
+      type: 'raster_layer',
+      dispatch,
+      getState,
+      overrides: { isEnabled: false }, // We are adding the layer while staging, it should be disabled by default
+    });
+    toast({
+      id: 'SENT_TO_CANVAS',
+      title: t('toast.sentToCanvas'),
+      status: 'success',
+    });
+  }, [selectedImage, store, t]);
+
+  const onClickNewControlLayerFromImage = useCallback(async () => {
+    if (!selectedImage) {
+      return;
+    }
+    const { dispatch, getState } = store;
+    const file = await imageDTOToFile(selectedImage.imageDTO);
+    const imageDTO = await uploadImage({ file, ...uploadImageArg });
+    createNewCanvasEntityFromImage({
+      imageDTO,
+      type: 'control_layer',
+      dispatch,
+      getState,
+      overrides: { isEnabled: false }, // We are adding the layer while staging, it should be disabled by default
+    });
+    toast({
+      id: 'SENT_TO_CANVAS',
+      title: t('toast.sentToCanvas'),
+      status: 'success',
+    });
+  }, [selectedImage, store, t]);
+
+  const onClickNewInpaintMaskFromImage = useCallback(async () => {
+    if (!selectedImage) {
+      return;
+    }
+    const { dispatch, getState } = store;
+    const file = await imageDTOToFile(selectedImage.imageDTO);
+    const imageDTO = await uploadImage({ file, ...uploadImageArg });
+    createNewCanvasEntityFromImage({
+      imageDTO,
+      type: 'inpaint_mask',
+      dispatch,
+      getState,
+      overrides: { isEnabled: false }, // We are adding the layer while staging, it should be disabled by default
+    });
+    toast({
+      id: 'SENT_TO_CANVAS',
+      title: t('toast.sentToCanvas'),
+      status: 'success',
+    });
+  }, [selectedImage, store, t]);
+
+  const onClickNewRegionalGuidanceFromImage = useCallback(async () => {
+    if (!selectedImage) {
+      return;
+    }
+    const { dispatch, getState } = store;
+    const file = await imageDTOToFile(selectedImage.imageDTO);
+    const imageDTO = await uploadImage({ file, ...uploadImageArg });
+    createNewCanvasEntityFromImage({
+      imageDTO,
+      type: 'regional_guidance',
+      dispatch,
+      getState,
+      overrides: { isEnabled: false }, // We are adding the layer while staging, it should be disabled by default
+    });
+    toast({
+      id: 'SENT_TO_CANVAS',
+      title: t('toast.sentToCanvas'),
+      status: 'success',
+    });
+  }, [selectedImage, store, t]);
+
+  return (
+    <Menu>
+      <MenuButton
+        as={IconButton}
+        aria-label={t('controlLayers.newLayerFromImage')}
+        tooltip={t('controlLayers.newLayerFromImage')}
+        icon={<PiDotsThreeBold />}
+        colorScheme="invokeBlue"
+        isDisabled={!selectedImage}
+      />
+      <MenuList>
+        <MenuItem icon={<NewLayerIcon />} onClickCapture={onClickNewInpaintMaskFromImage} isDisabled={!selectedImage}>
+          {t('controlLayers.inpaintMask')}
+        </MenuItem>
+        <MenuItem
+          icon={<NewLayerIcon />}
+          onClickCapture={onClickNewRegionalGuidanceFromImage}
+          isDisabled={!selectedImage}
+        >
+          {t('controlLayers.regionalGuidance')}
+        </MenuItem>
+        <MenuItem icon={<NewLayerIcon />} onClickCapture={onClickNewControlLayerFromImage} isDisabled={!selectedImage}>
+          {t('controlLayers.controlLayer')}
+        </MenuItem>
+        <MenuItem icon={<NewLayerIcon />} onClickCapture={onClickNewRasterLayerFromImage} isDisabled={!selectedImage}>
+          {t('controlLayers.rasterLayer')}
+        </MenuItem>
+      </MenuList>
+    </Menu>
+  );
+});
+
+StagingAreaToolbarSaveAsMenu.displayName = 'StagingAreaToolbarSaveAsMenu';

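The four handlers in the new menu are identical apart from the entity type. For reference, a sketch of how they could collapse into one builder; this is a possible refactor, not what the commit does, and it assumes the same selectedImage, store, t, and imported helpers that the component above already uses:

type NewEntityType = 'raster_layer' | 'control_layer' | 'inpaint_mask' | 'regional_guidance';

// Inside the component body, closing over selectedImage, store, and t:
const buildOnClickNewEntityFromImage = (type: NewEntityType) => async () => {
  if (!selectedImage) {
    return;
  }
  const { dispatch, getState } = store;
  const file = await imageDTOToFile(selectedImage.imageDTO);
  const imageDTO = await uploadImage({ file, ...uploadImageArg });
  createNewCanvasEntityFromImage({ imageDTO, type, dispatch, getState, overrides: { isEnabled: false } });
  toast({ id: 'SENT_TO_CANVAS', title: t('toast.sentToCanvas'), status: 'success' });
};
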
@@ -7,7 +7,7 @@ import { toast } from 'features/toast/toast';
 import { memo, useCallback } from 'react';
 import { useTranslation } from 'react-i18next';
 import { PiFloppyDiskBold } from 'react-icons/pi';
-import { uploadImage } from 'services/api/endpoints/images';
+import { imageDTOToFile, uploadImage } from 'services/api/endpoints/images';
 
 const TOAST_ID = 'SAVE_STAGING_AREA_IMAGE_TO_GALLERY';
 
@@ -25,11 +25,8 @@ export const StagingAreaToolbarSaveSelectedToGalleryButton = memo(() => {
     // To save the image to gallery, we will download it and re-upload it. This allows the user to delete the image
     // from the gallery without borking the canvas, which may need this image to exist.
     const result = await withResultAsync(async () => {
-      // Download the image
-      const res = await fetch(selectedImage.imageDTO.image_url);
-      const blob = await res.blob();
-      // Create a new file with the same name, which we will upload
-      const file = new File([blob], `copy_of_${selectedImage.imageDTO.image_name}`, { type: 'image/png' });
+      const file = await imageDTOToFile(selectedImage.imageDTO);
 
       await uploadImage({
         file,

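The removed lines above spell out what the shared helper must do; under that assumption, imageDTOToFile is roughly the following (the real implementation lives in services/api/endpoints/images and may differ in naming and error handling):

import type { ImageDTO } from 'services/api/types';

// Download the DTO's image and wrap it in a File, preserving a recognizable name.
const imageDTOToFileSketch = async (imageDTO: ImageDTO): Promise<File> => {
  const res = await fetch(imageDTO.image_url);
  const blob = await res.blob();
  return new File([blob], `copy_of_${imageDTO.image_name}`, { type: 'image/png' });
};
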
@@ -4,8 +4,8 @@ import { CanvasSettingsPopover } from 'features/controlLayers/components/Setting
 import { ToolColorPicker } from 'features/controlLayers/components/Tool/ToolFillColorPicker';
 import { ToolSettings } from 'features/controlLayers/components/Tool/ToolSettings';
 import { CanvasToolbarFitBboxToLayersButton } from 'features/controlLayers/components/Toolbar/CanvasToolbarFitBboxToLayersButton';
+import { CanvasToolbarNewSessionMenuButton } from 'features/controlLayers/components/Toolbar/CanvasToolbarNewSessionMenuButton';
 import { CanvasToolbarRedoButton } from 'features/controlLayers/components/Toolbar/CanvasToolbarRedoButton';
-import { CanvasToolbarResetCanvasButton } from 'features/controlLayers/components/Toolbar/CanvasToolbarResetCanvasButton';
 import { CanvasToolbarResetViewButton } from 'features/controlLayers/components/Toolbar/CanvasToolbarResetViewButton';
 import { CanvasToolbarSaveToGalleryButton } from 'features/controlLayers/components/Toolbar/CanvasToolbarSaveToGalleryButton';
 import { CanvasToolbarScale } from 'features/controlLayers/components/Toolbar/CanvasToolbarScale';
@@ -43,7 +43,7 @@ export const CanvasToolbar = memo(() => {
         <CanvasToolbarSaveToGalleryButton />
         <CanvasToolbarUndoButton />
         <CanvasToolbarRedoButton />
-        <CanvasToolbarResetCanvasButton />
+        <CanvasToolbarNewSessionMenuButton />
         <CanvasSettingsPopover />
       </Flex>
     </Flex>

@@ -0,0 +1,25 @@
+import { IconButton, Menu, MenuButton, MenuList } from '@invoke-ai/ui-library';
+import { SessionMenuItems } from 'common/components/SessionMenuItems';
+import { memo } from 'react';
+import { useTranslation } from 'react-i18next';
+import { PiFilePlusBold } from 'react-icons/pi';
+
+export const CanvasToolbarNewSessionMenuButton = memo(() => {
+  const { t } = useTranslation();
+  return (
+    <Menu placement="bottom-end">
+      <MenuButton
+        as={IconButton}
+        aria-label={t('controlLayers.newSession')}
+        icon={<PiFilePlusBold />}
+        variant="link"
+        alignSelf="stretch"
+      />
+      <MenuList>
+        <SessionMenuItems />
+      </MenuList>
+    </Menu>
+  );
+});
+
+CanvasToolbarNewSessionMenuButton.displayName = 'CanvasToolbarNewSessionMenuButton';

@@ -1,30 +0,0 @@
-import { IconButton } from '@invoke-ai/ui-library';
-import { useAppDispatch } from 'app/store/storeHooks';
-import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
-import { canvasReset } from 'features/controlLayers/store/actions';
-import { memo, useCallback } from 'react';
-import { useTranslation } from 'react-i18next';
-import { PiTrashBold } from 'react-icons/pi';
-
-export const CanvasToolbarResetCanvasButton = memo(() => {
-  const { t } = useTranslation();
-  const dispatch = useAppDispatch();
-  const canvasManager = useCanvasManager();
-  const onClick = useCallback(() => {
-    dispatch(canvasReset());
-    canvasManager.stage.fitLayersToStage();
-  }, [canvasManager.stage, dispatch]);
-  return (
-    <IconButton
-      aria-label={t('controlLayers.resetCanvas')}
-      tooltip={t('controlLayers.resetCanvas')}
-      onClick={onClick}
-      colorScheme="error"
-      icon={<PiTrashBold />}
-      variant="link"
-      alignSelf="stretch"
-    />
-  );
-});
-
-CanvasToolbarResetCanvasButton.displayName = 'CanvasToolbarResetCanvasButton';

@@ -1,6 +1,7 @@
 import { Flex } from '@invoke-ai/ui-library';
 import { CanvasEntityDeleteButton } from 'features/controlLayers/components/common/CanvasEntityDeleteButton';
 import { CanvasEntityEnabledToggle } from 'features/controlLayers/components/common/CanvasEntityEnabledToggle';
+import { CanvasEntityHeaderWarnings } from 'features/controlLayers/components/common/CanvasEntityHeaderWarnings';
 import { CanvasEntityIsBookmarkedForQuickSwitchToggle } from 'features/controlLayers/components/common/CanvasEntityIsBookmarkedForQuickSwitchToggle';
 import { CanvasEntityIsLockedToggle } from 'features/controlLayers/components/common/CanvasEntityIsLockedToggle';
 import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
@@ -11,6 +12,7 @@ export const CanvasEntityHeaderCommonActions = memo(() => {
 
   return (
     <Flex alignSelf="stretch">
+      <CanvasEntityHeaderWarnings />
       <CanvasEntityIsBookmarkedForQuickSwitchToggle />
       {entityIdentifier.type !== 'reference_image' && <CanvasEntityIsLockedToggle />}
       <CanvasEntityEnabledToggle />

@@ -0,0 +1,101 @@
+import { Flex, IconButton, ListItem, Text, UnorderedList } from '@invoke-ai/ui-library';
+import { createSelector } from '@reduxjs/toolkit';
+import { EMPTY_ARRAY } from 'app/store/constants';
+import { useAppSelector } from 'app/store/storeHooks';
+import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
+import { useEntityIsEnabled } from 'features/controlLayers/hooks/useEntityIsEnabled';
+import { selectModel } from 'features/controlLayers/store/paramsSlice';
+import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
+import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
+import {
+  getControlLayerWarnings,
+  getGlobalReferenceImageWarnings,
+  getInpaintMaskWarnings,
+  getRasterLayerWarnings,
+  getRegionalGuidanceWarnings,
+} from 'features/controlLayers/store/validators';
+import type { TFunction } from 'i18next';
+import { upperFirst } from 'lodash-es';
+import { memo, useMemo } from 'react';
+import { useTranslation } from 'react-i18next';
+import { PiWarningBold } from 'react-icons/pi';
+import type { Equals } from 'tsafe';
+import { assert } from 'tsafe';
+
+const buildSelectWarnings = (entityIdentifier: CanvasEntityIdentifier, t: TFunction) => {
+  return createSelector(selectCanvasSlice, selectModel, (canvas, model) => {
+    // This component is used within a <CanvasEntityStateGate /> so we can safely assume that the entity exists.
+    // Should never throw.
+    const entity = selectEntityOrThrow(canvas, entityIdentifier, 'CanvasEntityHeaderWarnings');
+
+    let warnings: string[] = [];
+
+    const entityType = entity.type;
+
+    if (entityType === 'control_layer') {
+      warnings = getControlLayerWarnings(entity, model);
+    } else if (entityType === 'regional_guidance') {
+      warnings = getRegionalGuidanceWarnings(entity, model);
+    } else if (entityType === 'inpaint_mask') {
+      warnings = getInpaintMaskWarnings(entity, model);
+    } else if (entityType === 'raster_layer') {
+      warnings = getRasterLayerWarnings(entity, model);
+    } else if (entityType === 'reference_image') {
+      warnings = getGlobalReferenceImageWarnings(entity, model);
+    } else {
+      assert<Equals<typeof entityType, never>>(false, 'Unexpected entity type');
+    }
+
+    // Return a stable reference if there are no warnings
+    if (warnings.length === 0) {
+      return EMPTY_ARRAY;
+    }
+
+    return warnings.map((w) => t(w)).map(upperFirst);
+  });
+};
+
+export const CanvasEntityHeaderWarnings = memo(() => {
+  const entityIdentifier = useEntityIdentifierContext();
+  const { t } = useTranslation();
+  const isEnabled = useEntityIsEnabled(entityIdentifier);
+  const selectWarnings = useMemo(() => buildSelectWarnings(entityIdentifier, t), [entityIdentifier, t]);
+  const warnings = useAppSelector(selectWarnings);
+
+  if (warnings.length === 0) {
+    return null;
+  }
+
+  return (
+    // Using IconButton here bc it matches the styling of the actual buttons in the header without any fanagling, but
+    // it's not a button
+    <IconButton
+      as="span"
+      size="sm"
+      variant="link"
+      alignSelf="stretch"
+      aria-label="warnings"
+      tooltip={<TooltipContent warnings={warnings} />}
+      icon={<PiWarningBold />}
+      colorScheme="warning"
+      isDisabled={!isEnabled}
+    />
+  );
+});
+
+CanvasEntityHeaderWarnings.displayName = 'CanvasEntityHeaderWarnings';
+
+const TooltipContent = memo((props: { warnings: string[] }) => {
+  const { t } = useTranslation();
+  return (
+    <Flex flexDir="column">
+      <Text>{t('controlLayers.warnings.problemsFound')}:</Text>
+      <UnorderedList>
+        {props.warnings.map((warning, index) => (
+          <ListItem key={index}>{warning}</ListItem>
+        ))}
+      </UnorderedList>
+    </Flex>
+  );
+});
+TooltipContent.displayName = 'TooltipContent';

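The assert<Equals<typeof entityType, never>>(false, ...) line in the new file is a compile-time exhaustiveness check from tsafe: once every member of the entity-type union has been handled, entityType narrows to never in the else branch, and only then does the Equals type admit the assertion. The same idea without tsafe, as a dependency-free sketch:

type EntityType = 'control_layer' | 'regional_guidance' | 'inpaint_mask' | 'raster_layer' | 'reference_image';

const assertNever = (x: never): never => {
  throw new Error(`Unexpected entity type: ${String(x)}`);
};

const warningCountFor = (t: EntityType): number => {
  switch (t) {
    case 'control_layer':
    case 'regional_guidance':
    case 'inpaint_mask':
    case 'raster_layer':
    case 'reference_image':
      return 0;
    default:
      return assertNever(t); // compile error here if a union member is ever unhandled
  }
};
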
@@ -0,0 +1,20 @@
+import { useAppSelector } from 'app/store/storeHooks';
+import { selectEntityExists } from 'features/controlLayers/store/selectors';
+import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
+import type { PropsWithChildren } from 'react';
+import { memo, useMemo } from 'react';
+
+/**
+ * A "gate" component that renders its children only if the entity exists in redux state.
+ */
+export const CanvasEntityStateGate = memo((props: PropsWithChildren<{ entityIdentifier: CanvasEntityIdentifier }>) => {
+  const selector = useMemo(() => selectEntityExists(props.entityIdentifier), [props.entityIdentifier]);
+  const entityExistsInState = useAppSelector(selector);
+
+  if (!entityExistsInState) {
+    return null;
+  }
+
+  return props.children;
+});
+CanvasEntityStateGate.displayName = 'CanvasEntityStateGate';

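Usage matches the RegionalGuidance hunk at the top of this diff: wrapping the entity UI in the gate means the children unmount as soon as the entity is removed from state, which keeps selectEntityOrThrow-based selectors in those children from throwing during the deletion race.

<CanvasEntityStateGate entityIdentifier={entityIdentifier}>
  <CanvasEntityContainer>{/* header, settings, ... */}</CanvasEntityContainer>
</CanvasEntityStateGate>
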
@@ -14,7 +14,7 @@ import {
   rgPositivePromptChanged,
 } from 'features/controlLayers/store/canvasSlice';
 import { selectBase } from 'features/controlLayers/store/paramsSlice';
-import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
+import { selectCanvasSlice, selectEntity } from 'features/controlLayers/store/selectors';
 import type {
   CanvasEntityIdentifier,
   CanvasRegionalGuidanceState,
@@ -29,7 +29,13 @@ import { modelConfigsAdapterSelectors, selectModelConfigsQuery } from 'services/
 import type { ControlNetModelConfig, IPAdapterModelConfig, T2IAdapterModelConfig } from 'services/api/types';
 import { isControlNetOrT2IAdapterModelConfig, isIPAdapterModelConfig } from 'services/api/types';
 
-/** @knipignore */
+/**
+ * Selects the default control adapter configuration based on the model configurations and the base.
+ *
+ * Be sure to clone the output of this selector before modifying it!
+ *
+ * @knipignore
+ */
 export const selectDefaultControlAdapter = createSelector(
   selectModelConfigsQuery,
   selectBase,
@@ -52,6 +58,11 @@ export const selectDefaultControlAdapter = createSelector(
   }
 );
 
+/**
+ * Selects the default IP adapter configuration based on the model configurations and the base.
+ *
+ * Be sure to clone the output of this selector before modifying it!
+ */
 export const selectDefaultIPAdapter = createSelector(
   selectModelConfigsQuery,
   selectBase,
@@ -117,7 +128,9 @@ export const useAddRegionalReferenceImage = () => {
 
   const func = useCallback(() => {
     const overrides: Partial<CanvasRegionalGuidanceState> = {
-      referenceImages: [{ id: getPrefixedId('regional_guidance_reference_image'), ipAdapter: defaultIPAdapter }],
+      referenceImages: [
+        { id: getPrefixedId('regional_guidance_reference_image'), ipAdapter: deepClone(defaultIPAdapter) },
+      ],
     };
     dispatch(rgAdded({ isSelected: true, overrides }));
   }, [defaultIPAdapter, dispatch]);
@@ -129,7 +142,7 @@ export const useAddGlobalReferenceImage = () => {
   const dispatch = useAppDispatch();
   const defaultIPAdapter = useAppSelector(selectDefaultIPAdapter);
   const func = useCallback(() => {
-    const overrides = { ipAdapter: defaultIPAdapter };
+    const overrides = { ipAdapter: deepClone(defaultIPAdapter) };
     dispatch(referenceImageAdded({ isSelected: true, overrides }));
   }, [defaultIPAdapter, dispatch]);
 
@@ -140,7 +153,7 @@ export const useAddRegionalGuidanceIPAdapter = (entityIdentifier: CanvasEntityId
   const dispatch = useAppDispatch();
   const defaultIPAdapter = useAppSelector(selectDefaultIPAdapter);
   const func = useCallback(() => {
-    dispatch(rgIPAdapterAdded({ entityIdentifier, overrides: { ipAdapter: defaultIPAdapter } }));
+    dispatch(rgIPAdapterAdded({ entityIdentifier, overrides: { ipAdapter: deepClone(defaultIPAdapter) } }));
   }, [defaultIPAdapter, dispatch, entityIdentifier]);
 
   return func;
@@ -168,7 +181,7 @@ export const buildSelectValidRegionalGuidanceActions = (
   entityIdentifier: CanvasEntityIdentifier<'regional_guidance'>
 ) => {
   return createMemoizedSelector(selectCanvasSlice, (canvas) => {
-    const entity = selectEntityOrThrow(canvas, entityIdentifier);
+    const entity = selectEntity(canvas, entityIdentifier);
     return {
       canAddPositivePrompt: entity?.positivePrompt === null,
       canAddNegativePrompt: entity?.negativePrompt === null,

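The deepClone calls added above guard against shared references: selectDefaultIPAdapter is a memoized selector, so every caller receives the same object, and dispatching it uncloned into two entities would make them share one (frozen) config, exactly what the new "Be sure to clone the output" doc comments warn about. A tiny illustration with lodash-es and stand-in data:

import { cloneDeep } from 'lodash-es';

// Stand-in for the selector's memoized output.
const defaultIPAdapter = { weight: 1, beginEndStepPct: [0, 1] };

const a = cloneDeep(defaultIPAdapter);
const b = cloneDeep(defaultIPAdapter);
a.weight = 0.5;
console.log(b.weight); // 1 — each entity gets an independent copy
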
@@ -0,0 +1,31 @@
+import { useStore } from '@nanostores/react';
+import { useEffect, useState } from 'react';
+import { useTranslation } from 'react-i18next';
+import { $invocationProgressMessage } from 'services/events/stores';
+
+export const useDeferredModelLoadingInvocationProgressMessage = () => {
+  const { t } = useTranslation();
+  const invocationProgressMessage = useStore($invocationProgressMessage);
+  const [delayedMessage, setDelayedMessage] = useState<string | null>(null);
+
+  useEffect(() => {
+    if (!invocationProgressMessage) {
+      setDelayedMessage(null);
+      return;
+    }
+
+    if (invocationProgressMessage && !invocationProgressMessage.startsWith('Loading model')) {
+      setDelayedMessage(null);
+      return;
+    }
+
+    // Set a timeout to update delayedMessage after 5 seconds
+    const timer = setTimeout(() => {
+      setDelayedMessage(`${t('common.loadingModel')}...`);
+    }, 5000);
+
+    return () => clearTimeout(timer); // Cleanup on effect re-run
+  }, [invocationProgressMessage, t]);
+
+  return delayedMessage;
+};

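A hypothetical consumer of the hook above: the message only appears after a model-load progress message has been pending for five seconds, so fast loads never flash the text.

// useDeferredModelLoadingInvocationProgressMessage is the hook defined above.
const ModelLoadingMessage = () => {
  const message = useDeferredModelLoadingInvocationProgressMessage();
  if (!message) {
    return null;
  }
  return <span>{message}</span>;
};
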
Some files were not shown because too many files have changed in this diff.