Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-21 12:47:57 -05:00)

Compare commits: v5.6.0...maryhipp/m (281 commits)
The 281 commits in this range are listed by SHA only, from 59bd6b935d through e2f05d0800; the author, date, and message columns are empty in the mirror's commit table.
@@ -58,7 +58,7 @@ RUN --mount=type=cache,target=/home/ubuntu/.cache/uv,uid=1000,gid=1000 \

#### Build the Web UI ------------------------------------

FROM node:20-slim AS web-builder
FROM docker.io/node:22-slim AS web-builder
ENV PNPM_HOME="/pnpm"
ENV PATH="$PNPM_HOME:$PATH"
RUN corepack use pnpm@8.x
183  docs/faq.md

@@ -1,26 +1,18 @@
# FAQ

!!! info "How to Reinstall"

    Many issues can be resolved by re-installing the application. You won't lose any data by re-installing. We suggest downloading the [latest release](https://github.com/invoke-ai/InvokeAI/releases/latest) and using it to re-install the application. Consult the [installer guide](./installation/installer.md) for more information.

    When you run the installer, you'll have an option to select the version to install. If you aren't ready to upgrade, you can choose the current version to fix a broken install.

If the troubleshooting steps on this page don't get you up and running, please either [create an issue] or hop on [discord] for help.

## How to Install

You can download the latest installers [here](https://github.com/invoke-ai/InvokeAI/releases).

Note that any releases marked as _pre-release_ are in a beta state. You may experience some issues, but we appreciate your help testing those! For stable/reliable installations, please install the [latest release].
Follow the [Quick Start guide](./installation/quick_start.md) to install Invoke.

## Downloading models and using existing models

The Model Manager tab in the UI provides a few ways to install models, including using your already-downloaded models. You'll see a popup directing you there on first startup. For more information, see the [model install docs].

## Missing models after updating to v4
## Missing models after updating from v3

If you find some models are missing after updating to v4, it's likely they weren't correctly registered before the update and didn't get picked up in the migration.
If you find some models are missing after updating from v3, it's likely they weren't correctly registered before the update and didn't get picked up in the migration.

You can use the `Scan Folder` tab in the Model Manager UI to fix this. The models will either be in the old, now-unused `autoimport` folder, or your `models` folder.
@@ -37,115 +29,27 @@ Follow the same steps to scan and import the missing models.

## Slow generation

- Check the [system requirements] to ensure that your system is capable of generating images.
- Check the `ram` setting in `invokeai.yaml`. This setting tells Invoke how much of your system RAM can be used to cache models. Having this too high or too low can slow things down. That said, it's generally safest to not set this at all and instead let Invoke manage it.
- Check the `vram` setting in `invokeai.yaml`. This setting tells Invoke how much of your GPU VRAM can be used to cache models. Counter-intuitively, if this setting is too high, Invoke will need to do a lot of shuffling of models as it juggles the VRAM cache and the currently-loaded model. The default value of 0.25 generally works well for GPUs without 16GB or more VRAM. Even on a 24GB card, the default works well.
- Check that your generations are happening on your GPU (if you have one). InvokeAI will log what is being used for generation upon startup. If your GPU isn't used, re-install to ensure the correct versions of torch get installed.
- If you are on Windows, you may have exceeded your GPU's VRAM capacity and are using slower [shared GPU memory](#shared-gpu-memory-windows). There's a guide to opt out of this behaviour in the linked FAQ entry.

## Shared GPU Memory (Windows)

!!! tip "Nvidia GPUs with driver 536.40"

    This only applies to current Nvidia cards with driver 536.40 or later, released in June 2023.

When the GPU doesn't have enough VRAM for a task, Windows is able to allocate some of its CPU RAM to the GPU. This is much slower than VRAM, but it does allow the system to generate when it otherwise might not have enough VRAM.

When shared GPU memory is used, generation slows down dramatically - but at least it doesn't crash.

If you'd like to opt out of this behavior and instead get an error when you exceed your GPU's VRAM, follow [this guide from Nvidia](https://nvidia.custhelp.com/app/answers/detail/a_id/5490).

Here's how to get the python path required in the linked guide:

- Run `invoke.bat`.
- Select option 2 for developer console.
- At least one python path will be printed. Copy the path that includes your invoke installation directory (typically the first).

## Installer cannot find python (Windows)

Ensure that you checked **Add python.exe to PATH** when installing Python. This can be found at the bottom of the Python Installer window. If you already have Python installed, you can re-run the python installer, choose the Modify option and check the box.

- Follow the [Low-VRAM mode guide](./features/low-vram.md) to optimize performance.
- Check that your generations are happening on your GPU (if you have one). Invoke will log what is being used for generation upon startup. If your GPU isn't used, re-install and ensure you select the appropriate GPU option.
- If you are on Windows with an Nvidia GPU, you may have exceeded your GPU's VRAM capacity and are triggering Nvidia's "sysmem fallback". There's a guide to opt out of this behaviour in the [Low-VRAM mode guide](./features/low-vram.md).
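To make the "check that your generations are happening on your GPU" advice concrete, the snippet below is a minimal diagnostic you can paste into the developer console's Python interpreter. It only assumes that `torch` is importable from Invoke's virtual environment; it is not part of Invoke itself.

```python
# Minimal GPU-visibility check for the environment Invoke runs in.
import torch

if torch.cuda.is_available():
    # An Nvidia (or ROCm) device is visible; Invoke should generate on it.
    print("GPU:", torch.cuda.get_device_name(0))
elif torch.backends.mps.is_available():
    # Apple Silicon path.
    print("Apple Silicon (MPS) backend is available")
else:
    # No accelerator found: generation will fall back to the (much slower) CPU.
    print("No GPU visible to torch")
```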
## Triton error on startup

This can be safely ignored. InvokeAI doesn't use Triton, but if you are on Linux and wish to dismiss the error, you can install Triton.
This can be safely ignored. Invoke doesn't use Triton, but if you are on Linux and wish to dismiss the error, you can install Triton.

## Updated to 3.4.0 and xformers can’t load C++/CUDA
## Unable to Copy on Firefox

An issue occurred with your PyTorch update. Follow these steps to fix:
Firefox does not allow Invoke to directly access the clipboard by default. As a result, you may be unable to use certain copy functions. You can fix this by configuring Firefox to allow access to write to the clipboard:

1. Launch your invoke.bat / invoke.sh and select the option to open the developer console
2. Run: `pip install ".[xformers]" --upgrade --force-reinstall --extra-index-url https://download.pytorch.org/whl/cu121`
    - If you run into an error with `typing_extensions`, re-open the developer console and run: `pip install -U typing-extensions`

Note that v3.4.0 is an old, unsupported version. Please upgrade to the [latest release].

## Install failed and says `pip` is out of date

An out of date `pip` typically won't cause an installation to fail. The cause of the error can likely be found above the message that says `pip` is out of date.

If you saw that warning but the install went well, don't worry about it (but you can update `pip` afterwards if you'd like).

- Go to `about:config` and click the Accept button
- Search for `dom.events.asyncClipboard.clipboardItem`
- Set it to `true` by clicking the toggle button
- Restart Firefox

## Replicate image found online

Most example images with prompts that you'll find on the internet have been generated using different software, so you can't expect to get identical results. In order to reproduce an image, you need to replicate the exact settings and processing steps, including (but not limited to) the model, the positive and negative prompts, the seed, the sampler, the exact image size, any upscaling steps, etc.
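If the image you are trying to replicate was made with Invoke, its settings are embedded in the PNG and can be read back programmatically. The sketch below is a hedged example: the `invokeai_metadata` key and the field names are assumptions about how Invoke labels its PNG text chunk, and `example.png` is a placeholder path.

```python
# Read embedded generation settings back out of an Invoke-generated PNG.
import json
from PIL import Image

img = Image.open("example.png")  # placeholder path
raw = img.info.get("invokeai_metadata")  # key name is an assumption
if raw is None:
    print("No Invoke metadata found in this image")
else:
    metadata = json.loads(raw)
    # Field names below are assumptions; print the whole dict if unsure.
    for key in ("positive_prompt", "negative_prompt", "seed", "steps", "cfg_scale"):
        print(f"{key}: {metadata.get(key)}")
```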
## OSErrors on Windows while installing dependencies
|
||||
|
||||
During a zip file installation or an update, installation stops with an error like this:
|
||||
|
||||
{:width="800px"}
|
||||
|
||||
To resolve this, re-install the application as described above.
|
||||
|
||||
## HuggingFace install failed due to invalid access token
|
||||
|
||||
Some HuggingFace models require you to authenticate using an [access token].
|
||||
|
||||
Invoke doesn't manage this token for you, but it's easy to set it up:
|
||||
|
||||
- Follow the instructions in the link above to create an access token. Copy it.
|
||||
- Run the launcher script.
|
||||
- Select option 2 (developer console).
|
||||
- Paste the following command:
|
||||
|
||||
```sh
|
||||
python -c "import huggingface_hub; huggingface_hub.login()"
|
||||
```
|
||||
|
||||
- Paste your access token when prompted and press Enter. You won't see anything when you paste it.
|
||||
- Type `n` if prompted about git credentials.
|
||||
|
||||
If you get an error, try the command again - maybe the token didn't paste correctly.
|
||||
|
||||
Once your token is set, start Invoke and try downloading the model again. The installer will automatically use the access token.
|
||||
|
||||
If the install still fails, you may not have access to the model.
|
||||
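If you prefer not to paste the token interactively, `huggingface_hub` also accepts it as an argument. A minimal sketch, assuming the same developer-console environment as above; replace the placeholder token with your own and avoid committing it anywhere:

```python
# Non-interactive variant of the login step above.
from huggingface_hub import login, whoami

login(token="hf_xxx")  # placeholder; paste your real access token here
print(whoami()["name"])  # confirms the token is valid by asking the Hub who you are
```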
## Stable Diffusion XL generation fails after trying to load UNet

InvokeAI is working in other respects, but when trying to generate images with Stable Diffusion XL you get a "Server Error". The text log in the launch window contains this log line above several more lines of error messages:

`INFO --> Loading model:D:\LONG\PATH\TO\MODEL, type sdxl:main:unet`

This failure mode occurs when there is a network glitch during downloading the very large SDXL model.

To address this, first go to the Model Manager and delete the Stable-Diffusion-XL-base-1.X model. Then, click the HuggingFace tab, paste the Repo ID stabilityai/stable-diffusion-xl-base-1.0 and install the model.

## Package dependency conflicts during installation or update

If you have previously installed InvokeAI or another Stable Diffusion package, the installer may occasionally pick up outdated libraries and either the installer or `invoke` will fail with complaints about library conflicts.

To resolve this, re-install the application as described above.

## Invalid configuration file

Everything seems to install OK, but you get a `ValidationError` when starting up the app.
@@ -154,64 +58,9 @@ This is caused by an invalid setting in the `invokeai.yaml` configuration file.

Check the [configuration docs] for more detail about the settings and how to specify them.

## `ModuleNotFoundError: No module named 'controlnet_aux'`
## Out of Memory Errors

`controlnet_aux` is a dependency of Invoke and appears to have been packaged or distributed strangely. Sometimes, it doesn't install correctly. This is outside our control.

If you encounter this error, the solution is to remove the package from the `pip` cache and re-run the Invoke installer so a fresh, working version of `controlnet_aux` can be downloaded and installed:

- Run the Invoke launcher
- Choose the developer console option
- Run this command: `pip cache remove controlnet_aux`
- Close the terminal window
- Download and run the [installer][latest release], selecting your current install location

## Out of Memory Issues

The models are large, VRAM is expensive, and you may find yourself faced with Out of Memory errors when generating images. Here are some tips to reduce the problem:

!!! info "Optimizing for GPU VRAM"

    === "4GB VRAM GPU"

        This should be adequate for 512x512 pixel images using Stable Diffusion 1.5 and derived models, provided that you do not use the NSFW checker. It won't be loaded unless you go into the UI settings and turn it on.

        If you are on a CUDA-enabled GPU, we will automatically use xformers or torch-sdp to reduce VRAM requirements, though you can explicitly configure this. See the [configuration docs].

    === "6GB VRAM GPU"

        This is a border case. Using the SD 1.5 series you should be able to generate images up to 640x640 with the NSFW checker enabled, and up to 1024x1024 with it disabled.

        If you run into persistent memory issues there are a series of environment variables that you can set before launching InvokeAI that alter how the PyTorch machine learning library manages memory. See <https://pytorch.org/docs/stable/notes/cuda.html#memory-management> for a list of these tweaks.

    === "12GB VRAM GPU"

        This should be sufficient to generate larger images up to about 1280x1280.

## Checkpoint Models Load Slowly or Use Too Much RAM

The difference between diffusers models (a folder containing multiple subfolders) and checkpoint models (a file ending with .safetensors or .ckpt) is that InvokeAI is able to load diffusers models into memory incrementally, while checkpoint models must be loaded all at once. With very large models, or systems with limited RAM, you may experience slowdowns and other memory-related issues when loading checkpoint models.

To solve this, go to the Model Manager tab (the cube), select the checkpoint model that's giving you trouble, and press the "Convert" button in the upper right of your browser window. This will convert the checkpoint into a diffusers model, after which loading should be faster and less memory-intensive.

The models are large, VRAM is expensive, and you may find yourself faced with Out of Memory errors when generating images. Follow our [Low-VRAM mode guide](./features/low-vram.md) to configure Invoke to prevent these.

## Memory Leak (Linux)

@@ -253,8 +102,6 @@ Note the differences between memory allocated as chunks in an arena vs. memory a

[model install docs]: ./installation/models.md
[system requirements]: ./installation/requirements.md
[latest release]: https://github.com/invoke-ai/InvokeAI/releases/latest
[create an issue]: https://github.com/invoke-ai/InvokeAI/issues
[discord]: https://discord.gg/ZmtBAhwWhy
[configuration docs]: ./configuration.md
[access token]: https://huggingface.co/docs/hub/security-tokens#how-to-manage-user-access-tokens
@@ -88,13 +88,13 @@ The following commands vary depending on the version of Invoke being installed a

8. Install the `invokeai` package. Substitute the package specifier and version.

    ```sh
    uv pip install <PACKAGE_SPECIFIER>=<VERSION> --python 3.11 --python-preference only-managed --force-reinstall
    uv pip install <PACKAGE_SPECIFIER>==<VERSION> --python 3.11 --python-preference only-managed --force-reinstall
    ```

    If you determined you needed to use a `PyPI` index URL in the previous step, you'll need to add `--index=<INDEX_URL>` like this:

    ```sh
    uv pip install <PACKAGE_SPECIFIER>=<VERSION> --python 3.11 --python-preference only-managed --index=<INDEX_URL> --force-reinstall
    uv pip install <PACKAGE_SPECIFIER>==<VERSION> --python 3.11 --python-preference only-managed --index=<INDEX_URL> --force-reinstall
    ```

9. Deactivate and reactivate your venv so that the invokeai-specific commands become available in the environment:
@@ -99,6 +99,20 @@ We recommend watching our [Getting Started Playlist](https://www.youtube.com/pla

- Using control layers and reference guides.
- Refining images with advanced workflows.

## Troubleshooting

If installation fails, retrying the install in Repair Mode may fix it. There's a checkbox to enable this on the Review step of the install flow.

If that doesn't fix it, [clearing the `uv` cache](https://docs.astral.sh/uv/reference/cli/#uv-cache-clean) might do the trick:

- Open and start the dev console (button at the bottom-left of the launcher).
- Run `uv cache clean`.
- Retry the installation. Enable Repair Mode for good measure.

If you are still unable to install, try installing to a different location and see if that works.

If you still have problems, ask for help on the Invoke [discord](https://discord.gg/ZmtBAhwWhy).

## Other Installation Methods

- You can install the Invoke application as a python package. See our [manual install](./manual.md) docs.
@@ -4,7 +4,9 @@ Invoke runs on Windows 10+, macOS 14+ and Linux (Ubuntu 20.04+ is well-tested).

## Hardware

Hardware requirements vary significantly depending on model and image output size. The requirements below are rough guidelines.
Hardware requirements vary significantly depending on model and image output size.

The requirements below are rough guidelines for best performance. GPUs with less VRAM typically still work, if a bit slower. Follow the [Low-VRAM mode guide](./features/low-vram.md) to optimize performance.

- All Apple Silicon (M1, M2, etc) Macs work, but 16GB+ memory is recommended.
- AMD GPUs are supported on Linux only. The VRAM requirements are the same as Nvidia GPUs.
@@ -858,6 +858,18 @@ async def get_stats() -> Optional[CacheStats]:
    return ApiDependencies.invoker.services.model_manager.load.ram_cache.stats


@model_manager_router.post(
    "/empty_model_cache",
    operation_id="empty_model_cache",
    status_code=200,
)
async def empty_model_cache() -> None:
    """Drop all models from the model cache to free RAM/VRAM. 'Locked' models that are in active use will not be dropped."""
    # Request 1000GB of room in order to force the cache to drop all models.
    ApiDependencies.invoker.services.logger.info("Emptying model cache.")
    ApiDependencies.invoker.services.model_manager.load.ram_cache.make_room(1000 * 2**30)


class HFTokenStatus(str, Enum):
    VALID = "valid"
    INVALID = "invalid"
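A hedged sketch of calling the new endpoint from a script. It assumes the default local server address and that `model_manager_router` is mounted under `/api/v2/models`; adjust the host, port, and prefix to match your install.

```python
# Ask a running Invoke server to drop cached models and free RAM/VRAM.
import requests

resp = requests.post("http://127.0.0.1:9090/api/v2/models/empty_model_cache", timeout=60)
resp.raise_for_status()
print("Model cache emptied")
```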
@@ -10,11 +10,13 @@ from invokeai.app.services.session_queue.session_queue_common import (
    QUEUE_ITEM_STATUS,
    Batch,
    BatchStatus,
    CancelAllExceptCurrentResult,
    CancelByBatchIDsResult,
    CancelByDestinationResult,
    ClearResult,
    EnqueueBatchResult,
    PruneResult,
    RetryItemsResult,
    SessionQueueCountsByDestination,
    SessionQueueItem,
    SessionQueueItemDTO,

@@ -94,6 +96,18 @@ async def Pause(
    return ApiDependencies.invoker.services.session_processor.pause()


@session_queue_router.put(
    "/{queue_id}/cancel_all_except_current",
    operation_id="cancel_all_except_current",
    responses={200: {"model": CancelAllExceptCurrentResult}},
)
async def cancel_all_except_current(
    queue_id: str = Path(description="The queue id to perform this operation on"),
) -> CancelAllExceptCurrentResult:
    """Immediately cancels all queue items except in-processing items"""
    return ApiDependencies.invoker.services.session_queue.cancel_all_except_current(queue_id=queue_id)


@session_queue_router.put(
    "/{queue_id}/cancel_by_batch_ids",
    operation_id="cancel_by_batch_ids",

@@ -122,6 +136,19 @@ async def cancel_by_destination(
)


@session_queue_router.put(
    "/{queue_id}/retry_items_by_id",
    operation_id="retry_items_by_id",
    responses={200: {"model": RetryItemsResult}},
)
async def retry_items_by_id(
    queue_id: str = Path(description="The queue id to perform this operation on"),
    item_ids: list[int] = Body(description="The queue item ids to retry"),
) -> RetryItemsResult:
    """Retries the queue items with the given item ids"""
    return ApiDependencies.invoker.services.session_queue.retry_items_by_id(queue_id=queue_id, item_ids=item_ids)


@session_queue_router.put(
    "/{queue_id}/clear",
    operation_id="clear",
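A hedged sketch of exercising the two new queue operations over HTTP. The `/api/v1/queue` prefix, the `default` queue id, and the example item ids are assumptions; adjust them to your install.

```python
# Cancel pending items (leaving the in-progress one alone), then retry two items by id.
import requests

BASE = "http://127.0.0.1:9090/api/v1/queue/default"

canceled = requests.put(f"{BASE}/cancel_all_except_current", timeout=30)
canceled.raise_for_status()
print(canceled.json())

# retry_items_by_id takes the item ids as the request body (a JSON list of ints).
retried = requests.put(f"{BASE}/retry_items_by_id", json=[123, 124], timeout=30)
retried.raise_for_status()
print(retried.json())
```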
@@ -898,7 +898,7 @@ class DenoiseLatentsInvocation(BaseInvocation):

        ### inpaint
        mask, masked_latents, is_gradient_mask = self.prep_inpaint_mask(context, latents)
        # NOTE: We used to identify inpainting models by inpecting the shape of the loaded UNet model weights. Now we
        # NOTE: We used to identify inpainting models by inspecting the shape of the loaded UNet model weights. Now we
        # use the ModelVariantType config. During testing, there was a report of a user with models that had an
        # incorrect ModelVariantType value. Re-installing the model fixed the issue. If this issue turns out to be
        # prevalent, we will have to revisit how we initialize the inpainting extensions.
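For context, the shape-based heuristic the comment refers to works because SD inpainting UNets concatenate the latents, the masked latents, and the mask, so their first convolution expects 9 input channels instead of 4. The sketch below is a hypothetical reconstruction of that kind of check (assuming a diffusers `UNet2DConditionModel`), not the code Invoke actually shipped.

```python
# Illustrative only: detect an inpainting UNet by the width of its first convolution.
from diffusers import UNet2DConditionModel


def looks_like_inpainting_unet(unet: UNet2DConditionModel) -> bool:
    # Standard SD UNets take 4 latent channels; inpainting variants take 4 + 4 + 1 = 9.
    return unet.conv_in.in_channels == 9
```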
@@ -8,7 +8,7 @@ from invokeai.app.invocations.baseinvocation import (
|
||||
invocation_output,
|
||||
)
|
||||
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
|
||||
from invokeai.app.invocations.model import CLIPField, LoRAField, ModelIdentifierField, TransformerField
|
||||
from invokeai.app.invocations.model import CLIPField, LoRAField, ModelIdentifierField, T5EncoderField, TransformerField
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.backend.model_manager.config import BaseModelType
|
||||
|
||||
@@ -21,6 +21,9 @@ class FluxLoRALoaderOutput(BaseInvocationOutput):
|
||||
default=None, description=FieldDescriptions.transformer, title="FLUX Transformer"
|
||||
)
|
||||
clip: Optional[CLIPField] = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP")
|
||||
t5_encoder: Optional[T5EncoderField] = OutputField(
|
||||
default=None, description=FieldDescriptions.t5_encoder, title="T5 Encoder"
|
||||
)
|
||||
|
||||
|
||||
@invocation(
|
||||
@@ -28,7 +31,7 @@ class FluxLoRALoaderOutput(BaseInvocationOutput):
|
||||
title="FLUX LoRA",
|
||||
tags=["lora", "model", "flux"],
|
||||
category="model",
|
||||
version="1.1.0",
|
||||
version="1.2.0",
|
||||
classification=Classification.Prototype,
|
||||
)
|
||||
class FluxLoRALoaderInvocation(BaseInvocation):
|
||||
@@ -50,6 +53,12 @@ class FluxLoRALoaderInvocation(BaseInvocation):
|
||||
description=FieldDescriptions.clip,
|
||||
input=Input.Connection,
|
||||
)
|
||||
t5_encoder: T5EncoderField | None = InputField(
|
||||
default=None,
|
||||
title="T5 Encoder",
|
||||
description=FieldDescriptions.t5_encoder,
|
||||
input=Input.Connection,
|
||||
)
|
||||
|
||||
def invoke(self, context: InvocationContext) -> FluxLoRALoaderOutput:
|
||||
lora_key = self.lora.key
|
||||
@@ -62,6 +71,8 @@ class FluxLoRALoaderInvocation(BaseInvocation):
|
||||
raise ValueError(f'LoRA "{lora_key}" already applied to transformer.')
|
||||
if self.clip and any(lora.lora.key == lora_key for lora in self.clip.loras):
|
||||
raise ValueError(f'LoRA "{lora_key}" already applied to CLIP encoder.')
|
||||
if self.t5_encoder and any(lora.lora.key == lora_key for lora in self.t5_encoder.loras):
|
||||
raise ValueError(f'LoRA "{lora_key}" already applied to T5 encoder.')
|
||||
|
||||
output = FluxLoRALoaderOutput()
|
||||
|
||||
@@ -82,6 +93,14 @@ class FluxLoRALoaderInvocation(BaseInvocation):
|
||||
weight=self.weight,
|
||||
)
|
||||
)
|
||||
if self.t5_encoder is not None:
|
||||
output.t5_encoder = self.t5_encoder.model_copy(deep=True)
|
||||
output.t5_encoder.loras.append(
|
||||
LoRAField(
|
||||
lora=self.lora,
|
||||
weight=self.weight,
|
||||
)
|
||||
)
|
||||
|
||||
return output
|
||||
|
||||
@@ -91,14 +110,14 @@ class FluxLoRALoaderInvocation(BaseInvocation):
|
||||
title="FLUX LoRA Collection Loader",
|
||||
tags=["lora", "model", "flux"],
|
||||
category="model",
|
||||
version="1.1.0",
|
||||
version="1.3.0",
|
||||
classification=Classification.Prototype,
|
||||
)
|
||||
class FLUXLoRACollectionLoader(BaseInvocation):
|
||||
"""Applies a collection of LoRAs to a FLUX transformer."""
|
||||
|
||||
loras: LoRAField | list[LoRAField] = InputField(
|
||||
description="LoRA models and weights. May be a single LoRA or collection.", title="LoRAs"
|
||||
loras: Optional[LoRAField | list[LoRAField]] = InputField(
|
||||
default=None, description="LoRA models and weights. May be a single LoRA or collection.", title="LoRAs"
|
||||
)
|
||||
|
||||
transformer: Optional[TransformerField] = InputField(
|
||||
@@ -113,13 +132,30 @@ class FLUXLoRACollectionLoader(BaseInvocation):
|
||||
description=FieldDescriptions.clip,
|
||||
input=Input.Connection,
|
||||
)
|
||||
t5_encoder: T5EncoderField | None = InputField(
|
||||
default=None,
|
||||
title="T5 Encoder",
|
||||
description=FieldDescriptions.t5_encoder,
|
||||
input=Input.Connection,
|
||||
)
|
||||
|
||||
def invoke(self, context: InvocationContext) -> FluxLoRALoaderOutput:
|
||||
output = FluxLoRALoaderOutput()
|
||||
loras = self.loras if isinstance(self.loras, list) else [self.loras]
|
||||
added_loras: list[str] = []
|
||||
|
||||
if self.transformer is not None:
|
||||
output.transformer = self.transformer.model_copy(deep=True)
|
||||
|
||||
if self.clip is not None:
|
||||
output.clip = self.clip.model_copy(deep=True)
|
||||
|
||||
if self.t5_encoder is not None:
|
||||
output.t5_encoder = self.t5_encoder.model_copy(deep=True)
|
||||
|
||||
for lora in loras:
|
||||
if lora is None:
|
||||
continue
|
||||
if lora.lora.key in added_loras:
|
||||
continue
|
||||
|
||||
@@ -130,14 +166,13 @@ class FLUXLoRACollectionLoader(BaseInvocation):
|
||||
|
||||
added_loras.append(lora.lora.key)
|
||||
|
||||
if self.transformer is not None:
|
||||
if output.transformer is None:
|
||||
output.transformer = self.transformer.model_copy(deep=True)
|
||||
if self.transformer is not None and output.transformer is not None:
|
||||
output.transformer.loras.append(lora)
|
||||
|
||||
if self.clip is not None:
|
||||
if output.clip is None:
|
||||
output.clip = self.clip.model_copy(deep=True)
|
||||
if self.clip is not None and output.clip is not None:
|
||||
output.clip.loras.append(lora)
|
||||
|
||||
if self.t5_encoder is not None and output.t5_encoder is not None:
|
||||
output.t5_encoder.loras.append(lora)
|
||||
|
||||
return output
|
||||
|
||||
@@ -40,7 +40,7 @@ class FluxModelLoaderOutput(BaseInvocationOutput):
|
||||
title="Flux Main Model",
|
||||
tags=["model", "flux"],
|
||||
category="model",
|
||||
version="1.0.4",
|
||||
version="1.0.5",
|
||||
classification=Classification.Prototype,
|
||||
)
|
||||
class FluxModelLoaderInvocation(BaseInvocation):
|
||||
@@ -87,7 +87,7 @@ class FluxModelLoaderInvocation(BaseInvocation):
|
||||
return FluxModelLoaderOutput(
|
||||
transformer=TransformerField(transformer=transformer, loras=[]),
|
||||
clip=CLIPField(tokenizer=tokenizer, text_encoder=clip_encoder, loras=[], skipped_layers=0),
|
||||
t5_encoder=T5EncoderField(tokenizer=tokenizer2, text_encoder=t5_encoder),
|
||||
t5_encoder=T5EncoderField(tokenizer=tokenizer2, text_encoder=t5_encoder, loras=[]),
|
||||
vae=VAEField(vae=vae),
|
||||
max_seq_len=max_seq_lengths[transformer_config.config_path],
|
||||
)
|
||||
|
||||
@@ -19,7 +19,7 @@ from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.backend.flux.modules.conditioner import HFEncoder
|
||||
from invokeai.backend.model_manager.config import ModelFormat
|
||||
from invokeai.backend.patches.layer_patcher import LayerPatcher
|
||||
from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_CLIP_PREFIX
|
||||
from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_CLIP_PREFIX, FLUX_LORA_T5_PREFIX
|
||||
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
|
||||
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData, FLUXConditioningInfo
|
||||
|
||||
@@ -71,13 +71,45 @@ class FluxTextEncoderInvocation(BaseInvocation):
|
||||
def _t5_encode(self, context: InvocationContext) -> torch.Tensor:
|
||||
prompt = [self.prompt]
|
||||
|
||||
t5_encoder_info = context.models.load(self.t5_encoder.text_encoder)
|
||||
t5_encoder_config = t5_encoder_info.config
|
||||
assert t5_encoder_config is not None
|
||||
|
||||
with (
|
||||
context.models.load(self.t5_encoder.text_encoder) as t5_text_encoder,
|
||||
t5_encoder_info.model_on_device() as (cached_weights, t5_text_encoder),
|
||||
context.models.load(self.t5_encoder.tokenizer) as t5_tokenizer,
|
||||
ExitStack() as exit_stack,
|
||||
):
|
||||
assert isinstance(t5_text_encoder, T5EncoderModel)
|
||||
assert isinstance(t5_tokenizer, (T5Tokenizer, T5TokenizerFast))
|
||||
|
||||
# Determine if the model is quantized.
|
||||
# If the model is quantized, then we need to apply the LoRA weights as sidecar layers. This results in
|
||||
# slower inference than direct patching, but is agnostic to the quantization format.
|
||||
if t5_encoder_config.format in [ModelFormat.T5Encoder, ModelFormat.Diffusers]:
|
||||
model_is_quantized = False
|
||||
elif t5_encoder_config.format in [
|
||||
ModelFormat.BnbQuantizedLlmInt8b,
|
||||
ModelFormat.BnbQuantizednf4b,
|
||||
ModelFormat.GGUFQuantized,
|
||||
]:
|
||||
model_is_quantized = True
|
||||
else:
|
||||
raise ValueError(f"Unsupported model format: {t5_encoder_config.format}")
|
||||
|
||||
# Apply LoRA models to the T5 encoder.
|
||||
# Note: We apply the LoRA after the encoder has been moved to its target device for faster patching.
|
||||
exit_stack.enter_context(
|
||||
LayerPatcher.apply_smart_model_patches(
|
||||
model=t5_text_encoder,
|
||||
patches=self._t5_lora_iterator(context),
|
||||
prefix=FLUX_LORA_T5_PREFIX,
|
||||
dtype=t5_text_encoder.dtype,
|
||||
cached_weights=cached_weights,
|
||||
force_sidecar_patching=model_is_quantized,
|
||||
)
|
||||
)
|
||||
|
||||
t5_encoder = HFEncoder(t5_text_encoder, t5_tokenizer, False, self.t5_max_seq_len)
|
||||
|
||||
context.util.signal_progress("Running T5 encoder")
|
||||
@@ -132,3 +164,10 @@ class FluxTextEncoderInvocation(BaseInvocation):
|
||||
assert isinstance(lora_info.model, ModelPatchRaw)
|
||||
yield (lora_info.model, lora.weight)
|
||||
del lora_info
|
||||
|
||||
def _t5_lora_iterator(self, context: InvocationContext) -> Iterator[Tuple[ModelPatchRaw, float]]:
|
||||
for lora in self.t5_encoder.loras:
|
||||
lora_info = context.models.load(lora.lora)
|
||||
assert isinstance(lora_info.model, ModelPatchRaw)
|
||||
yield (lora_info.model, lora.weight)
|
||||
del lora_info
|
||||
|
||||
@@ -843,7 +843,7 @@ CHANNEL_FORMATS = {
|
||||
"value",
|
||||
],
|
||||
category="image",
|
||||
version="1.2.2",
|
||||
version="1.2.3",
|
||||
)
|
||||
class ImageChannelOffsetInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Add or subtract a value from a specific color channel of an image."""
|
||||
@@ -853,18 +853,22 @@ class ImageChannelOffsetInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
offset: int = InputField(default=0, ge=-255, le=255, description="The amount to adjust the channel by")
|
||||
|
||||
def invoke(self, context: InvocationContext) -> ImageOutput:
|
||||
pil_image = context.images.get_pil(self.image.image_name)
|
||||
image = context.images.get_pil(self.image.image_name, "RGBA")
|
||||
|
||||
# extract the channel and mode from the input and reference tuple
|
||||
mode = CHANNEL_FORMATS[self.channel][0]
|
||||
channel_number = CHANNEL_FORMATS[self.channel][1]
|
||||
|
||||
# Convert PIL image to new format
|
||||
converted_image = numpy.array(pil_image.convert(mode)).astype(int)
|
||||
converted_image = numpy.array(image.convert(mode)).astype(int)
|
||||
image_channel = converted_image[:, :, channel_number]
|
||||
|
||||
# Adjust the value, clipping to 0..255
|
||||
image_channel = numpy.clip(image_channel + self.offset, 0, 255)
|
||||
if self.channel == "Hue (HSV)":
|
||||
# loop around the values because hue is special
|
||||
image_channel = (image_channel + self.offset) % 256
|
||||
else:
|
||||
# Adjust the value, clipping to 0..255
|
||||
image_channel = numpy.clip(image_channel + self.offset, 0, 255)
|
||||
|
||||
# Put the channel back into the image
|
||||
converted_image[:, :, channel_number] = image_channel
|
||||
@@ -872,6 +876,10 @@ class ImageChannelOffsetInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
# Convert back to RGBA format and output
|
||||
pil_image = Image.fromarray(converted_image.astype(numpy.uint8), mode=mode).convert("RGBA")
|
||||
|
||||
# restore the alpha channel
|
||||
if self.channel != "Alpha (RGBA)":
|
||||
pil_image.putalpha(image.getchannel("A"))
|
||||
|
||||
image_dto = context.images.save(image=pil_image)
|
||||
|
||||
return ImageOutput.build(image_dto)
|
||||
@@ -899,7 +907,7 @@ class ImageChannelOffsetInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"value",
|
||||
],
|
||||
category="image",
|
||||
version="1.2.2",
|
||||
version="1.2.3",
|
||||
)
|
||||
class ImageChannelMultiplyInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Scale a specific color channel of an image."""
|
||||
@@ -910,14 +918,14 @@ class ImageChannelMultiplyInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
invert_channel: bool = InputField(default=False, description="Invert the channel after scaling")
|
||||
|
||||
def invoke(self, context: InvocationContext) -> ImageOutput:
|
||||
pil_image = context.images.get_pil(self.image.image_name)
|
||||
image = context.images.get_pil(self.image.image_name, "RGBA")
|
||||
|
||||
# extract the channel and mode from the input and reference tuple
|
||||
mode = CHANNEL_FORMATS[self.channel][0]
|
||||
channel_number = CHANNEL_FORMATS[self.channel][1]
|
||||
|
||||
# Convert PIL image to new format
|
||||
converted_image = numpy.array(pil_image.convert(mode)).astype(float)
|
||||
converted_image = numpy.array(image.convert(mode)).astype(float)
|
||||
image_channel = converted_image[:, :, channel_number]
|
||||
|
||||
# Adjust the value, clipping to 0..255
|
||||
@@ -933,6 +941,10 @@ class ImageChannelMultiplyInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
# Convert back to RGBA format and output
|
||||
pil_image = Image.fromarray(converted_image.astype(numpy.uint8), mode=mode).convert("RGBA")
|
||||
|
||||
# restore the alpha channel
|
||||
if self.channel != "Alpha (RGBA)":
|
||||
pil_image.putalpha(image.getchannel("A"))
|
||||
|
||||
image_dto = context.images.save(image=pil_image)
|
||||
|
||||
return ImageOutput.build(image_dto)
|
||||
|
||||
@@ -86,7 +86,7 @@ class AlphaMaskToTensorInvocation(BaseInvocation):
    title="Invert Tensor Mask",
    tags=["conditioning"],
    category="conditioning",
    version="1.0.0",
    version="1.1.0",
    classification=Classification.Beta,
)
class InvertTensorMaskInvocation(BaseInvocation):

@@ -96,6 +96,15 @@ class InvertTensorMaskInvocation(BaseInvocation):

    def invoke(self, context: InvocationContext) -> MaskOutput:
        mask = context.tensors.load(self.mask.tensor_name)

        # Verify dtype and shape.
        assert mask.dtype == torch.bool
        assert mask.dim() in [2, 3]

        # Unsqueeze the channel dimension if it is missing. The MaskOutput type expects a single channel.
        if mask.dim() == 2:
            mask = mask.unsqueeze(0)

        inverted = ~mask

        return MaskOutput(
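The behavioural change here is small but easy to miss: 2D masks now get a channel dimension before inversion. A standalone torch sketch of the same logic:

```python
# Reproduce InvertTensorMaskInvocation's core logic outside of Invoke.
import torch

mask = torch.zeros(64, 64, dtype=torch.bool)  # a 2D mask with no channel dimension
mask[16:48, 16:48] = True

if mask.dim() == 2:
    mask = mask.unsqueeze(0)  # -> [1, H, W], the shape MaskOutput expects

inverted = ~mask
print(inverted.shape, inverted.dtype)  # torch.Size([1, 64, 64]) torch.bool
```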
@@ -18,6 +18,7 @@ from invokeai.app.invocations.fields import (
|
||||
UIType,
|
||||
)
|
||||
from invokeai.app.invocations.model import ModelIdentifierField
|
||||
from invokeai.app.invocations.primitives import StringOutput
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.app.util.controlnet_utils import CONTROLNET_MODE_VALUES, CONTROLNET_RESIZE_VALUES
|
||||
from invokeai.version.invokeai_version import __version__
|
||||
@@ -275,3 +276,33 @@ class CoreMetadataInvocation(BaseInvocation):
|
||||
return MetadataOutput(metadata=MetadataField.model_validate(as_dict))
|
||||
|
||||
model_config = ConfigDict(extra="allow")
|
||||
|
||||
|
||||
@invocation(
|
||||
"metadata_field_extractor",
|
||||
title="Metadata Field Extractor",
|
||||
tags=["metadata"],
|
||||
category="metadata",
|
||||
version="1.0.0",
|
||||
)
|
||||
class MetadataFieldExtractorInvocation(BaseInvocation):
|
||||
"""Extracts the text value from an image's metadata given a key.
|
||||
Raises an error if the image has no metadata or if the value is not a string (nesting not permitted)."""
|
||||
|
||||
image: ImageField = InputField(description="The image to extract metadata from")
|
||||
key: str = InputField(description="The key in the image's metadata to extract the value from")
|
||||
|
||||
def invoke(self, context: InvocationContext) -> StringOutput:
|
||||
image_name = self.image.image_name
|
||||
|
||||
metadata = context.images.get_metadata(image_name=image_name)
|
||||
if not metadata:
|
||||
raise ValueError(f"No metadata found on image {image_name}")
|
||||
|
||||
try:
|
||||
val = metadata.root[self.key]
|
||||
if not isinstance(val, str):
|
||||
raise ValueError(f"Metadata at key '{self.key}' must be a string")
|
||||
return StringOutput(value=val)
|
||||
except KeyError as e:
|
||||
raise ValueError(f"No key '{self.key}' found in the metadata for {image_name}") from e
|
||||
|
||||
@@ -68,6 +68,7 @@ class CLIPField(BaseModel):
|
||||
class T5EncoderField(BaseModel):
|
||||
tokenizer: ModelIdentifierField = Field(description="Info to load tokenizer submodel")
|
||||
text_encoder: ModelIdentifierField = Field(description="Info to load text_encoder submodel")
|
||||
loras: List[LoRAField] = Field(description="LoRAs to apply on model loading")
|
||||
|
||||
|
||||
class VAEField(BaseModel):
|
||||
@@ -205,7 +206,7 @@ class LoRALoaderInvocation(BaseInvocation):
|
||||
lora_key = self.lora.key
|
||||
|
||||
if not context.models.exists(lora_key):
|
||||
raise Exception(f"Unkown lora: {lora_key}!")
|
||||
raise Exception(f"Unknown lora: {lora_key}!")
|
||||
|
||||
if self.unet is not None and any(lora.lora.key == lora_key for lora in self.unet.loras):
|
||||
raise Exception(f'LoRA "{lora_key}" already applied to unet')
|
||||
@@ -256,12 +257,12 @@ class LoRASelectorInvocation(BaseInvocation):
|
||||
return LoRASelectorOutput(lora=LoRAField(lora=self.lora, weight=self.weight))
|
||||
|
||||
|
||||
@invocation("lora_collection_loader", title="LoRA Collection Loader", tags=["model"], category="model", version="1.0.0")
|
||||
@invocation("lora_collection_loader", title="LoRA Collection Loader", tags=["model"], category="model", version="1.1.0")
|
||||
class LoRACollectionLoader(BaseInvocation):
|
||||
"""Applies a collection of LoRAs to the provided UNet and CLIP models."""
|
||||
|
||||
loras: LoRAField | list[LoRAField] = InputField(
|
||||
description="LoRA models and weights. May be a single LoRA or collection.", title="LoRAs"
|
||||
loras: Optional[LoRAField | list[LoRAField]] = InputField(
|
||||
default=None, description="LoRA models and weights. May be a single LoRA or collection.", title="LoRAs"
|
||||
)
|
||||
unet: Optional[UNetField] = InputField(
|
||||
default=None,
|
||||
@@ -281,7 +282,14 @@ class LoRACollectionLoader(BaseInvocation):
|
||||
loras = self.loras if isinstance(self.loras, list) else [self.loras]
|
||||
added_loras: list[str] = []
|
||||
|
||||
if self.unet is not None:
|
||||
output.unet = self.unet.model_copy(deep=True)
|
||||
if self.clip is not None:
|
||||
output.clip = self.clip.model_copy(deep=True)
|
||||
|
||||
for lora in loras:
|
||||
if lora is None:
|
||||
continue
|
||||
if lora.lora.key in added_loras:
|
||||
continue
|
||||
|
||||
@@ -292,14 +300,10 @@ class LoRACollectionLoader(BaseInvocation):
|
||||
|
||||
added_loras.append(lora.lora.key)
|
||||
|
||||
if self.unet is not None:
|
||||
if output.unet is None:
|
||||
output.unet = self.unet.model_copy(deep=True)
|
||||
if self.unet is not None and output.unet is not None:
|
||||
output.unet.loras.append(lora)
|
||||
|
||||
if self.clip is not None:
|
||||
if output.clip is None:
|
||||
output.clip = self.clip.model_copy(deep=True)
|
||||
if self.clip is not None and output.clip is not None:
|
||||
output.clip.loras.append(lora)
|
||||
|
||||
return output
|
||||
@@ -399,13 +403,13 @@ class SDXLLoRALoaderInvocation(BaseInvocation):
|
||||
title="SDXL LoRA Collection Loader",
|
||||
tags=["model"],
|
||||
category="model",
|
||||
version="1.0.0",
|
||||
version="1.1.0",
|
||||
)
|
||||
class SDXLLoRACollectionLoader(BaseInvocation):
|
||||
"""Applies a collection of SDXL LoRAs to the provided UNet and CLIP models."""
|
||||
|
||||
loras: LoRAField | list[LoRAField] = InputField(
|
||||
description="LoRA models and weights. May be a single LoRA or collection.", title="LoRAs"
|
||||
loras: Optional[LoRAField | list[LoRAField]] = InputField(
|
||||
default=None, description="LoRA models and weights. May be a single LoRA or collection.", title="LoRAs"
|
||||
)
|
||||
unet: Optional[UNetField] = InputField(
|
||||
default=None,
|
||||
@@ -431,7 +435,18 @@ class SDXLLoRACollectionLoader(BaseInvocation):
|
||||
loras = self.loras if isinstance(self.loras, list) else [self.loras]
|
||||
added_loras: list[str] = []
|
||||
|
||||
if self.unet is not None:
|
||||
output.unet = self.unet.model_copy(deep=True)
|
||||
|
||||
if self.clip is not None:
|
||||
output.clip = self.clip.model_copy(deep=True)
|
||||
|
||||
if self.clip2 is not None:
|
||||
output.clip2 = self.clip2.model_copy(deep=True)
|
||||
|
||||
for lora in loras:
|
||||
if lora is None:
|
||||
continue
|
||||
if lora.lora.key in added_loras:
|
||||
continue
|
||||
|
||||
@@ -442,19 +457,13 @@ class SDXLLoRACollectionLoader(BaseInvocation):
|
||||
|
||||
added_loras.append(lora.lora.key)
|
||||
|
||||
if self.unet is not None:
|
||||
if output.unet is None:
|
||||
output.unet = self.unet.model_copy(deep=True)
|
||||
if self.unet is not None and output.unet is not None:
|
||||
output.unet.loras.append(lora)
|
||||
|
||||
if self.clip is not None:
|
||||
if output.clip is None:
|
||||
output.clip = self.clip.model_copy(deep=True)
|
||||
if self.clip is not None and output.clip is not None:
|
||||
output.clip.loras.append(lora)
|
||||
|
||||
if self.clip2 is not None:
|
||||
if output.clip2 is None:
|
||||
output.clip2 = self.clip2.model_copy(deep=True)
|
||||
if self.clip2 is not None and output.clip2 is not None:
|
||||
output.clip2.loras.append(lora)
|
||||
|
||||
return output
|
||||
@@ -472,7 +481,7 @@ class VAELoaderInvocation(BaseInvocation):
|
||||
key = self.vae_model.key
|
||||
|
||||
if not context.models.exists(key):
|
||||
raise Exception(f"Unkown vae: {key}!")
|
||||
raise Exception(f"Unknown vae: {key}!")
|
||||
|
||||
return VAEOutput(vae=VAEField(vae=self.vae_model))
|
||||
|
||||
|
||||
@@ -416,6 +416,7 @@ class ColorInvocation(BaseInvocation):

class MaskOutput(BaseInvocationOutput):
    """A torch mask tensor."""

    # shape: [1, H, W], dtype: bool
    mask: TensorField = OutputField(description="The mask.")
    width: int = OutputField(description="The width of the mask in pixels.")
    height: int = OutputField(description="The height of the mask in pixels.")
@@ -99,6 +99,6 @@ class Sd3ModelLoaderInvocation(BaseInvocation):
            transformer=TransformerField(transformer=transformer, loras=[]),
            clip_l=CLIPField(tokenizer=tokenizer_l, text_encoder=clip_encoder_l, loras=[], skipped_layers=0),
            clip_g=CLIPField(tokenizer=tokenizer_g, text_encoder=clip_encoder_g, loras=[], skipped_layers=0),
            t5_encoder=T5EncoderField(tokenizer=tokenizer_t5, text_encoder=t5_encoder),
            t5_encoder=T5EncoderField(tokenizer=tokenizer_t5, text_encoder=t5_encoder, loras=[]),
            vae=VAEField(vae=vae),
        )
@@ -49,7 +49,7 @@ class SAMPointsField(BaseModel):
    title="Segment Anything",
    tags=["prompt", "segmentation"],
    category="segmentation",
    version="1.1.0",
    version="1.2.0",
)
class SegmentAnythingInvocation(BaseInvocation):
    """Runs a Segment Anything Model."""

@@ -96,8 +96,10 @@ class SegmentAnythingInvocation(BaseInvocation):
        # masks contains bool values, so we merge them via max-reduce.
        combined_mask, _ = torch.stack(masks).max(dim=0)

        # Unsqueeze the channel dimension.
        combined_mask = combined_mask.unsqueeze(0)
        mask_tensor_name = context.tensors.save(combined_mask)
        height, width = combined_mask.shape
        _, height, width = combined_mask.shape
        return MaskOutput(mask=TensorField(tensor_name=mask_tensor_name), width=width, height=height)

    @staticmethod
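The fix above is about tensor shapes: after the unsqueeze, the combined mask is `[1, H, W]`, so unpacking only two values from `.shape` would fail. A standalone torch sketch of the corrected flow:

```python
# Merge several boolean masks and read height/width from the [1, H, W] result.
import torch

masks = [torch.rand(64, 64) > 0.5 for _ in range(3)]  # stand-ins for per-point SAM masks
combined_mask, _ = torch.stack(masks).max(dim=0)  # bool masks merge via max-reduce
combined_mask = combined_mask.unsqueeze(0)  # add the channel dimension -> [1, H, W]
_, height, width = combined_mask.shape  # the corrected unpacking
print(height, width)
```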
@@ -28,6 +28,7 @@ from invokeai.app.services.events.events_common import (
|
||||
ModelLoadCompleteEvent,
|
||||
ModelLoadStartedEvent,
|
||||
QueueClearedEvent,
|
||||
QueueItemsRetriedEvent,
|
||||
QueueItemStatusChangedEvent,
|
||||
)
|
||||
|
||||
@@ -39,6 +40,7 @@ if TYPE_CHECKING:
|
||||
from invokeai.app.services.session_queue.session_queue_common import (
|
||||
BatchStatus,
|
||||
EnqueueBatchResult,
|
||||
RetryItemsResult,
|
||||
SessionQueueItem,
|
||||
SessionQueueStatus,
|
||||
)
|
||||
@@ -99,6 +101,10 @@ class EventServiceBase:
|
||||
"""Emitted when a batch is enqueued"""
|
||||
self.dispatch(BatchEnqueuedEvent.build(enqueue_result))
|
||||
|
||||
def emit_queue_items_retried(self, retry_result: "RetryItemsResult") -> None:
|
||||
"""Emitted when a list of queue items are retried"""
|
||||
self.dispatch(QueueItemsRetriedEvent.build(retry_result))
|
||||
|
||||
def emit_queue_cleared(self, queue_id: str) -> None:
|
||||
"""Emitted when a queue is cleared"""
|
||||
self.dispatch(QueueClearedEvent.build(queue_id))
|
||||
|
||||
@@ -10,6 +10,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
|
||||
QUEUE_ITEM_STATUS,
|
||||
BatchStatus,
|
||||
EnqueueBatchResult,
|
||||
RetryItemsResult,
|
||||
SessionQueueItem,
|
||||
SessionQueueStatus,
|
||||
)
|
||||
@@ -290,6 +291,22 @@ class BatchEnqueuedEvent(QueueEventBase):
|
||||
)
|
||||
|
||||
|
||||
@payload_schema.register
|
||||
class QueueItemsRetriedEvent(QueueEventBase):
|
||||
"""Event model for queue_items_retried"""
|
||||
|
||||
__event_name__ = "queue_items_retried"
|
||||
|
||||
retried_item_ids: list[int] = Field(description="The IDs of the queue items that were retried")
|
||||
|
||||
@classmethod
|
||||
def build(cls, retry_result: RetryItemsResult) -> "QueueItemsRetriedEvent":
|
||||
return cls(
|
||||
queue_id=retry_result.queue_id,
|
||||
retried_item_ids=retry_result.retried_item_ids,
|
||||
)
|
||||
|
||||
|
||||
@payload_schema.register
|
||||
class QueueClearedEvent(QueueEventBase):
|
||||
"""Event model for queue_cleared"""
|
||||
|
||||
@@ -1,4 +1,4 @@
# TODO: Should these excpetions subclass existing python exceptions?
# TODO: Should these exceptions subclass existing python exceptions?
class ModelImageFileNotFoundException(Exception):
    """Raised when an image file is not found in storage."""
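One way the TODO could be resolved is to derive the custom exception from the closest built-in, so callers can catch either type. A hypothetical sketch, not what the module currently does:

```python
# Hypothetical: tie the storage exception to the built-in it most resembles.
class ModelImageFileNotFoundException(FileNotFoundError):
    """Raised when an image file is not found in storage."""
```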
@@ -5,6 +5,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
|
||||
QUEUE_ITEM_STATUS,
|
||||
Batch,
|
||||
BatchStatus,
|
||||
CancelAllExceptCurrentResult,
|
||||
CancelByBatchIDsResult,
|
||||
CancelByDestinationResult,
|
||||
CancelByQueueIDResult,
|
||||
@@ -13,6 +14,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
|
||||
IsEmptyResult,
|
||||
IsFullResult,
|
||||
PruneResult,
|
||||
RetryItemsResult,
|
||||
SessionQueueCountsByDestination,
|
||||
SessionQueueItem,
|
||||
SessionQueueItemDTO,
|
||||
@@ -112,6 +114,11 @@ class SessionQueueBase(ABC):
|
||||
"""Cancels all queue items with matching queue ID"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def cancel_all_except_current(self, queue_id: str) -> CancelAllExceptCurrentResult:
|
||||
"""Cancels all queue items except in-progress items"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def list_queue_items(
|
||||
self,
|
||||
@@ -133,3 +140,8 @@ class SessionQueueBase(ABC):
|
||||
def set_queue_item_session(self, item_id: int, session: GraphExecutionState) -> SessionQueueItem:
|
||||
"""Sets the session for a session queue item. Use this to update the session state."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def retry_items_by_id(self, queue_id: str, item_ids: list[int]) -> RetryItemsResult:
|
||||
"""Retries the given queue items"""
|
||||
pass
|
||||
|
||||
@@ -234,6 +234,9 @@ class SessionQueueItemWithoutGraph(BaseModel):
|
||||
field_values: Optional[list[NodeFieldValue]] = Field(
|
||||
default=None, description="The field values that were used for this queue item"
|
||||
)
|
||||
retried_from_item_id: Optional[int] = Field(
|
||||
default=None, description="The item_id of the queue item that this item was retried from"
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def queue_item_dto_from_dict(cls, queue_item_dict: dict) -> "SessionQueueItemDTO":
|
||||
@@ -344,6 +347,11 @@ class EnqueueBatchResult(BaseModel):
|
||||
priority: int = Field(description="The priority of the enqueued batch")
|
||||
|
||||
|
||||
class RetryItemsResult(BaseModel):
|
||||
queue_id: str = Field(description="The ID of the queue")
|
||||
retried_item_ids: list[int] = Field(description="The IDs of the queue items that were retried")
|
||||
|
||||
|
||||
class ClearResult(BaseModel):
|
||||
"""Result of clearing the session queue"""
|
||||
|
||||
@@ -374,6 +382,12 @@ class CancelByQueueIDResult(CancelByBatchIDsResult):
|
||||
pass
|
||||
|
||||
|
||||
class CancelAllExceptCurrentResult(CancelByBatchIDsResult):
|
||||
"""Result of canceling all except current"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class IsEmptyResult(BaseModel):
|
||||
"""Result of checking if the session queue is empty"""
|
||||
|
||||
@@ -475,6 +489,7 @@ class SessionQueueValueToInsert(NamedTuple):
|
||||
workflow: Optional[str] # workflow json
|
||||
origin: str | None
|
||||
destination: str | None
|
||||
retried_from_item_id: int | None = None
|
||||
|
||||
|
||||
ValuesToInsert: TypeAlias = list[SessionQueueValueToInsert]
|
||||
@@ -487,16 +502,16 @@ def prepare_values_to_insert(queue_id: str, batch: Batch, priority: int, max_new
|
||||
session.id = uuid_string()
|
||||
values_to_insert.append(
|
||||
SessionQueueValueToInsert(
|
||||
queue_id, # queue_id
|
||||
session.model_dump_json(warnings=False, exclude_none=True), # session (json)
|
||||
session.id, # session_id
|
||||
batch.batch_id, # batch_id
|
||||
queue_id=queue_id,
|
||||
session=session.model_dump_json(warnings=False, exclude_none=True), # as json
|
||||
session_id=session.id,
|
||||
batch_id=batch.batch_id,
|
||||
# must use pydantic_encoder bc field_values is a list of models
|
||||
json.dumps(field_values, default=to_jsonable_python) if field_values else None, # field_values (json)
|
||||
priority, # priority
|
||||
json.dumps(workflow, default=to_jsonable_python) if workflow else None, # workflow (json)
|
||||
batch.origin, # origin
|
||||
batch.destination, # destination
|
||||
field_values=json.dumps(field_values, default=to_jsonable_python) if field_values else None, # as json
|
||||
priority=priority,
|
||||
workflow=json.dumps(workflow, default=to_jsonable_python) if workflow else None, # as json
|
||||
origin=batch.origin,
|
||||
destination=batch.destination,
|
||||
)
|
||||
)
|
||||
return values_to_insert
|
||||
|
||||
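A minimal sketch (hypothetical class, not the project's actual model) of why the keyword-argument form above is the safer way to build a NamedTuple that is later unpacked positionally by `executemany`: the field declaration order, not the call-site order, determines the tuple layout that must line up with the INSERT placeholders.

from typing import NamedTuple, Optional

class RowToInsert(NamedTuple):  # hypothetical stand-in for SessionQueueValueToInsert
    queue_id: str
    session: str
    priority: int
    retried_from_item_id: Optional[int] = None

# Keyword arguments may be given in any order; the resulting tuple still
# follows the field declaration order expected by the SQL placeholders.
row = RowToInsert(priority=0, queue_id="default", session="{}")
assert tuple(row) == ("default", "{}", 0, None)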
@@ -1,7 +1,10 @@
import json
import sqlite3
import threading
from typing import Optional, Union, cast

from pydantic_core import to_jsonable_python

from invokeai.app.services.invoker import Invoker
from invokeai.app.services.session_queue.session_queue_base import SessionQueueBase
from invokeai.app.services.session_queue.session_queue_common import (
@@ -9,6 +12,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
    QUEUE_ITEM_STATUS,
    Batch,
    BatchStatus,
    CancelAllExceptCurrentResult,
    CancelByBatchIDsResult,
    CancelByDestinationResult,
    CancelByQueueIDResult,
@@ -17,11 +21,13 @@ from invokeai.app.services.session_queue.session_queue_common import (
    IsEmptyResult,
    IsFullResult,
    PruneResult,
    RetryItemsResult,
    SessionQueueCountsByDestination,
    SessionQueueItem,
    SessionQueueItemDTO,
    SessionQueueItemNotFoundError,
    SessionQueueStatus,
    SessionQueueValueToInsert,
    calc_session_count,
    prepare_values_to_insert,
)
@@ -129,8 +135,8 @@ class SqliteSessionQueue(SessionQueueBase):

            self.__cursor.executemany(
                """--sql
                INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority, workflow, origin, destination)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
                INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority, workflow, origin, destination, retried_from_item_id)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                """,
                values_to_insert,
            )
@@ -510,6 +516,39 @@ class SqliteSessionQueue(SessionQueueBase):
            self.__lock.release()
        return CancelByQueueIDResult(canceled=count)

    def cancel_all_except_current(self, queue_id: str) -> CancelAllExceptCurrentResult:
        try:
            where = """--sql
                WHERE
                  queue_id == ?
                  AND status == 'pending'
                """
            self.__lock.acquire()
            self.__cursor.execute(
                f"""--sql
                SELECT COUNT(*)
                FROM session_queue
                {where};
                """,
                (queue_id,),
            )
            count = self.__cursor.fetchone()[0]
            self.__cursor.execute(
                f"""--sql
                UPDATE session_queue
                SET status = 'canceled'
                {where};
                """,
                (queue_id,),
            )
            self.__conn.commit()
        except Exception:
            self.__conn.rollback()
            raise
        finally:
            self.__lock.release()
        return CancelAllExceptCurrentResult(canceled=count)

    def get_queue_item(self, item_id: int) -> SessionQueueItem:
        try:
            self.__lock.acquire()
@@ -727,3 +766,71 @@ class SqliteSessionQueue(SessionQueueBase):
            canceled=counts.get("canceled", 0),
            total=total,
        )

    def retry_items_by_id(self, queue_id: str, item_ids: list[int]) -> RetryItemsResult:
        """Retries the given queue items"""
        try:
            self.__lock.acquire()

            values_to_insert: list[SessionQueueValueToInsert] = []
            retried_item_ids: list[int] = []

            for item_id in item_ids:
                queue_item = self.get_queue_item(item_id)

                if queue_item.status not in ("failed", "canceled"):
                    continue

                retried_item_ids.append(item_id)

                field_values_json = (
                    json.dumps(queue_item.field_values, default=to_jsonable_python) if queue_item.field_values else None
                )
                workflow_json = (
                    json.dumps(queue_item.workflow, default=to_jsonable_python) if queue_item.workflow else None
                )
                cloned_session = GraphExecutionState(graph=queue_item.session.graph)
                cloned_session_json = cloned_session.model_dump_json(warnings=False, exclude_none=True)

                retried_from_item_id = (
                    queue_item.retried_from_item_id
                    if queue_item.retried_from_item_id is not None
                    else queue_item.item_id
                )

                value_to_insert = SessionQueueValueToInsert(
                    queue_id=queue_item.queue_id,
                    batch_id=queue_item.batch_id,
                    destination=queue_item.destination,
                    field_values=field_values_json,
                    origin=queue_item.origin,
                    priority=queue_item.priority,
                    workflow=workflow_json,
                    session=cloned_session_json,
                    session_id=cloned_session.id,
                    retried_from_item_id=retried_from_item_id,
                )
                values_to_insert.append(value_to_insert)

            # TODO(psyche): Handle max queue size?

            self.__cursor.executemany(
                """--sql
                INSERT INTO session_queue (queue_id, session, session_id, batch_id, field_values, priority, workflow, origin, destination, retried_from_item_id)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                """,
                values_to_insert,
            )

            self.__conn.commit()
        except Exception:
            self.__conn.rollback()
            raise
        finally:
            self.__lock.release()
        retry_result = RetryItemsResult(
            queue_id=queue_id,
            retried_item_ids=retried_item_ids,
        )
        self.__invoker.services.events.emit_queue_items_retried(retry_result)
        return retry_result
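A simplified sketch (hypothetical item ids, not InvokeAI's API) of the lineage rule applied above when cloning a failed item: every retry points back to the original item rather than to the retry it was created from, so chains of retries never grow.

def lineage_root(retried_from_item_id: int | None, item_id: int) -> int:
    # Mirrors the conditional used when building the retried row above.
    return retried_from_item_id if retried_from_item_id is not None else item_id

assert lineage_root(None, 101) == 101   # first retry of item 101 records root 101
assert lineage_root(101, 205) == 101    # retrying the retry still records root 101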
@@ -51,15 +51,18 @@ class Edge(BaseModel):
    source: EdgeConnection = Field(description="The connection for the edge's from node and field")
    destination: EdgeConnection = Field(description="The connection for the edge's to node and field")

    def __str__(self):
        return f"{self.source.node_id}.{self.source.field} -> {self.destination.node_id}.{self.destination.field}"


def get_output_field(node: BaseInvocation, field: str) -> Any:
def get_output_field_type(node: BaseInvocation, field: str) -> Any:
    node_type = type(node)
    node_outputs = get_type_hints(node_type.get_output_annotation())
    node_output_field = node_outputs.get(field) or None
    return node_output_field


def get_input_field(node: BaseInvocation, field: str) -> Any:
def get_input_field_type(node: BaseInvocation, field: str) -> Any:
    node_type = type(node)
    node_inputs = get_type_hints(node_type)
    node_input_field = node_inputs.get(field) or None
@@ -93,6 +96,10 @@ def is_list_or_contains_list(t):
    return False


def is_any(t: Any) -> bool:
    return t == Any or Any in get_args(t)


def are_connection_types_compatible(from_type: Any, to_type: Any) -> bool:
    if not from_type:
        return False
@@ -102,13 +109,7 @@ def are_connection_types_compatible(from_type: Any, to_type: Any) -> bool:
    # TODO: this is pretty forgiving on generic types. Clean that up (need to handle optionals and such)
    if from_type and to_type:
        # Ports are compatible
        if (
            from_type == to_type
            or from_type == Any
            or to_type == Any
            or Any in get_args(from_type)
            or Any in get_args(to_type)
        ):
        if from_type == to_type or is_any(from_type) or is_any(to_type):
            return True

        if from_type in get_args(to_type):
@@ -140,10 +141,10 @@ def are_connections_compatible(
    """Determines if a connection between fields of two nodes is compatible."""

    # TODO: handle iterators and collectors
    from_node_field = get_output_field(from_node, from_field)
    to_node_field = get_input_field(to_node, to_field)
    from_type = get_output_field_type(from_node, from_field)
    to_type = get_input_field_type(to_node, to_field)

    return are_connection_types_compatible(from_node_field, to_node_field)
    return are_connection_types_compatible(from_type, to_type)


T = TypeVar("T")
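A minimal sketch of the `is_any` semantics introduced above: a type counts as a wildcard when it is `typing.Any` itself or when `Any` appears among its type arguments. The helper is re-declared here so the snippet runs standalone.

from typing import Any, Union, get_args

def is_any(t: Any) -> bool:
    return t == Any or Any in get_args(t)

assert is_any(Any)
assert is_any(Union[int, Any])   # Any appears in the union's arguments
assert not is_any(list[int])     # no Any anywhere in the type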
@@ -440,17 +441,19 @@ class Graph(BaseModel):
                self.get_node(edge.destination.node_id),
                edge.destination.field,
            ):
                raise InvalidEdgeError(
                    f"Invalid edge from {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}"
                )
                raise InvalidEdgeError(f"Edge source and target types do not match ({edge})")

        # Validate all iterators & collectors
        # TODO: may need to validate all iterators & collectors in subgraphs so edge connections in parent graphs will be available
        for node in self.nodes.values():
            if isinstance(node, IterateInvocation) and not self._is_iterator_connection_valid(node.id):
                raise InvalidEdgeError(f"Invalid iterator node {node.id}")
            if isinstance(node, CollectInvocation) and not self._is_collector_connection_valid(node.id):
                raise InvalidEdgeError(f"Invalid collector node {node.id}")
            if isinstance(node, IterateInvocation):
                err = self._is_iterator_connection_valid(node.id)
                if err is not None:
                    raise InvalidEdgeError(f"Invalid iterator node ({node.id}): {err}")
            if isinstance(node, CollectInvocation):
                err = self._is_collector_connection_valid(node.id)
                if err is not None:
                    raise InvalidEdgeError(f"Invalid collector node ({node.id}): {err}")

        return None

@@ -477,11 +480,11 @@ class Graph(BaseModel):

    def _is_destination_field_Any(self, edge: Edge) -> bool:
        """Checks if the destination field for an edge is of type typing.Any"""
        return get_input_field(self.get_node(edge.destination.node_id), edge.destination.field) == Any
        return get_input_field_type(self.get_node(edge.destination.node_id), edge.destination.field) == Any

    def _is_destination_field_list_of_Any(self, edge: Edge) -> bool:
        """Checks if the destination field for an edge is of type typing.Any"""
        return get_input_field(self.get_node(edge.destination.node_id), edge.destination.field) == list[Any]
        return get_input_field_type(self.get_node(edge.destination.node_id), edge.destination.field) == list[Any]

    def _validate_edge(self, edge: Edge):
        """Validates that a new edge doesn't create a cycle in the graph"""
@@ -491,55 +494,40 @@ class Graph(BaseModel):
            from_node = self.get_node(edge.source.node_id)
            to_node = self.get_node(edge.destination.node_id)
        except NodeNotFoundError:
            raise InvalidEdgeError("One or both nodes don't exist: {edge.source.node_id} -> {edge.destination.node_id}")
            raise InvalidEdgeError(f"One or both nodes don't exist ({edge})")

        # Validate that an edge to this node+field doesn't already exist
        input_edges = self._get_input_edges(edge.destination.node_id, edge.destination.field)
        if len(input_edges) > 0 and not isinstance(to_node, CollectInvocation):
            raise InvalidEdgeError(
                f"Edge to node {edge.destination.node_id} field {edge.destination.field} already exists"
            )
            raise InvalidEdgeError(f"Edge already exists ({edge})")

        # Validate that no cycles would be created
        g = self.nx_graph_flat()
        g.add_edge(edge.source.node_id, edge.destination.node_id)
        if not nx.is_directed_acyclic_graph(g):
            raise InvalidEdgeError(
                f"Edge creates a cycle in the graph: {edge.source.node_id} -> {edge.destination.node_id}"
            )
            raise InvalidEdgeError(f"Edge creates a cycle in the graph ({edge})")

        # Validate that the field types are compatible
        if not are_connections_compatible(from_node, edge.source.field, to_node, edge.destination.field):
            raise InvalidEdgeError(
                f"Fields are incompatible: cannot connect {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}"
            )
            raise InvalidEdgeError(f"Field types are incompatible ({edge})")

        # Validate if iterator output type matches iterator input type (if this edge results in both being set)
        if isinstance(to_node, IterateInvocation) and edge.destination.field == "collection":
            if not self._is_iterator_connection_valid(edge.destination.node_id, new_input=edge.source):
                raise InvalidEdgeError(
                    f"Iterator input type does not match iterator output type: {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}"
                )
            err = self._is_iterator_connection_valid(edge.destination.node_id, new_input=edge.source)
            if err is not None:
                raise InvalidEdgeError(f"Iterator input type does not match iterator output type ({edge}): {err}")

        # Validate if iterator input type matches output type (if this edge results in both being set)
        if isinstance(from_node, IterateInvocation) and edge.source.field == "item":
            if not self._is_iterator_connection_valid(edge.source.node_id, new_output=edge.destination):
                raise InvalidEdgeError(
                    f"Iterator output type does not match iterator input type:, {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}"
                )
            err = self._is_iterator_connection_valid(edge.source.node_id, new_output=edge.destination)
            if err is not None:
                raise InvalidEdgeError(f"Iterator output type does not match iterator input type ({edge}): {err}")

        # Validate if collector input type matches output type (if this edge results in both being set)
        if isinstance(to_node, CollectInvocation) and edge.destination.field == "item":
            if not self._is_collector_connection_valid(edge.destination.node_id, new_input=edge.source):
                raise InvalidEdgeError(
                    f"Collector output type does not match collector input type: {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}"
                )

        # Validate that we are not connecting collector to iterator (currently unsupported)
        if isinstance(from_node, CollectInvocation) and isinstance(to_node, IterateInvocation):
            raise InvalidEdgeError(
                f"Cannot connect collector to iterator: {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}"
            )
            err = self._is_collector_connection_valid(edge.destination.node_id, new_input=edge.source)
            if err is not None:
                raise InvalidEdgeError(f"Collector output type does not match collector input type ({edge}): {err}")

        # Validate if collector output type matches input type (if this edge results in both being set) - skip if the destination field is not Any or list[Any]
        if (
@@ -548,10 +536,9 @@ class Graph(BaseModel):
            and not self._is_destination_field_list_of_Any(edge)
            and not self._is_destination_field_Any(edge)
        ):
            if not self._is_collector_connection_valid(edge.source.node_id, new_output=edge.destination):
                raise InvalidEdgeError(
                    f"Collector input type does not match collector output type: {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}"
                )
            err = self._is_collector_connection_valid(edge.source.node_id, new_output=edge.destination)
            if err is not None:
                raise InvalidEdgeError(f"Collector input type does not match collector output type ({edge}): {err}")

    def has_node(self, node_id: str) -> bool:
        """Determines whether or not a node exists in the graph."""
@@ -634,7 +621,7 @@ class Graph(BaseModel):
        node_id: str,
        new_input: Optional[EdgeConnection] = None,
        new_output: Optional[EdgeConnection] = None,
    ) -> bool:
    ) -> str | None:
        inputs = [e.source for e in self._get_input_edges(node_id, "collection")]
        outputs = [e.destination for e in self._get_output_edges(node_id, "item")]

@@ -645,29 +632,47 @@ class Graph(BaseModel):

        # Only one input is allowed for iterators
        if len(inputs) > 1:
            return False
            return "Iterator may only have one input edge"

        input_node = self.get_node(inputs[0].node_id)

        # Get input and output fields (the fields linked to the iterator's input/output)
        input_field = get_output_field(self.get_node(inputs[0].node_id), inputs[0].field)
        output_fields = [get_input_field(self.get_node(e.node_id), e.field) for e in outputs]
        input_field_type = get_output_field_type(input_node, inputs[0].field)
        output_field_types = [get_input_field_type(self.get_node(e.node_id), e.field) for e in outputs]

        # Input type must be a list
        if get_origin(input_field) is not list:
            return False
        if get_origin(input_field_type) is not list:
            return "Iterator input must be a collection"

        # Validate that all outputs match the input type
        input_field_item_type = get_args(input_field)[0]
        if not all((are_connection_types_compatible(input_field_item_type, f) for f in output_fields)):
            return False
        input_field_item_type = get_args(input_field_type)[0]
        if not all((are_connection_types_compatible(input_field_item_type, t) for t in output_field_types)):
            return "Iterator outputs must connect to an input with a matching type"

        return True
        # Collector input type must match all iterator output types
        if isinstance(input_node, CollectInvocation):
            # Traverse the graph to find the first collector input edge. Collectors validate that their collection
            # inputs are all of the same type, so we can use the first input edge to determine the collector's type
            first_collector_input_edge = self._get_input_edges(input_node.id, "item")[0]
            first_collector_input_type = get_output_field_type(
                self.get_node(first_collector_input_edge.source.node_id), first_collector_input_edge.source.field
            )
            resolved_collector_type = (
                first_collector_input_type
                if get_origin(first_collector_input_type) is None
                else get_args(first_collector_input_type)
            )
            if not all((are_connection_types_compatible(resolved_collector_type, t) for t in output_field_types)):
                return "Iterator collection type must match all iterator output types"

        return None

    def _is_collector_connection_valid(
        self,
        node_id: str,
        new_input: Optional[EdgeConnection] = None,
        new_output: Optional[EdgeConnection] = None,
    ) -> bool:
    ) -> str | None:
        inputs = [e.source for e in self._get_input_edges(node_id, "item")]
        outputs = [e.destination for e in self._get_output_edges(node_id, "collection")]

@@ -677,38 +682,42 @@ class Graph(BaseModel):
            outputs.append(new_output)

        # Get input and output fields (the fields linked to the iterator's input/output)
        input_fields = [get_output_field(self.get_node(e.node_id), e.field) for e in inputs]
        output_fields = [get_input_field(self.get_node(e.node_id), e.field) for e in outputs]
        input_field_types = [get_output_field_type(self.get_node(e.node_id), e.field) for e in inputs]
        output_field_types = [get_input_field_type(self.get_node(e.node_id), e.field) for e in outputs]

        # Validate that all inputs are derived from or match a single type
        input_field_types = {
            t
            for input_field in input_fields
            for t in ([input_field] if get_origin(input_field) is None else get_args(input_field))
            if t != NoneType
            resolved_type
            for input_field_type in input_field_types
            for resolved_type in (
                [input_field_type] if get_origin(input_field_type) is None else get_args(input_field_type)
            )
            if resolved_type != NoneType
        }  # Get unique types
        type_tree = nx.DiGraph()
        type_tree.add_nodes_from(input_field_types)
        type_tree.add_edges_from([e for e in itertools.permutations(input_field_types, 2) if issubclass(e[1], e[0])])
        type_degrees = type_tree.in_degree(type_tree.nodes)
        if sum((t[1] == 0 for t in type_degrees)) != 1:  # type: ignore
            return False  # There is more than one root type
            return "Collector input collection items must be of a single type"

        # Get the input root type
        input_root_type = next(t[0] for t in type_degrees if t[1] == 0)  # type: ignore

        # Verify that all outputs are lists
        if not all(is_list_or_contains_list(f) for f in output_fields):
            return False
        if not all(is_list_or_contains_list(t) or is_any(t) for t in output_field_types):
            return "Collector output must connect to a collection input"

        # Verify that all outputs match the input type (are a base class or the same class)
        if not all(
            is_union_subtype(input_root_type, get_args(f)[0]) or issubclass(input_root_type, get_args(f)[0])
            for f in output_fields
            is_any(t)
            or is_union_subtype(input_root_type, get_args(t)[0])
            or issubclass(input_root_type, get_args(t)[0])
            for t in output_field_types
        ):
            return False
            return "Collector outputs must connect to a collection input with a matching type"

        return True
        return None

    def nx_graph(self) -> nx.DiGraph:
        """Returns a NetworkX DiGraph representing the layout of this graph"""
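A small standalone sketch of the validation pattern the refactor above moves to: validators return `None` on success and a human-readable reason on failure, and the caller turns a non-`None` result into an exception. The names here are illustrative, not the project's.

def check_iterator(inputs: list[str]) -> str | None:
    # Return None when valid, otherwise a reason the caller can surface to the user.
    if len(inputs) > 1:
        return "Iterator may only have one input edge"
    return None

err = check_iterator(["a", "b"])
if err is not None:
    raise ValueError(f"Invalid iterator node: {err}")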
@@ -18,6 +18,7 @@ from invokeai.app.services.shared.sqlite_migrator.migrations.migration_12 import
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_13 import build_migration_13
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_14 import build_migration_14
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_15 import build_migration_15
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_16 import build_migration_16
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_impl import SqliteMigrator

@@ -53,6 +54,7 @@ def init_db(config: InvokeAIAppConfig, logger: Logger, image_files: ImageFileSto
    migrator.register_migration(build_migration_13())
    migrator.register_migration(build_migration_14())
    migrator.register_migration(build_migration_15())
    migrator.register_migration(build_migration_16())
    migrator.run_migrations()

    return db

@@ -0,0 +1,31 @@
import sqlite3

from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration


class Migration16Callback:
    def __call__(self, cursor: sqlite3.Cursor) -> None:
        self._add_retried_from_item_id_col(cursor)

    def _add_retried_from_item_id_col(self, cursor: sqlite3.Cursor) -> None:
        """
        - Adds `retried_from_item_id` column to the session queue table.
        """

        cursor.execute("ALTER TABLE session_queue ADD COLUMN retried_from_item_id INTEGER;")


def build_migration_16() -> Migration:
    """
    Build the migration from database version 15 to 16.

    This migration does the following:
    - Adds `retried_from_item_id` column to the session queue table.
    """
    migration_16 = Migration(
        from_version=15,
        to_version=16,
        callback=Migration16Callback(),
    )

    return migration_16
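A quick sanity-check sketch of the ALTER TABLE run by the callback above, using only the standard-library sqlite3 module; the table definition is a minimal stand-in, not the real session queue schema.

import sqlite3

conn = sqlite3.connect(":memory:")
cursor = conn.cursor()
cursor.execute("CREATE TABLE session_queue (item_id INTEGER PRIMARY KEY);")  # stand-in schema

cursor.execute("ALTER TABLE session_queue ADD COLUMN retried_from_item_id INTEGER;")  # what the callback runs

columns = [row[1] for row in cursor.execute("PRAGMA table_info(session_queue);")]
assert "retried_from_item_id" in columns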
@@ -1,8 +1,10 @@
import gc
import logging
import threading
import time
from functools import wraps
from logging import Logger
from typing import Dict, List, Optional
from typing import Any, Callable, Dict, List, Optional

import psutil
import torch
@@ -41,6 +43,17 @@ def get_model_cache_key(model_key: str, submodel_type: Optional[SubModelType] =
    return model_key


def synchronized(method: Callable[..., Any]) -> Callable[..., Any]:
    """A decorator that applies the class's self._lock to the method."""

    @wraps(method)
    def wrapper(self, *args, **kwargs):
        with self._lock:  # Automatically acquire and release the lock
            return method(self, *args, **kwargs)

    return wrapper


class ModelCache:
    """A cache for managing models in memory.
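A self-contained sketch of the `synchronized` pattern introduced above, applied to a toy counter rather than the real ModelCache. The decorator assumes the instance exposes a `self._lock`, as the ModelCache does.

import threading
from functools import wraps
from typing import Any, Callable

def synchronized(method: Callable[..., Any]) -> Callable[..., Any]:
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        with self._lock:  # acquire/release the instance lock around the call
            return method(self, *args, **kwargs)
    return wrapper

class Counter:
    def __init__(self) -> None:
        self._lock = threading.RLock()  # RLock so synchronized methods may call each other
        self._value = 0

    @synchronized
    def increment(self) -> int:
        self._value += 1
        return self._value

counter = Counter()
threads = [threading.Thread(target=counter.increment) for _ in range(8)]
for t in threads:
    t.start()
for t in threads:
    t.join()
assert counter._value == 8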
@@ -125,16 +138,25 @@ class ModelCache:

        self._ram_cache_size_bytes = self._calc_ram_available_to_model_cache()

        # A lock applied to all public method calls to make the ModelCache thread-safe.
        # At the time of writing, the ModelCache should only be accessed from two threads:
        # - The graph execution thread
        # - Requests to empty the cache from a separate thread
        self._lock = threading.RLock()

    @property
    @synchronized
    def stats(self) -> Optional[CacheStats]:
        """Return collected CacheStats object."""
        return self._stats

    @stats.setter
    @synchronized
    def stats(self, stats: CacheStats) -> None:
        """Set the CacheStats object for collecting cache statistics."""
        self._stats = stats

    @synchronized
    def put(self, key: str, model: AnyModel) -> None:
        """Add a model to the cache."""
        if key in self._cached_models:
@@ -173,6 +195,7 @@ class ModelCache:
            f"Added model {key} (Type: {model.__class__.__name__}, Wrap mode: {wrapped_model.__class__.__name__}, Model size: {size/MB:.2f}MB)"
        )

    @synchronized
    def get(self, key: str, stats_name: Optional[str] = None) -> CacheRecord:
        """Retrieve a model from the cache.

@@ -208,6 +231,7 @@ class ModelCache:
        self._logger.debug(f"Cache hit: {key} (Type: {cache_entry.cached_model.model.__class__.__name__})")
        return cache_entry

    @synchronized
    def lock(self, cache_entry: CacheRecord, working_mem_bytes: Optional[int]) -> None:
        """Lock a model for use and move it into VRAM."""
        if cache_entry.key not in self._cached_models:
@@ -243,6 +267,7 @@ class ModelCache:

        self._log_cache_state()

    @synchronized
    def unlock(self, cache_entry: CacheRecord) -> None:
        """Unlock a model."""
        if cache_entry.key not in self._cached_models:
@@ -400,23 +425,19 @@ class ModelCache:
        # Heuristics for dynamically calculating the RAM cache size, **in order of increasing priority**:
        # 1. As an initial default, use 50% of the total RAM for InvokeAI.
        #    - Assume a 2GB baseline for InvokeAI's non-model RAM usage, and use the rest of the RAM for the model cache.
        # 2. On a system with a lot of RAM (e.g. 64GB+), users probably don't want InvokeAI to eat up too much RAM.
        #    There are diminishing returns to storing more and more models. So, we apply an upper bound.
        # 2. On a system with a lot of RAM, users probably don't want InvokeAI to eat up too much RAM.
        #    There are diminishing returns to storing more and more models. So, we apply an upper bound. (Keep in mind
        #    that most OSes have some amount of disk caching, which we still benefit from if there is excess memory,
        #    even if we drop models from the cache.)
        #    - On systems without a CUDA device, the upper bound is 32GB.
        #    - On systems with a CUDA device, the upper bound is 2x the amount of VRAM.
        # 3. On systems with a CUDA device, the minimum should be the VRAM size (less the working memory).
        #    - Setting lower than this would mean that we sometimes kick models out of the cache when there is room for
        #      all models in VRAM.
        #    - Consider an extreme case of a system with 8GB RAM / 24GB VRAM. I haven't tested this, but I think
        #      you'd still want the RAM cache size to be ~24GB (less the working memory). (Though you'd probably want to
        #      set `keep_ram_copy_of_weights: false` in this case.)
        # 4. Absolute minimum of 4GB.
        #    - On systems with a CUDA device, the upper bound is 1x the amount of VRAM (less the working memory).
        # 3. Absolute minimum of 4GB.

        # NOTE(ryand): We explored dynamically adjusting the RAM cache size based on memory pressure (using psutil), but
        # decided against it for now, for the following reasons:
        # - It was surprisingly difficult to get memory metrics with consistent definitions across OSes. (If you go
        #   down this path again, don't underestimate the amount of complexity here and be sure to test rigorously on all
        #   OSes.)
        #   down this path again, don't underestimate the amount of complexity here and be sure to test rigorously on all
        #   OSes.)
        # - Making the RAM cache size dynamic opens the door for performance regressions that are hard to diagnose and
        #   hard for users to understand. It is better for users to see that their RAM is maxed out, and then override
        #   the default value if desired.
@@ -438,26 +459,18 @@ class ModelCache:
        # ------------------
        max_ram_cache_size_bytes = 32 * GB
        if total_cuda_vram_bytes is not None:
            max_ram_cache_size_bytes = 2 * total_cuda_vram_bytes
            if self._max_vram_cache_size_gb is not None:
                max_ram_cache_size_bytes = int(self._max_vram_cache_size_gb * GB)
            else:
                max_ram_cache_size_bytes = total_cuda_vram_bytes - int(self._execution_device_working_mem_gb * GB)
        if ram_available_to_model_cache > max_ram_cache_size_bytes:
            heuristics_applied.append(2)
            ram_available_to_model_cache = max_ram_cache_size_bytes

        # Apply heuristic 3.
        # ------------------
        if total_cuda_vram_bytes is not None:
            if self._max_vram_cache_size_gb is not None:
                min_ram_cache_size_bytes = int(self._max_vram_cache_size_gb * GB)
            else:
                min_ram_cache_size_bytes = total_cuda_vram_bytes - int(self._execution_device_working_mem_gb * GB)
            if ram_available_to_model_cache < min_ram_cache_size_bytes:
                heuristics_applied.append(3)
                ram_available_to_model_cache = min_ram_cache_size_bytes

        # Apply heuristic 4.
        # ------------------
        if ram_available_to_model_cache < 4 * GB:
            heuristics_applied.append(4)
            heuristics_applied.append(3)
            ram_available_to_model_cache = 4 * GB

        self._logger.info(
@@ -588,6 +601,7 @@ class ModelCache:

        self._logger.debug(log)

    @synchronized
    def make_room(self, bytes_needed: int) -> None:
        """Make enough room in the cache to accommodate a new model of indicated size.
@@ -7,7 +7,6 @@ from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.custo
    CustomModuleMixin,
)
from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.patches.layers.concatenated_lora_layer import ConcatenatedLoRALayer
from invokeai.backend.patches.layers.flux_control_lora_layer import FluxControlLoRALayer
from invokeai.backend.patches.layers.lora_layer import LoRALayer

@@ -22,25 +21,6 @@ def linear_lora_forward(input: torch.Tensor, lora_layer: LoRALayer, lora_weight:
    return x


def concatenated_lora_forward(
    input: torch.Tensor, concatenated_lora_layer: ConcatenatedLoRALayer, lora_weight: float
) -> torch.Tensor:
    """An optimized implementation of the residual calculation for a sidecar ConcatenatedLoRALayer."""
    x_chunks: list[torch.Tensor] = []
    for lora_layer in concatenated_lora_layer.lora_layers:
        x_chunk = torch.nn.functional.linear(input, lora_layer.down)
        if lora_layer.mid is not None:
            x_chunk = torch.nn.functional.linear(x_chunk, lora_layer.mid)
        x_chunk = torch.nn.functional.linear(x_chunk, lora_layer.up, bias=lora_layer.bias)
        x_chunk *= lora_weight * lora_layer.scale()
        x_chunks.append(x_chunk)

    # TODO(ryand): Generalize to support concat_axis != 0.
    assert concatenated_lora_layer.concat_axis == 0
    x = torch.cat(x_chunks, dim=-1)
    return x


def autocast_linear_forward_sidecar_patches(
    orig_module: torch.nn.Linear, input: torch.Tensor, patches_and_weights: list[tuple[BaseLayerPatch, float]]
) -> torch.Tensor:
@@ -66,8 +46,6 @@ def autocast_linear_forward_sidecar_patches(
            output += linear_lora_forward(orig_input, patch, patch_weight)
        elif isinstance(patch, LoRALayer):
            output += linear_lora_forward(input, patch, patch_weight)
        elif isinstance(patch, ConcatenatedLoRALayer):
            output += concatenated_lora_forward(input, patch, patch_weight)
        else:
            unprocessed_patches_and_weights.append((patch, patch_weight))

@@ -3,6 +3,8 @@ import copy
import torch

from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.patches.layers.param_shape_utils import get_param_shape
from invokeai.backend.quantization.gguf.ggml_tensor import GGMLTensor


class CustomModuleMixin:
@@ -42,6 +44,20 @@ class CustomModuleMixin:
        device: torch.device | None = None,
    ):
        """Helper function that aggregates the parameters from all patches into a single dict."""
        # HACK(ryand): If the original parameters are in a quantized format whose weights can't be accessed, we replace
        # them with dummy tensors on the 'meta' device. This allows patch layers to access the shapes of the original
        # parameters. But, of course, any sub-layers that need to access the actual values of the parameters will fail.
        for param_name in orig_params.keys():
            param = orig_params[param_name]
            if type(param) is torch.nn.Parameter and type(param.data) is torch.Tensor:
                pass
            elif type(param) is GGMLTensor:
                # Move to device and dequantize here. Doing it in the patch layer can result in redundant casts /
                # dequantizations.
                orig_params[param_name] = param.to(device=device).get_dequantized_tensor()
            else:
                orig_params[param_name] = torch.empty(get_param_shape(param), device="meta")

        params: dict[str, torch.Tensor] = {}

        for patch, patch_weight in patches_and_weights:

@@ -31,6 +31,10 @@ from invokeai.backend.patches.lora_conversions.flux_kohya_lora_conversion_utils
    is_state_dict_likely_in_flux_kohya_format,
    lora_model_from_flux_kohya_state_dict,
)
from invokeai.backend.patches.lora_conversions.flux_onetrainer_lora_conversion_utils import (
    is_state_dict_likely_in_flux_onetrainer_format,
    lora_model_from_flux_onetrainer_state_dict,
)
from invokeai.backend.patches.lora_conversions.sd_lora_conversion_utils import lora_model_from_sd_state_dict
from invokeai.backend.patches.lora_conversions.sdxl_lora_conversion_utils import convert_sdxl_keys_to_diffusers_format

@@ -84,8 +88,12 @@ class LoRALoader(ModelLoader):
        elif config.format == ModelFormat.LyCORIS:
            if is_state_dict_likely_in_flux_kohya_format(state_dict=state_dict):
                model = lora_model_from_flux_kohya_state_dict(state_dict=state_dict)
            elif is_state_dict_likely_in_flux_onetrainer_format(state_dict=state_dict):
                model = lora_model_from_flux_onetrainer_state_dict(state_dict=state_dict)
            elif is_state_dict_likely_flux_control(state_dict=state_dict):
                model = lora_model_from_flux_control_state_dict(state_dict=state_dict)
            else:
                raise ValueError(f"LoRA model is in unsupported FLUX format: {config.format}")
        else:
            raise ValueError(f"LoRA model is in unsupported FLUX format: {config.format}")
    elif self._model_base in [BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2]:

@@ -46,6 +46,9 @@ from invokeai.backend.patches.lora_conversions.flux_diffusers_lora_conversion_ut
from invokeai.backend.patches.lora_conversions.flux_kohya_lora_conversion_utils import (
    is_state_dict_likely_in_flux_kohya_format,
)
from invokeai.backend.patches.lora_conversions.flux_onetrainer_lora_conversion_utils import (
    is_state_dict_likely_in_flux_onetrainer_format,
)
from invokeai.backend.quantization.gguf.ggml_tensor import GGMLTensor
from invokeai.backend.quantization.gguf.loaders import gguf_sd_loader
from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel
@@ -283,7 +286,7 @@ class ModelProbe(object):
            return ModelType.Main
        elif key.startswith(("encoder.conv_in", "decoder.conv_in")):
            return ModelType.VAE
        elif key.startswith(("lora_te_", "lora_unet_")):
        elif key.startswith(("lora_te_", "lora_unet_", "lora_te1_", "lora_te2_", "lora_transformer_")):
            return ModelType.LoRA
        # "lora_A.weight" and "lora_B.weight" are associated with models in PEFT format. We don't support all PEFT
        # LoRA models, but as of the time of writing, we support Diffusers FLUX PEFT LoRA models.
@@ -632,6 +635,7 @@ class LoRACheckpointProbe(CheckpointProbeBase):
    def get_base_type(self) -> BaseModelType:
        if (
            is_state_dict_likely_in_flux_kohya_format(self.checkpoint)
            or is_state_dict_likely_in_flux_onetrainer_format(self.checkpoint)
            or is_state_dict_likely_in_flux_diffusers_format(self.checkpoint)
            or is_state_dict_likely_flux_control(self.checkpoint)
        ):

@@ -1,55 +0,0 @@
from typing import Optional, Sequence

import torch

from invokeai.backend.patches.layers.lora_layer import LoRALayer
from invokeai.backend.patches.layers.lora_layer_base import LoRALayerBase


class ConcatenatedLoRALayer(LoRALayerBase):
    """A LoRA layer that is composed of multiple LoRA layers concatenated along a specified axis.

    This class was created to handle a special case with FLUX LoRA models. In the BFL FLUX model format, the attention
    Q, K, V matrices are concatenated along the first dimension. In the diffusers LoRA format, the Q, K, V matrices are
    stored as separate tensors. This class enables diffusers LoRA layers to be used in BFL FLUX models.
    """

    def __init__(self, lora_layers: Sequence[LoRALayer], concat_axis: int = 0):
        super().__init__(alpha=None, bias=None)

        self.lora_layers = lora_layers
        self.concat_axis = concat_axis

    def _rank(self) -> int | None:
        return None

    def get_weight(self, orig_weight: torch.Tensor) -> torch.Tensor:
        # TODO(ryand): Currently, we pass orig_weight=None to the sub-layers. If we want to support sub-layers that
        # require this value, we will need to implement chunking of the original weight tensor here.
        # Note that we must apply the sub-layer scales here.
        layer_weights = [lora_layer.get_weight(None) * lora_layer.scale() for lora_layer in self.lora_layers]  # pyright: ignore[reportArgumentType]
        return torch.cat(layer_weights, dim=self.concat_axis)

    def get_bias(self, orig_bias: torch.Tensor | None) -> Optional[torch.Tensor]:
        # TODO(ryand): Currently, we pass orig_bias=None to the sub-layers. If we want to support sub-layers that
        # require this value, we will need to implement chunking of the original bias tensor here.
        # Note that we must apply the sub-layer scales here.
        layer_biases: list[torch.Tensor] = []
        for lora_layer in self.lora_layers:
            layer_bias = lora_layer.get_bias(None)
            if layer_bias is not None:
                layer_biases.append(layer_bias * lora_layer.scale())

        if len(layer_biases) == 0:
            return None

        assert len(layer_biases) == len(self.lora_layers)
        return torch.cat(layer_biases, dim=self.concat_axis)

    def to(self, device: torch.device | None = None, dtype: torch.dtype | None = None):
        super().to(device=device, dtype=dtype)
        for lora_layer in self.lora_layers:
            lora_layer.to(device=device, dtype=dtype)

    def calc_size(self) -> int:
        return super().calc_size() + sum(lora_layer.calc_size() for lora_layer in self.lora_layers)

invokeai/backend/patches/layers/dora_layer.py (new file, 115 lines)
@@ -0,0 +1,115 @@
from typing import Dict, Optional

import torch

from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.cast_to_device import cast_to_device
from invokeai.backend.patches.layers.lora_layer_base import LoRALayerBase
from invokeai.backend.util.calc_tensor_size import calc_tensors_size


class DoRALayer(LoRALayerBase):
    """A DoRA layer. As defined in https://arxiv.org/pdf/2402.09353."""

    def __init__(
        self,
        up: torch.Tensor,
        down: torch.Tensor,
        dora_scale: torch.Tensor,
        alpha: float | None,
        bias: Optional[torch.Tensor],
    ):
        super().__init__(alpha, bias)
        self.up = up
        self.down = down
        self.dora_scale = dora_scale

    @classmethod
    def from_state_dict_values(cls, values: Dict[str, torch.Tensor]):
        alpha = cls._parse_alpha(values.get("alpha", None))
        bias = cls._parse_bias(
            values.get("bias_indices", None), values.get("bias_values", None), values.get("bias_size", None)
        )

        layer = cls(
            up=values["lora_up.weight"],
            down=values["lora_down.weight"],
            dora_scale=values["dora_scale"],
            alpha=alpha,
            bias=bias,
        )

        cls.warn_on_unhandled_keys(
            values=values,
            handled_keys={
                # Default keys.
                "alpha",
                "bias_indices",
                "bias_values",
                "bias_size",
                # Layer-specific keys.
                "lora_up.weight",
                "lora_down.weight",
                "dora_scale",
            },
        )

        return layer

    def _rank(self) -> int:
        return self.down.shape[0]

    def get_weight(self, orig_weight: torch.Tensor) -> torch.Tensor:
        orig_weight = cast_to_device(orig_weight, self.up.device)

        # Note: Variable names (e.g. delta_v) are based on the paper.
        delta_v = self.up.reshape(self.up.shape[0], -1) @ self.down.reshape(self.down.shape[0], -1)
        delta_v = delta_v.reshape(orig_weight.shape)

        delta_v = delta_v * self.scale()

        # At this point, out_weight is the unnormalized direction matrix.
        out_weight = orig_weight + delta_v

        # TODO(ryand): Simplify this logic.
        direction_norm = (
            out_weight.transpose(0, 1)
            .reshape(out_weight.shape[1], -1)
            .norm(dim=1, keepdim=True)
            .reshape(out_weight.shape[1], *[1] * (out_weight.dim() - 1))
            .transpose(0, 1)
        )

        out_weight *= self.dora_scale / direction_norm

        return out_weight - orig_weight

    def to(self, device: torch.device | None = None, dtype: torch.dtype | None = None):
        super().to(device=device, dtype=dtype)
        self.up = self.up.to(device=device, dtype=dtype)
        self.down = self.down.to(device=device, dtype=dtype)
        self.dora_scale = self.dora_scale.to(device=device, dtype=dtype)

    def calc_size(self) -> int:
        return super().calc_size() + calc_tensors_size([self.up, self.down, self.dora_scale])

    def get_parameters(self, orig_parameters: dict[str, torch.Tensor], weight: float) -> dict[str, torch.Tensor]:
        if any(p.device.type == "meta" for p in orig_parameters.values()):
            # If any of the original parameters are on the 'meta' device, we assume this is because the base model is in
            # a quantization format that doesn't allow easy dequantization.
            raise RuntimeError(
                "The base model quantization format (likely bitsandbytes) is not compatible with DoRA patches."
            )

        scale = self.scale()
        params = {"weight": self.get_weight(orig_parameters["weight"]) * weight}
        bias = self.get_bias(orig_parameters.get("bias", None))
        if bias is not None:
            params["bias"] = bias * (weight * scale)

        # Reshape all params to match the original module's shape.
        for param_name, param_weight in params.items():
            orig_param = orig_parameters[param_name]
            if param_weight.shape != orig_param.shape:
                params[param_name] = param_weight.reshape(orig_param.shape)

        return params
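For reference, a small numeric sketch of the DoRA update computed in `get_weight` above, using plain tensors rather than InvokeAI classes. The alpha/rank scaling is omitted and the `dora_scale` shape is illustrative: the low-rank delta is added to the original weight, each column of the result is normalized, the learned magnitude rescales it, and the returned value is the difference from the original weight.

import torch

torch.manual_seed(0)
out_features, in_features, rank = 4, 3, 2
orig_weight = torch.randn(out_features, in_features)
up = torch.randn(out_features, rank)
down = torch.randn(rank, in_features)
dora_scale = torch.randn(1, in_features).abs()  # learned per-column magnitude (illustrative shape)

delta_v = up @ down                                   # low-rank direction update
out_weight = orig_weight + delta_v                    # unnormalized direction matrix
column_norm = out_weight.norm(dim=0, keepdim=True)    # L2 norm of each column
patched = out_weight * (dora_scale / column_norm)     # rescale columns to the learned magnitude
weight_delta = patched - orig_weight                  # what get_weight() returns

assert weight_delta.shape == orig_weight.shape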
@@ -4,6 +4,7 @@ import torch

import invokeai.backend.util.logging as logger
from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.patches.layers.param_shape_utils import get_param_shape
from invokeai.backend.util.calc_tensor_size import calc_tensors_size

@@ -67,8 +68,8 @@ class LoRALayerBase(BaseLayerPatch):
        # Reshape all params to match the original module's shape.
        for param_name, param_weight in params.items():
            orig_param = orig_parameters[param_name]
            if param_weight.shape != orig_param.shape:
                params[param_name] = param_weight.reshape(orig_param.shape)
            if param_weight.shape != get_param_shape(orig_param):
                params[param_name] = param_weight.reshape(get_param_shape(orig_param))

        return params

invokeai/backend/patches/layers/merged_layer_patch.py (new file, 65 lines)
@@ -0,0 +1,65 @@
from dataclasses import dataclass
from typing import Sequence

import torch

from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.patches.layers.param_shape_utils import get_param_shape


@dataclass
class Range:
    start: int
    end: int


class MergedLayerPatch(BaseLayerPatch):
    """A patch layer that is composed of multiple sub-layers merged together.

    This class was created to handle a special case with FLUX LoRA models. In the BFL FLUX model format, the attention
    Q, K, V matrices are concatenated along the first dimension. In the diffusers LoRA format, the Q, K, V matrices are
    stored as separate tensors. This class enables diffusers LoRA layers to be used in BFL FLUX models.
    """

    def __init__(
        self,
        lora_layers: Sequence[BaseLayerPatch],
        ranges: Sequence[Range],
    ):
        super().__init__()

        self.lora_layers = lora_layers
        # self.ranges[i] is the range for the i'th lora layer along the 0'th weight dimension.
        self.ranges = ranges
        assert len(self.ranges) == len(self.lora_layers)

    def get_parameters(self, orig_parameters: dict[str, torch.Tensor], weight: float) -> dict[str, torch.Tensor]:
        out_parameters: dict[str, torch.Tensor] = {}

        for lora_layer, range in zip(self.lora_layers, self.ranges, strict=True):
            sliced_parameters: dict[str, torch.Tensor] = {
                n: p[range.start : range.end] for n, p in orig_parameters.items()
            }

            # Note that `weight` is applied in the sub-layers, no need to apply it in this function.
            layer_out_parameters = lora_layer.get_parameters(sliced_parameters, weight)

            for out_param_name, out_param in layer_out_parameters.items():
                if out_param_name not in out_parameters:
                    # If not already in the output dict, initialize an output tensor with the same shape as the full
                    # original parameter.
                    out_parameters[out_param_name] = torch.zeros(
                        get_param_shape(orig_parameters[out_param_name]),
                        dtype=out_param.dtype,
                        device=out_param.device,
                    )
                out_parameters[out_param_name][range.start : range.end] += out_param

        return out_parameters

    def to(self, device: torch.device | None = None, dtype: torch.dtype | None = None):
        for lora_layer in self.lora_layers:
            lora_layer.to(device=device, dtype=dtype)

    def calc_size(self) -> int:
        return sum(lora_layer.calc_size() for lora_layer in self.lora_layers)

invokeai/backend/patches/layers/param_shape_utils.py (new file, 19 lines)
@@ -0,0 +1,19 @@
import torch

try:
    from bitsandbytes.nn.modules import Params4bit

    bnb_available: bool = True
except ImportError:
    bnb_available: bool = False


def get_param_shape(param: torch.Tensor) -> torch.Size:
    """A helper function to get the shape of a parameter that handles `bitsandbytes.nn.Params4Bit` correctly."""
    # Accessing the `.shape` attribute of `bitsandbytes.nn.Params4Bit` will return an incorrect result. Instead, we must
    # access the `.quant_state.shape` attribute.
    if bnb_available and type(param) is Params4bit:  # type: ignore
        quant_state = param.quant_state
        if quant_state is not None:
            return quant_state.shape
    return param.shape
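A toy illustration (plain tensors, stand-in patch outputs) of the merge rule used by `MergedLayerPatch.get_parameters` above: each sub-layer only sees, and only writes to, its own slice of dimension 0 of the fused parameter.

import torch

fused_weight = torch.zeros(6, 4)                    # e.g. Q, K, V fused along dim 0 (2 rows each)
ranges = [(0, 2), (2, 4), (4, 6)]
sub_deltas = [torch.full((2, 4), float(i + 1)) for i in range(3)]  # stand-ins for per-sub-layer patch output

out = torch.zeros_like(fused_weight)
for (start, end), delta in zip(ranges, sub_deltas):
    out[start:end] += delta                          # each sub-layer's delta lands only in its row range

assert torch.equal(out[0:2], torch.full((2, 4), 1.0))
assert torch.equal(out[4:6], torch.full((2, 4), 3.0))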
@@ -3,6 +3,7 @@ from typing import Dict
|
||||
import torch
|
||||
|
||||
from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
|
||||
from invokeai.backend.patches.layers.dora_layer import DoRALayer
|
||||
from invokeai.backend.patches.layers.full_layer import FullLayer
|
||||
from invokeai.backend.patches.layers.ia3_layer import IA3Layer
|
||||
from invokeai.backend.patches.layers.loha_layer import LoHALayer
|
||||
@@ -14,8 +15,9 @@ from invokeai.backend.patches.layers.norm_layer import NormLayer
|
||||
def any_lora_layer_from_state_dict(state_dict: Dict[str, torch.Tensor]) -> BaseLayerPatch:
|
||||
# Detect layers according to LyCORIS detection logic(`weight_list_det`)
|
||||
# https://github.com/KohakuBlueleaf/LyCORIS/tree/8ad8000efb79e2b879054da8c9356e6143591bad/lycoris/modules
|
||||
|
||||
if "lora_up.weight" in state_dict:
|
||||
if "dora_scale" in state_dict:
|
||||
return DoRALayer.from_state_dict_values(state_dict)
|
||||
elif "lora_up.weight" in state_dict:
|
||||
# LoRA a.k.a LoCon
|
||||
return LoRALayer.from_state_dict_values(state_dict)
|
||||
elif "hada_w1_a" in state_dict:
|
||||
|
||||
@@ -3,8 +3,8 @@ from typing import Dict
|
||||
import torch
|
||||
|
||||
from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
|
||||
from invokeai.backend.patches.layers.concatenated_lora_layer import ConcatenatedLoRALayer
|
||||
from invokeai.backend.patches.layers.lora_layer import LoRALayer
|
||||
from invokeai.backend.patches.layers.merged_layer_patch import MergedLayerPatch, Range
|
||||
from invokeai.backend.patches.layers.utils import any_lora_layer_from_state_dict
|
||||
from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_TRANSFORMER_PREFIX
|
||||
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
|
||||
|
||||
@@ -33,13 +33,21 @@ def is_state_dict_likely_in_flux_diffusers_format(state_dict: Dict[str, torch.Te
|
||||
def lora_model_from_flux_diffusers_state_dict(
|
||||
state_dict: Dict[str, torch.Tensor], alpha: float | None
|
||||
) -> ModelPatchRaw:
|
||||
"""Loads a state dict in the Diffusers FLUX LoRA format into a LoRAModelRaw object.
|
||||
# Group keys by layer.
|
||||
grouped_state_dict: dict[str, dict[str, torch.Tensor]] = _group_by_layer(state_dict)
|
||||
layers = lora_layers_from_flux_diffusers_grouped_state_dict(grouped_state_dict, alpha)
|
||||
return ModelPatchRaw(layers=layers)
|
||||
|
||||
|
||||
def lora_layers_from_flux_diffusers_grouped_state_dict(
|
||||
grouped_state_dict: Dict[str, Dict[str, torch.Tensor]], alpha: float | None
|
||||
) -> dict[str, BaseLayerPatch]:
|
||||
"""Converts a grouped state dict with Diffusers FLUX LoRA keys to LoRA layers with BFL keys (i.e. the module key
|
||||
format used by Invoke).
|
||||
|
||||
This function is based on:
|
||||
https://github.com/huggingface/diffusers/blob/55ac421f7bb12fd00ccbef727be4dc2f3f920abb/scripts/convert_flux_to_diffusers.py
|
||||
"""
|
||||
# Group keys by layer.
|
||||
grouped_state_dict: dict[str, dict[str, torch.Tensor]] = _group_by_layer(state_dict)
|
||||
|
||||
# Remove the "transformer." prefix from all keys.
|
||||
grouped_state_dict = {k.replace("transformer.", ""): v for k, v in grouped_state_dict.items()}
|
||||
@@ -53,17 +61,26 @@ def lora_model_from_flux_diffusers_state_dict(
|
||||
|
||||
layers: dict[str, BaseLayerPatch] = {}
|
||||
|
||||
def add_lora_layer_if_present(src_key: str, dst_key: str) -> None:
|
||||
if src_key in grouped_state_dict:
|
||||
src_layer_dict = grouped_state_dict.pop(src_key)
|
||||
value = {
|
||||
def get_lora_layer_values(src_layer_dict: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
|
||||
if "lora_A.weight" in src_layer_dict:
|
||||
# The LoRA keys are in PEFT format.
|
||||
values = {
|
||||
"lora_down.weight": src_layer_dict.pop("lora_A.weight"),
|
||||
"lora_up.weight": src_layer_dict.pop("lora_B.weight"),
|
||||
}
|
||||
if alpha is not None:
|
||||
value["alpha"] = torch.tensor(alpha)
|
||||
layers[dst_key] = LoRALayer.from_state_dict_values(values=value)
|
||||
values["alpha"] = torch.tensor(alpha)
|
||||
assert len(src_layer_dict) == 0
|
||||
return values
|
||||
else:
|
||||
# Assume that the LoRA keys are in Kohya format.
|
||||
return src_layer_dict
|
||||
|
||||
def add_lora_layer_if_present(src_key: str, dst_key: str) -> None:
|
||||
if src_key in grouped_state_dict:
|
||||
src_layer_dict = grouped_state_dict.pop(src_key)
|
||||
values = get_lora_layer_values(src_layer_dict)
|
||||
layers[dst_key] = any_lora_layer_from_state_dict(values)
|
||||
|
||||
def add_qkv_lora_layer_if_present(
src_keys: list[str],
@@ -79,29 +96,24 @@ def lora_model_from_flux_diffusers_state_dict(
if not any(keys_present):
return

sub_layers: list[LoRALayer] = []
dim_0_offset = 0
sub_layers: list[BaseLayerPatch] = []
sub_layer_ranges: list[Range] = []
for src_key, src_weight_shape in zip(src_keys, src_weight_shapes, strict=True):
src_layer_dict = grouped_state_dict.pop(src_key, None)
if src_layer_dict is not None:
values = {
"lora_down.weight": src_layer_dict.pop("lora_A.weight"),
"lora_up.weight": src_layer_dict.pop("lora_B.weight"),
}
if alpha is not None:
values["alpha"] = torch.tensor(alpha)
assert values["lora_down.weight"].shape[1] == src_weight_shape[1]
assert values["lora_up.weight"].shape[0] == src_weight_shape[0]
sub_layers.append(LoRALayer.from_state_dict_values(values=values))
assert len(src_layer_dict) == 0
values = get_lora_layer_values(src_layer_dict)
# assert values["lora_down.weight"].shape[1] == src_weight_shape[1]
# assert values["lora_up.weight"].shape[0] == src_weight_shape[0]
sub_layers.append(any_lora_layer_from_state_dict(values))
sub_layer_ranges.append(Range(dim_0_offset, dim_0_offset + src_weight_shape[0]))
else:
if not allow_missing_keys:
raise ValueError(f"Missing LoRA layer: '{src_key}'.")
values = {
"lora_up.weight": torch.zeros((src_weight_shape[0], 1)),
"lora_down.weight": torch.zeros((1, src_weight_shape[1])),
}
sub_layers.append(LoRALayer.from_state_dict_values(values=values))
layers[dst_qkv_key] = ConcatenatedLoRALayer(lora_layers=sub_layers)

dim_0_offset += src_weight_shape[0]

layers[dst_qkv_key] = MergedLayerPatch(sub_layers, sub_layer_ranges)
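The MergedLayerPatch that replaces the old ConcatenatedLoRALayer records, for each fused QKV sub-layer, the dim-0 range it patches. A rough sketch of how those ranges accumulate for three equally sized Q/K/V projections; plain tuples stand in for the Range objects and the 3072 sizes are illustrative, not taken from the diff:

```python
src_weight_shapes = [(3072, 3072), (3072, 3072), (3072, 3072)]  # hypothetical q/k/v weight shapes
dim_0_offset = 0
ranges = []
for out_dim, _in_dim in src_weight_shapes:
    ranges.append((dim_0_offset, dim_0_offset + out_dim))
    dim_0_offset += out_dim
print(ranges)  # [(0, 3072), (3072, 6144), (6144, 9216)]
```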
# time_text_embed.timestep_embedder -> time_in.
add_lora_layer_if_present("time_text_embed.timestep_embedder.linear_1", "time_in.in_layer")
@@ -217,7 +229,7 @@ def lora_model_from_flux_diffusers_state_dict(

layers_with_prefix = {f"{FLUX_LORA_TRANSFORMER_PREFIX}{k}": v for k, v in layers.items()}

return ModelPatchRaw(layers=layers_with_prefix)
return layers_with_prefix


def _group_by_layer(state_dict: Dict[str, torch.Tensor]) -> dict[str, dict[str, torch.Tensor]]:
@@ -7,6 +7,7 @@ from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.patches.layers.utils import any_lora_layer_from_state_dict
from invokeai.backend.patches.lora_conversions.flux_lora_constants import (
FLUX_LORA_CLIP_PREFIX,
FLUX_LORA_T5_PREFIX,
FLUX_LORA_TRANSFORMER_PREFIX,
)
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
@@ -26,6 +27,14 @@ FLUX_KOHYA_TRANSFORMER_KEY_REGEX = (
# lora_te1_text_model_encoder_layers_0_mlp_fc1.lora_up.weight
FLUX_KOHYA_CLIP_KEY_REGEX = r"lora_te1_text_model_encoder_layers_(\d+)_(mlp|self_attn)_(\w+)\.?.*"

# A regex pattern that matches all of the T5 keys in the Kohya FLUX LoRA format.
# Example keys:
# lora_te2_encoder_block_0_layer_0_SelfAttention_k.alpha
# lora_te2_encoder_block_0_layer_0_SelfAttention_k.dora_scale
# lora_te2_encoder_block_0_layer_0_SelfAttention_k.lora_down.weight
# lora_te2_encoder_block_0_layer_0_SelfAttention_k.lora_up.weight
FLUX_KOHYA_T5_KEY_REGEX = r"lora_te2_encoder_block_(\d+)_layer_(\d+)_(DenseReluDense|SelfAttention)_(\w+)_?(\w+)?\.?.*"


def is_state_dict_likely_in_flux_kohya_format(state_dict: Dict[str, Any]) -> bool:
"""Checks if the provided state dict is likely in the Kohya FLUX LoRA format.
@@ -34,7 +43,9 @@ def is_state_dict_likely_in_flux_kohya_format(state_dict: Dict[str, Any]) -> boo
perfect-precision detector would require checking all keys against a whitelist and verifying tensor shapes.)
"""
return all(
re.match(FLUX_KOHYA_TRANSFORMER_KEY_REGEX, k) or re.match(FLUX_KOHYA_CLIP_KEY_REGEX, k)
re.match(FLUX_KOHYA_TRANSFORMER_KEY_REGEX, k)
or re.match(FLUX_KOHYA_CLIP_KEY_REGEX, k)
or re.match(FLUX_KOHYA_T5_KEY_REGEX, k)
for k in state_dict.keys()
)
@@ -48,27 +59,34 @@ def lora_model_from_flux_kohya_state_dict(state_dict: Dict[str, torch.Tensor]) -
grouped_state_dict[layer_name] = {}
grouped_state_dict[layer_name][param_name] = value

# Split the grouped state dict into transformer and CLIP state dicts.
# Split the grouped state dict into transformer, CLIP, and T5 state dicts.
transformer_grouped_sd: dict[str, dict[str, torch.Tensor]] = {}
clip_grouped_sd: dict[str, dict[str, torch.Tensor]] = {}
t5_grouped_sd: dict[str, dict[str, torch.Tensor]] = {}
for layer_name, layer_state_dict in grouped_state_dict.items():
if layer_name.startswith("lora_unet"):
transformer_grouped_sd[layer_name] = layer_state_dict
elif layer_name.startswith("lora_te1"):
clip_grouped_sd[layer_name] = layer_state_dict
elif layer_name.startswith("lora_te2"):
t5_grouped_sd[layer_name] = layer_state_dict
else:
raise ValueError(f"Layer '{layer_name}' does not match the expected pattern for FLUX LoRA weights.")

# Convert the state dicts to the InvokeAI format.
transformer_grouped_sd = _convert_flux_transformer_kohya_state_dict_to_invoke_format(transformer_grouped_sd)
clip_grouped_sd = _convert_flux_clip_kohya_state_dict_to_invoke_format(clip_grouped_sd)
t5_grouped_sd = _convert_flux_t5_kohya_state_dict_to_invoke_format(t5_grouped_sd)

# Create LoRA layers.
layers: dict[str, BaseLayerPatch] = {}
for layer_key, layer_state_dict in transformer_grouped_sd.items():
layers[FLUX_LORA_TRANSFORMER_PREFIX + layer_key] = any_lora_layer_from_state_dict(layer_state_dict)
for layer_key, layer_state_dict in clip_grouped_sd.items():
layers[FLUX_LORA_CLIP_PREFIX + layer_key] = any_lora_layer_from_state_dict(layer_state_dict)
for model_prefix, grouped_sd in [
(FLUX_LORA_TRANSFORMER_PREFIX, transformer_grouped_sd),
(FLUX_LORA_CLIP_PREFIX, clip_grouped_sd),
(FLUX_LORA_T5_PREFIX, t5_grouped_sd),
]:
for layer_key, layer_state_dict in grouped_sd.items():
layers[model_prefix + layer_key] = any_lora_layer_from_state_dict(layer_state_dict)

# Create and return the LoRAModelRaw.
return ModelPatchRaw(layers=layers)
@@ -123,3 +141,31 @@ def _convert_flux_transformer_kohya_state_dict_to_invoke_format(state_dict: Dict
raise ValueError(f"Key '{k}' does not match the expected pattern for FLUX LoRA weights.")

return converted_dict


def _convert_flux_t5_kohya_state_dict_to_invoke_format(state_dict: Dict[str, T]) -> Dict[str, T]:
"""Converts a T5 LoRA state dict from the Kohya FLUX LoRA format to LoRA weight format used internally by
InvokeAI.

Example key conversions:

"lora_te2_encoder_block_0_layer_0_SelfAttention_k" -> "encoder.block.0.layer.0.SelfAttention.k"
"lora_te2_encoder_block_0_layer_1_DenseReluDense_wi_0" -> "encoder.block.0.layer.1.DenseReluDense.wi.0"
"""

def replace_func(match: re.Match[str]) -> str:
s = f"encoder.block.{match.group(1)}.layer.{match.group(2)}.{match.group(3)}.{match.group(4)}"
if match.group(5):
s += f".{match.group(5)}"
return s

converted_dict: dict[str, T] = {}
for k, v in state_dict.items():
match = re.match(FLUX_KOHYA_T5_KEY_REGEX, k)
if match:
new_key = re.sub(FLUX_KOHYA_T5_KEY_REGEX, replace_func, k)
converted_dict[new_key] = v
else:
raise ValueError(f"Key '{k}' does not match the expected pattern for FLUX LoRA weights.")

return converted_dict
@@ -1,3 +1,4 @@
# Prefixes used to distinguish between transformer and CLIP text encoder keys in the FLUX InvokeAI LoRA format.
FLUX_LORA_TRANSFORMER_PREFIX = "lora_transformer-"
FLUX_LORA_CLIP_PREFIX = "lora_clip-"
FLUX_LORA_T5_PREFIX = "lora_t5-"
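The new FLUX_LORA_T5_PREFIX namespaces converted T5 layer keys the same way the existing transformer and CLIP prefixes do, so one ModelPatchRaw can hold patches for all three sub-models. A tiny illustration, using the layer key from the T5 docstring example above:

```python
layer_key = "encoder.block.0.layer.0.SelfAttention.k"
print(FLUX_LORA_T5_PREFIX + layer_key)  # lora_t5-encoder.block.0.layer.0.SelfAttention.k
```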
@@ -0,0 +1,163 @@
import re
from typing import Any, Dict

import torch

from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.patches.layers.utils import any_lora_layer_from_state_dict
from invokeai.backend.patches.lora_conversions.flux_diffusers_lora_conversion_utils import (
    lora_layers_from_flux_diffusers_grouped_state_dict,
)
from invokeai.backend.patches.lora_conversions.flux_kohya_lora_conversion_utils import (
    FLUX_KOHYA_CLIP_KEY_REGEX,
    FLUX_KOHYA_T5_KEY_REGEX,
    _convert_flux_clip_kohya_state_dict_to_invoke_format,
    _convert_flux_t5_kohya_state_dict_to_invoke_format,
)
from invokeai.backend.patches.lora_conversions.flux_lora_constants import (
    FLUX_LORA_CLIP_PREFIX,
    FLUX_LORA_T5_PREFIX,
)
from invokeai.backend.patches.lora_conversions.kohya_key_utils import (
    INDEX_PLACEHOLDER,
    ParsingTree,
    insert_periods_into_kohya_key,
)
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw

# A regex pattern that matches all of the transformer keys in the OneTrainer FLUX LoRA format.
# The OneTrainer format uses a mix of the Kohya and Diffusers formats:
# - The base model keys are in Diffusers format.
# - Periods are replaced with underscores, to match Kohya.
# - The LoRA key suffixes (e.g. .alpha, .lora_down.weight, .lora_up.weight) match Kohya.
# Example keys:
# - "lora_transformer_single_transformer_blocks_0_attn_to_k.alpha"
# - "lora_transformer_single_transformer_blocks_0_attn_to_k.dora_scale"
# - "lora_transformer_single_transformer_blocks_0_attn_to_k.lora_down.weight"
# - "lora_transformer_single_transformer_blocks_0_attn_to_k.lora_up.weight"
FLUX_ONETRAINER_TRANSFORMER_KEY_REGEX = (
    r"lora_transformer_(single_transformer_blocks|transformer_blocks)_(\d+)_(\w+)\.(.*)"
)


def is_state_dict_likely_in_flux_onetrainer_format(state_dict: Dict[str, Any]) -> bool:
    """Checks if the provided state dict is likely in the OneTrainer FLUX LoRA format.

    This is intended to be a high-precision detector, but it is not guaranteed to have perfect precision. (A
    perfect-precision detector would require checking all keys against a whitelist and verifying tensor shapes.)

    Note that OneTrainer matches the Kohya format for the CLIP and T5 models.
    """
    return all(
        re.match(FLUX_ONETRAINER_TRANSFORMER_KEY_REGEX, k)
        or re.match(FLUX_KOHYA_CLIP_KEY_REGEX, k)
        or re.match(FLUX_KOHYA_T5_KEY_REGEX, k)
        for k in state_dict.keys()
    )


def lora_model_from_flux_onetrainer_state_dict(state_dict: Dict[str, torch.Tensor]) -> ModelPatchRaw:  # type: ignore
    # Group keys by layer.
    grouped_state_dict: dict[str, dict[str, torch.Tensor]] = {}
    for key, value in state_dict.items():
        layer_name, param_name = key.split(".", 1)
        if layer_name not in grouped_state_dict:
            grouped_state_dict[layer_name] = {}
        grouped_state_dict[layer_name][param_name] = value

    # Split the grouped state dict into transformer, CLIP, and T5 state dicts.
    transformer_grouped_sd: dict[str, dict[str, torch.Tensor]] = {}
    clip_grouped_sd: dict[str, dict[str, torch.Tensor]] = {}
    t5_grouped_sd: dict[str, dict[str, torch.Tensor]] = {}
    for layer_name, layer_state_dict in grouped_state_dict.items():
        if layer_name.startswith("lora_transformer"):
            transformer_grouped_sd[layer_name] = layer_state_dict
        elif layer_name.startswith("lora_te1"):
            clip_grouped_sd[layer_name] = layer_state_dict
        elif layer_name.startswith("lora_te2"):
            t5_grouped_sd[layer_name] = layer_state_dict
        else:
            raise ValueError(f"Layer '{layer_name}' does not match the expected pattern for FLUX LoRA weights.")

    # Convert the state dicts to the InvokeAI format.
    clip_grouped_sd = _convert_flux_clip_kohya_state_dict_to_invoke_format(clip_grouped_sd)
    t5_grouped_sd = _convert_flux_t5_kohya_state_dict_to_invoke_format(t5_grouped_sd)

    # Create LoRA layers.
    layers: dict[str, BaseLayerPatch] = {}
    for model_prefix, grouped_sd in [
        # (FLUX_LORA_TRANSFORMER_PREFIX, transformer_grouped_sd),
        (FLUX_LORA_CLIP_PREFIX, clip_grouped_sd),
        (FLUX_LORA_T5_PREFIX, t5_grouped_sd),
    ]:
        for layer_key, layer_state_dict in grouped_sd.items():
            layers[model_prefix + layer_key] = any_lora_layer_from_state_dict(layer_state_dict)

    # Handle the transformer.
    transformer_layers = _convert_flux_transformer_onetrainer_state_dict_to_invoke_format(transformer_grouped_sd)
    layers.update(transformer_layers)

    # Create and return the LoRAModelRaw.
    return ModelPatchRaw(layers=layers)


# This parsing tree was generated by calling `generate_kohya_parsing_tree_from_keys()` on the keys in
# flux_lora_diffusers_format.py.
flux_transformer_kohya_parsing_tree: ParsingTree = {
    "transformer": {
        "single_transformer_blocks": {
            INDEX_PLACEHOLDER: {
                "attn": {"to_k": {}, "to_q": {}, "to_v": {}},
                "norm": {"linear": {}},
                "proj_mlp": {},
                "proj_out": {},
            }
        },
        "transformer_blocks": {
            INDEX_PLACEHOLDER: {
                "attn": {
                    "add_k_proj": {},
                    "add_q_proj": {},
                    "add_v_proj": {},
                    "to_add_out": {},
                    "to_k": {},
                    "to_out": {INDEX_PLACEHOLDER: {}},
                    "to_q": {},
                    "to_v": {},
                },
                "ff": {"net": {INDEX_PLACEHOLDER: {"proj": {}}}},
                "ff_context": {"net": {INDEX_PLACEHOLDER: {"proj": {}}}},
                "norm1": {"linear": {}},
                "norm1_context": {"linear": {}},
            }
        },
    }
}


def _convert_flux_transformer_onetrainer_state_dict_to_invoke_format(
    state_dict: Dict[str, Dict[str, torch.Tensor]],
) -> dict[str, BaseLayerPatch]:
    """Converts a FLUX transformer LoRA state dict from the OneTrainer FLUX LoRA format to the LoRA weight format used
    internally by InvokeAI.
    """

    # Step 1: Convert the Kohya-style keys with underscores to classic keys with periods.
    # Example:
    # "lora_transformer_single_transformer_blocks_0_attn_to_k.lora_down.weight" -> "transformer.single_transformer_blocks.0.attn.to_k.lora_down.weight"
    lora_prefix = "lora_"
    lora_prefix_length = len(lora_prefix)
    kohya_state_dict: dict[str, Dict[str, torch.Tensor]] = {}
    for key in state_dict.keys():
        # Remove the "lora_" prefix.
        assert key.startswith(lora_prefix)
        new_key = key[lora_prefix_length:]

        # Add periods to the Kohya-style module keys.
        new_key = insert_periods_into_kohya_key(new_key, flux_transformer_kohya_parsing_tree)

        # Replace the old key with the new key.
        kohya_state_dict[new_key] = state_dict[key]

    # Step 2: Convert diffusers module names to the BFL module names.
    return lora_layers_from_flux_diffusers_grouped_state_dict(kohya_state_dict, alpha=None)
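A minimal usage sketch for the two public entry points of this new module, assuming the LoRA weights are loaded from a safetensors file; the file name is illustrative:

```python
from safetensors.torch import load_file

sd = load_file("my_flux_onetrainer_lora.safetensors")  # hypothetical path
if is_state_dict_likely_in_flux_onetrainer_format(sd):
    patch = lora_model_from_flux_onetrainer_state_dict(sd)
```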
102  invokeai/backend/patches/lora_conversions/kohya_key_utils.py  Normal file
@@ -0,0 +1,102 @@
from typing import Iterable

INDEX_PLACEHOLDER = "index_placeholder"


# Type alias for a 'ParsingTree', which is a recursive dict with string keys.
ParsingTree = dict[str, "ParsingTree"]


def insert_periods_into_kohya_key(key: str, parsing_tree: ParsingTree) -> str:
    """Insert periods into a Kohya key based on a parsing tree.

    Kohya format keys are produced by replacing periods with underscores in the original key.

    Example:
    ```
    key = "module_a_module_b_0_attn_to_k"
    parsing_tree = {
        "module_a": {
            "module_b": {
                INDEX_PLACEHOLDER: {
                    "attn": {},
                },
            },
        },
    }
    result = insert_periods_into_kohya_key(key, parsing_tree)
    > "module_a.module_b.0.attn.to_k"
    ```
    """
    # Split key into parts by underscore.
    parts = key.split("_")

    # Build up result by walking through parsing tree and parts.
    result_parts: list[str] = []
    current_part = ""
    current_tree = parsing_tree

    for part in parts:
        if len(current_part) > 0:
            current_part = current_part + "_"
        current_part += part

        if current_part in current_tree:
            # Match found.
            current_tree = current_tree[current_part]
            result_parts.append(current_part)
            current_part = ""
        elif current_part.isnumeric() and INDEX_PLACEHOLDER in current_tree:
            # Match found with index placeholder.
            current_tree = current_tree[INDEX_PLACEHOLDER]
            result_parts.append(current_part)
            current_part = ""

    if len(current_part) > 0:
        raise ValueError(f"Key {key} does not match parsing tree {parsing_tree}.")

    return ".".join(result_parts)


def generate_kohya_parsing_tree_from_keys(keys: Iterable[str]) -> ParsingTree:
    """Generate a parsing tree from a list of keys.

    Example:
    ```
    keys = [
        "module_a.module_b.0.attn.to_k",
        "module_a.module_b.1.attn.to_k",
        "module_a.module_c.proj",
    ]

    tree = generate_kohya_parsing_tree_from_keys(keys)
    > {
    >     "module_a": {
    >         "module_b": {
    >             INDEX_PLACEHOLDER: {
    >                 "attn": {
    >                     "to_k": {},
    >                 },
    >             }
    >         },
    >         "module_c": {
    >             "proj": {},
    >         }
    >     }
    > }
    ```
    """
    tree: ParsingTree = {}
    for key in keys:
        subtree: ParsingTree = tree
        for module_name in key.split("."):
            key = module_name
            if module_name.isnumeric():
                key = INDEX_PLACEHOLDER

            if key not in subtree:
                subtree[key] = {}

            subtree = subtree[key]
    return tree
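A short sketch of how the two helpers compose, mirroring how the OneTrainer conversion above uses them: build a parsing tree from dotted reference keys, then re-insert periods into an underscore-separated Kohya-style key. The example key is illustrative:

```python
keys = ["transformer.single_transformer_blocks.0.attn.to_k"]
tree = generate_kohya_parsing_tree_from_keys(keys)
restored = insert_periods_into_kohya_key("transformer_single_transformer_blocks_0_attn_to_k", tree)
print(restored)  # transformer.single_transformer_blocks.0.attn.to_k
```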
@@ -54,7 +54,9 @@ GGML_TENSOR_OP_TABLE = {
torch.ops.aten.addmm.default: dequantize_and_run, # pyright: ignore
torch.ops.aten.mul.Tensor: dequantize_and_run, # pyright: ignore
torch.ops.aten.add.Tensor: dequantize_and_run, # pyright: ignore
torch.ops.aten.sub.Tensor: dequantize_and_run, # pyright: ignore
torch.ops.aten.allclose.default: dequantize_and_run, # pyright: ignore
torch.ops.aten.slice.Tensor: dequantize_and_run, # pyright: ignore
}

if torch.backends.mps.is_available():
@@ -23,6 +23,12 @@ module.exports = {
|
||||
property: 'randomUUID',
|
||||
message: 'Use of crypto.randomUUID is not allowed as it is not available in all browsers.',
|
||||
},
|
||||
{
|
||||
object: 'navigator',
|
||||
property: 'clipboard',
|
||||
message:
|
||||
'The Clipboard API is not available by default in Firefox. Use the `useClipboard` hook instead, which wraps clipboard access to prevent errors.',
|
||||
},
|
||||
],
|
||||
},
|
||||
overrides: [
|
||||
|
||||
@@ -11,9 +11,11 @@
|
||||
<link id="invoke-favicon" rel="icon" type="icon" href="assets/images/invoke-favicon.svg" />
|
||||
<style>
|
||||
html,
|
||||
body {
|
||||
body,
|
||||
#root {
|
||||
padding: 0;
|
||||
margin: 0;
|
||||
overflow: hidden;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
@@ -23,4 +25,4 @@
|
||||
<script type="module" src="/src/main.tsx"></script>
|
||||
</body>
|
||||
|
||||
</html>
|
||||
</html>
|
||||
@@ -58,10 +58,11 @@
|
||||
"@dagrejs/dagre": "^1.1.4",
|
||||
"@dagrejs/graphlib": "^2.2.4",
|
||||
"@fontsource-variable/inter": "^5.1.0",
|
||||
"@invoke-ai/ui-library": "^0.0.44",
|
||||
"@invoke-ai/ui-library": "^0.0.46",
|
||||
"@nanostores/react": "^0.7.3",
|
||||
"@reduxjs/toolkit": "2.2.3",
|
||||
"@reduxjs/toolkit": "2.5.1",
|
||||
"@roarr/browser-log-writer": "^1.3.0",
|
||||
"@xyflow/react": "^12.4.2",
|
||||
"async-mutex": "^0.5.0",
|
||||
"chakra-react-select": "^4.9.2",
|
||||
"cmdk": "^1.0.0",
|
||||
@@ -96,9 +97,9 @@
|
||||
"react-icons": "^5.3.0",
|
||||
"react-redux": "9.1.2",
|
||||
"react-resizable-panels": "^2.1.4",
|
||||
"react-textarea-autosize": "^8.5.7",
|
||||
"react-use": "^17.5.1",
|
||||
"react-virtuoso": "^4.10.4",
|
||||
"reactflow": "^11.11.4",
|
||||
"redux-dynamic-middlewares": "^2.2.0",
|
||||
"redux-remember": "^5.1.0",
|
||||
"redux-undo": "^1.1.0",
|
||||
@@ -126,7 +127,7 @@
|
||||
"@storybook/addon-storysource": "^8.3.4",
|
||||
"@storybook/manager-api": "^8.3.4",
|
||||
"@storybook/react": "^8.3.4",
|
||||
"@storybook/react-vite": "^8.3.4",
|
||||
"@storybook/react-vite": "^8.5.5",
|
||||
"@storybook/theming": "^8.3.4",
|
||||
"@types/dateformat": "^5.0.2",
|
||||
"@types/lodash-es": "^4.17.12",
|
||||
@@ -134,9 +135,9 @@
|
||||
"@types/react": "^18.3.11",
|
||||
"@types/react-dom": "^18.3.0",
|
||||
"@types/uuid": "^10.0.0",
|
||||
"@vitejs/plugin-react-swc": "^3.7.1",
|
||||
"@vitest/coverage-v8": "^1.6.0",
|
||||
"@vitest/ui": "^1.6.0",
|
||||
"@vitejs/plugin-react-swc": "^3.8.0",
|
||||
"@vitest/coverage-v8": "^3.0.5",
|
||||
"@vitest/ui": "^3.0.5",
|
||||
"concurrently": "^8.2.2",
|
||||
"csstype": "^3.1.3",
|
||||
"dpdm": "^3.14.0",
|
||||
@@ -152,11 +153,11 @@
|
||||
"tsafe": "^1.7.5",
|
||||
"type-fest": "^4.26.1",
|
||||
"typescript": "^5.6.2",
|
||||
"vite": "^5.4.8",
|
||||
"vite": "^6.1.0",
|
||||
"vite-plugin-css-injected-by-js": "^3.5.2",
|
||||
"vite-plugin-dts": "^3.9.1",
|
||||
"vite-plugin-dts": "^4.5.0",
|
||||
"vite-plugin-eslint": "^1.8.1",
|
||||
"vite-tsconfig-paths": "^4.3.2",
|
||||
"vite-tsconfig-paths": "^5.1.4",
|
||||
"vitest": "^1.6.0"
|
||||
},
|
||||
"engines": {
|
||||
|
||||
1929  invokeai/frontend/web/pnpm-lock.yaml  generated
File diff suppressed because it is too large
@@ -99,7 +99,15 @@
|
||||
"clipboard": "Zwischenablage",
|
||||
"generating": "Generieren",
|
||||
"loadingModel": "Lade Modell",
|
||||
"warnings": "Warnungen"
|
||||
"warnings": "Warnungen",
|
||||
"start": "Starten",
|
||||
"count": "Anzahl",
|
||||
"step": "Schritt",
|
||||
"values": "Werte",
|
||||
"min": "Min",
|
||||
"max": "Max",
|
||||
"resetToDefaults": "Auf Standard zurücksetzen",
|
||||
"seed": "Seed"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Bildgröße",
|
||||
@@ -119,7 +127,6 @@
|
||||
"autoAssignBoardOnClick": "Board per Klick automatisch zuweisen",
|
||||
"noImageSelected": "Kein Bild ausgewählt",
|
||||
"starImage": "Bild markieren",
|
||||
"assets": "Ressourcen",
|
||||
"unstarImage": "Markierung entfernen",
|
||||
"image": "Bild",
|
||||
"deleteSelection": "Lösche Auswahl",
|
||||
@@ -1282,7 +1289,16 @@
|
||||
"unknownFieldType": "$t(nodes.unknownField) Typ: {{type}}",
|
||||
"unknownField": "Unbekanntes Feld",
|
||||
"unableToUpdateNodes_one": "{{count}} Knoten kann nicht aktualisiert werden",
|
||||
"unableToUpdateNodes_other": "{{count}} Knoten können nicht aktualisiert werden"
|
||||
"unableToUpdateNodes_other": "{{count}} Knoten können nicht aktualisiert werden",
|
||||
"uniformRandomDistribution": "Uniforme Zufallsverteilung",
|
||||
"linearDistribution": "Lineare Verteilung",
|
||||
"generatorNRandomValues_one": "{{count}} Zufallswert",
|
||||
"generatorNRandomValues_other": "{{count}} Zufallswerte",
|
||||
"arithmeticSequence": "Arithmetische Folge",
|
||||
"noBatchGroup": "keine Gruppe",
|
||||
"generatorNoValues": "leer",
|
||||
"generatorLoading": "wird geladen",
|
||||
"generatorLoadFromFile": "Aus Datei laden"
|
||||
},
|
||||
"hrf": {
|
||||
"enableHrf": "Korrektur für hohe Auflösungen",
|
||||
|
||||
@@ -187,7 +187,10 @@
|
||||
"values": "Values",
|
||||
"resetToDefaults": "Reset to Defaults",
|
||||
"seed": "Seed",
|
||||
"combinatorial": "Combinatorial"
|
||||
"combinatorial": "Combinatorial",
|
||||
"layout": "Layout",
|
||||
"row": "Row",
|
||||
"column": "Column"
|
||||
},
|
||||
"hrf": {
|
||||
"hrf": "High Resolution Fix",
|
||||
@@ -219,9 +222,15 @@
|
||||
"pauseSucceeded": "Processor Paused",
|
||||
"pauseFailed": "Problem Pausing Processor",
|
||||
"cancel": "Cancel",
|
||||
"cancelAllExceptCurrentQueueItemAlertDialog": "Canceling all queue items except the current one will stop pending items but allow the in-progress one to finish.",
|
||||
"cancelAllExceptCurrentQueueItemAlertDialog2": "Are you sure you want to cancel all pending queue items?",
|
||||
"cancelAllExceptCurrentTooltip": "Cancel All Except Current Item",
|
||||
"cancelTooltip": "Cancel Current Item",
|
||||
"cancelSucceeded": "Item Canceled",
|
||||
"cancelFailed": "Problem Canceling Item",
|
||||
"retrySucceeded": "Item Retried",
|
||||
"retryFailed": "Problem Retrying Item",
|
||||
"confirm": "Confirm",
|
||||
"prune": "Prune",
|
||||
"pruneTooltip": "Prune {{item_count}} Completed Items",
|
||||
"pruneSucceeded": "Pruned {{item_count}} Completed Items from Queue",
|
||||
@@ -232,6 +241,7 @@
|
||||
"clearFailed": "Problem Clearing Queue",
|
||||
"cancelBatch": "Cancel Batch",
|
||||
"cancelItem": "Cancel Item",
|
||||
"retryItem": "Retry Item",
|
||||
"cancelBatchSucceeded": "Batch Canceled",
|
||||
"cancelBatchFailed": "Problem Canceling Batch",
|
||||
"clearQueueAlertDialog": "Clearing the queue immediately cancels any processing items and clears the queue entirely. Pending filters will be canceled.",
|
||||
@@ -294,10 +304,15 @@
|
||||
"disableFailed": "Problem Disabling Invocation Cache",
|
||||
"useCache": "Use Cache"
|
||||
},
|
||||
"modelCache": {
|
||||
"clear": "Clear Model Cache",
|
||||
"clearSucceeded": "Model Cache Cleared",
|
||||
"clearFailed": "Problem Clearing Model Cache"
|
||||
},
|
||||
"gallery": {
|
||||
"gallery": "Gallery",
|
||||
"alwaysShowImageSizeBadge": "Always Show Image Size Badge",
|
||||
"assets": "Assets",
|
||||
"alwaysShowImageSizeBadge": "Always Show Image Size Badge",
|
||||
"assetsTab": "Files you’ve uploaded for use in your projects.",
|
||||
"autoAssignBoardOnClick": "Auto-Assign Board on Click",
|
||||
"autoSwitchNewImages": "Auto-Switch to New Images",
|
||||
@@ -922,6 +937,7 @@
|
||||
"noWorkflows": "No Workflows",
|
||||
"noMatchingWorkflows": "No Matching Workflows",
|
||||
"noWorkflow": "No Workflow",
|
||||
"unableToUpdateNode": "Node update failed: node {{node}} of type {{type}} (may require deleting and recreating)",
|
||||
"mismatchedVersion": "Invalid node: node {{node}} of type {{type}} has mismatched version (try updating?)",
|
||||
"missingTemplate": "Invalid node: node {{node}} of type {{type}} missing template (not installed?)",
|
||||
"sourceNodeDoesNotExist": "Invalid edge: source/output node {{node}} does not exist",
|
||||
@@ -929,6 +945,7 @@
|
||||
"sourceNodeFieldDoesNotExist": "Invalid edge: source/output field {{node}}.{{field}} does not exist",
|
||||
"targetNodeFieldDoesNotExist": "Invalid edge: target/input field {{node}}.{{field}} does not exist",
|
||||
"deletedInvalidEdge": "Deleted invalid edge {{source}} -> {{target}}",
|
||||
"deletedMissingNodeFieldFormElement": "Deleted missing form field: node {{nodeId}} field {{fieldName}}",
|
||||
"noConnectionInProgress": "No connection in progress",
|
||||
"node": "Node",
|
||||
"nodeOutputs": "Node Outputs",
|
||||
@@ -943,6 +960,7 @@
|
||||
"nodeVersion": "Node Version",
|
||||
"noOutputRecorded": "No outputs recorded",
|
||||
"notes": "Notes",
|
||||
"description": "Description",
|
||||
"notesDescription": "Add notes about your workflow",
|
||||
"problemSettingTitle": "Problem Setting Title",
|
||||
"resetToDefaultValue": "Reset to default value",
|
||||
@@ -1239,6 +1257,8 @@
|
||||
"problemCopyingLayer": "Unable to Copy Layer",
|
||||
"problemSavingLayer": "Unable to Save Layer",
|
||||
"problemDownloadingImage": "Unable to Download Image",
|
||||
"pasteSuccess": "Pasted to {{destination}}",
|
||||
"pasteFailed": "Paste Failed",
|
||||
"prunedQueue": "Pruned Queue",
|
||||
"sentToCanvas": "Sent to Canvas",
|
||||
"sentToUpscale": "Sent to Upscale",
|
||||
@@ -1255,7 +1275,10 @@
|
||||
"workflowLoaded": "Workflow Loaded",
|
||||
"problemRetrievingWorkflow": "Problem Retrieving Workflow",
|
||||
"workflowDeleted": "Workflow Deleted",
|
||||
"problemDeletingWorkflow": "Problem Deleting Workflow"
|
||||
"problemDeletingWorkflow": "Problem Deleting Workflow",
|
||||
"unableToCopy": "Unable to Copy",
|
||||
"unableToCopyDesc": "Your browser does not support clipboard access. Firefox users may be able to fix this by following ",
|
||||
"unableToCopyDesc_theseSteps": "these steps"
|
||||
},
|
||||
"popovers": {
|
||||
"clipSkip": {
|
||||
@@ -1680,7 +1703,26 @@
|
||||
"download": "Download",
|
||||
"copyShareLink": "Copy Share Link",
|
||||
"copyShareLinkForWorkflow": "Copy Share Link for Workflow",
|
||||
"delete": "Delete"
|
||||
"delete": "Delete",
|
||||
"builder": {
|
||||
"builder": "Builder",
|
||||
"layout": "Layout",
|
||||
"row": "Row",
|
||||
"column": "Column",
|
||||
"label": "Label",
|
||||
"description": "Description",
|
||||
"component": "Component",
|
||||
"numberInput": "Number Input",
|
||||
"slider": "Slider",
|
||||
"both": "Both",
|
||||
"emptyRootPlaceholderViewMode": "Click Edit to start building a form for this workflow.",
|
||||
"emptyRootPlaceholderEditMode": "Drag a form element or node field here to get started.",
|
||||
"containerPlaceholder": "Empty Container",
|
||||
"containerPlaceholderDesc": "Drag a form element or node field into this container.",
|
||||
"headingPlaceholder": "Empty Heading",
|
||||
"textPlaceholder": "Empty Text",
|
||||
"workflowBuilderAlphaWarning": "The workflow builder is currently in alpha. There may be breaking changes before the stable release."
|
||||
}
|
||||
},
|
||||
"controlLayers": {
|
||||
"regional": "Regional",
|
||||
@@ -1695,6 +1737,8 @@
|
||||
"cropLayerToBbox": "Crop Layer to Bbox",
|
||||
"savedToGalleryOk": "Saved to Gallery",
|
||||
"savedToGalleryError": "Error saving to gallery",
|
||||
"regionCopiedToClipboard": "{{region}} Copied to Clipboard",
|
||||
"copyRegionError": "Error copying {{region}}",
|
||||
"newGlobalReferenceImageOk": "Created Global Reference Image",
|
||||
"newGlobalReferenceImageError": "Problem Creating Global Reference Image",
|
||||
"newRegionalReferenceImageOk": "Created Regional Reference Image",
|
||||
@@ -1805,6 +1849,14 @@
|
||||
"newControlLayer": "New $t(controlLayers.controlLayer)",
|
||||
"newInpaintMask": "New $t(controlLayers.inpaintMask)",
|
||||
"newRegionalGuidance": "New $t(controlLayers.regionalGuidance)",
|
||||
"pasteTo": "Paste To",
|
||||
"pasteToAssets": "Assets",
|
||||
"pasteToAssetsDesc": "Paste to Assets",
|
||||
"pasteToBbox": "Bbox",
|
||||
"pasteToBboxDesc": "New Layer (in Bbox)",
|
||||
"pasteToCanvas": "Canvas",
|
||||
"pasteToCanvasDesc": "New Layer (in Canvas)",
|
||||
"pastedTo": "Pasted to {{destination}}",
|
||||
"transparency": "Transparency",
|
||||
"enableTransparencyEffect": "Enable Transparency Effect",
|
||||
"disableTransparencyEffect": "Disable Transparency Effect",
|
||||
@@ -1849,6 +1901,10 @@
|
||||
"rgAutoNegativeNotSupported": "Auto-Negative not supported for selected base model",
|
||||
"rgNoRegion": "no region drawn"
|
||||
},
|
||||
"errors": {
|
||||
"unableToFindImage": "Unable to find image",
|
||||
"unableToLoadImage": "Unable to Load Image"
|
||||
},
|
||||
"controlMode": {
|
||||
"controlMode": "Control Mode",
|
||||
"balanced": "Balanced (recommended)",
|
||||
@@ -1989,6 +2045,30 @@
|
||||
"salt_and_pepper_type": "Salt and Pepper",
|
||||
"noise_color": "Colored Noise",
|
||||
"size": "Noise Size"
|
||||
},
|
||||
"adjust_image": {
|
||||
"label": "Adjust Image",
|
||||
"description": "Adjusts the selected channel of an image.",
|
||||
"channel": "Channel",
|
||||
"value_setting": "Value",
|
||||
"scale_values": "Scale Values",
|
||||
"red": "Red (RGBA)",
|
||||
"green": "Green (RGBA)",
|
||||
"blue": "Blue (RGBA)",
|
||||
"alpha": "Alpha (RGBA)",
|
||||
"cyan": "Cyan (CMYK)",
|
||||
"magenta": "Magenta (CMYK)",
|
||||
"yellow": "Yellow (CMYK)",
|
||||
"black": "Black (CMYK)",
|
||||
"hue": "Hue (HSV)",
|
||||
"saturation": "Saturation (HSV)",
|
||||
"value": "Value (HSV)",
|
||||
"luminosity": "Luminosity (LAB)",
|
||||
"a": "A (LAB)",
|
||||
"b": "B (LAB)",
|
||||
"y": "Y (YCbCr)",
|
||||
"cb": "Cb (YCbCr)",
|
||||
"cr": "Cr (YCbCr)"
|
||||
}
|
||||
},
|
||||
"transform": {
|
||||
@@ -2062,7 +2142,10 @@
|
||||
"newRasterLayer": "New Raster Layer",
|
||||
"newInpaintMask": "New Inpaint Mask",
|
||||
"newRegionalGuidance": "New Regional Guidance",
|
||||
"cropCanvasToBbox": "Crop Canvas to Bbox"
|
||||
"cropCanvasToBbox": "Crop Canvas to Bbox",
|
||||
"copyToClipboard": "Copy to Clipboard",
|
||||
"copyCanvasToClipboard": "Copy Canvas to Clipboard",
|
||||
"copyBboxToClipboard": "Copy Bbox to Clipboard"
|
||||
},
|
||||
"stagingArea": {
|
||||
"accept": "Accept",
|
||||
@@ -2197,11 +2280,12 @@
|
||||
"whatsNew": {
|
||||
"whatsNewInInvoke": "What's New in Invoke",
|
||||
"items": [
|
||||
"Low-VRAM mode",
|
||||
"Dynamic memory management",
|
||||
"Faster model loading times",
|
||||
"Fewer memory errors",
|
||||
"Expanded workflow batch capabilities"
|
||||
"Improved VRAM setting defaults",
|
||||
"On-demand model cache clearing",
|
||||
"Expanded FLUX LoRA compatibility",
|
||||
"Canvas Adjust Image filter",
|
||||
"Cancel all but current queue item",
|
||||
"Copy from and paste to Canvas"
|
||||
],
|
||||
"readReleaseNotes": "Read Release Notes",
|
||||
"watchRecentReleaseVideos": "Watch Recent Release Videos",
|
||||
|
||||
@@ -109,7 +109,6 @@
|
||||
"deleteImage_many": "Eliminar {{count}} Imágenes",
|
||||
"deleteImage_other": "Eliminar {{count}} Imágenes",
|
||||
"deleteImagePermanent": "Las imágenes eliminadas no se pueden restaurar.",
|
||||
"assets": "Activos",
|
||||
"autoAssignBoardOnClick": "Asignar automática tableros al hacer clic",
|
||||
"gallery": "Galería",
|
||||
"noImageSelected": "Sin imágenes seleccionadas",
|
||||
|
||||
@@ -114,7 +114,6 @@
|
||||
"sortDirection": "Direction de tri",
|
||||
"sideBySide": "Côte-à-Côte",
|
||||
"hover": "Au passage de la souris",
|
||||
"assets": "Ressources",
|
||||
"alwaysShowImageSizeBadge": "Toujours montrer le badge de taille de l'Image",
|
||||
"gallery": "Galerie",
|
||||
"bulkDownloadRequestFailed": "Problème lors de la préparation du téléchargement",
|
||||
@@ -302,7 +301,9 @@
|
||||
"hfTokenHelperText": "Un token HF est requis pour utiliser certains modèles. Cliquez ici pour créer ou obtenir votre token.",
|
||||
"hfTokenInvalid": "Token HF invalide ou manquant",
|
||||
"hfForbidden": "Vous n'avez pas accès à ce modèle HF.",
|
||||
"hfTokenInvalidErrorMessage2": "Mettre à jour dans le "
|
||||
"hfTokenInvalidErrorMessage2": "Mettre à jour dans le ",
|
||||
"controlLora": "Controle LoRA",
|
||||
"urlUnauthorizedErrorMessage2": "Découvrir comment ici."
|
||||
},
|
||||
"parameters": {
|
||||
"images": "Images",
|
||||
@@ -333,7 +334,7 @@
|
||||
"showOptionsPanel": "Afficher le panneau latéral (O ou T)",
|
||||
"invoke": {
|
||||
"noPrompts": "Aucun prompts généré",
|
||||
"missingInputForField": "{{nodeLabel}} -> {{fieldLabel}} entrée manquante",
|
||||
"missingInputForField": "entrée manquante",
|
||||
"missingFieldTemplate": "Modèle de champ manquant",
|
||||
"invoke": "Invoke",
|
||||
"addingImagesTo": "Ajouter des images à",
|
||||
@@ -354,7 +355,9 @@
|
||||
"canvasIsCompositing": "La toile est en train de composer",
|
||||
"collectionTooFewItems": "{{nodeLabel}} -> {{fieldLabel}} : trop peu d'éléments, minimum {{minItems}}",
|
||||
"collectionTooManyItems": "{{nodeLabel}} -> {{fieldLabel}} : trop d'éléments, maximum {{maxItems}}",
|
||||
"canvasIsSelectingObject": "La toile est occupée (sélection d'objet)"
|
||||
"canvasIsSelectingObject": "La toile est occupée (sélection d'objet)",
|
||||
"emptyBatches": "lots vides",
|
||||
"batchNodeNotConnected": "Noeud de lots non connecté : {{label}}"
|
||||
},
|
||||
"negativePromptPlaceholder": "Prompt Négatif",
|
||||
"positivePromptPlaceholder": "Prompt Positif",
|
||||
@@ -1631,7 +1634,26 @@
|
||||
"boardAccessError": "Impossible de trouver la planche {{board_id}}, réinitialisation à la valeur par défaut",
|
||||
"workflowHelpText": "Besoin d'aide ? Consultez notre guide sur <LinkComponent>Comment commencer avec les Workflows</LinkComponent>.",
|
||||
"noWorkflows": "Aucun Workflows",
|
||||
"noMatchingWorkflows": "Aucun Workflows correspondant"
|
||||
"noMatchingWorkflows": "Aucun Workflows correspondant",
|
||||
"arithmeticSequence": "Séquence Arithmétique",
|
||||
"uniformRandomDistribution": "Distribution Aléatoire Uniforme",
|
||||
"noBatchGroup": "aucun groupe",
|
||||
"generatorLoading": "chargement",
|
||||
"generatorLoadFromFile": "Charger depuis un Fichier",
|
||||
"dynamicPromptsRandom": "Prompts Dynamiques (Aléatoire)",
|
||||
"integerRangeGenerator": "Générateur d'interval d'entiers",
|
||||
"generateValues": "Générer Valeurs",
|
||||
"linearDistribution": "Distribution Linéaire",
|
||||
"floatRangeGenerator": "Générateur d'interval de nombres décimaux",
|
||||
"generatorNRandomValues_one": "{{count}} valeur aléatoire",
|
||||
"generatorNRandomValues_many": "{{count}} valeurs aléatoires",
|
||||
"generatorNRandomValues_other": "{{count}} valeurs aléatoires",
|
||||
"dynamicPromptsCombinatorial": "Prompts Dynamiques (Combinatoire)",
|
||||
"parseString": "Analyser la chaine de charactères",
|
||||
"internalDesc": "Cette invocation est utilisée internalement par Invoke. En fonction des mises à jours il est possible que des changements y soit effectués ou qu'elle soit supprimé sans prévention.",
|
||||
"splitOn": "Diviser sur",
|
||||
"generatorNoValues": "vide",
|
||||
"addItem": "Ajouter un élément"
|
||||
},
|
||||
"models": {
|
||||
"noMatchingModels": "Aucun modèle correspondant",
|
||||
@@ -2227,6 +2249,10 @@
|
||||
"understandingImageToImageAndDenoising": {
|
||||
"title": "Comprendre l'Image-à-Image et le Débruitage",
|
||||
"description": "Aperçu des transformations d'image à image et du débruitage dans Invoke."
|
||||
},
|
||||
"howDoIOutpaint": {
|
||||
"title": "Comment effectuer un outpainting ?",
|
||||
"description": "Guide pour l'extension au-delà des bordures de l'image originale."
|
||||
}
|
||||
},
|
||||
"gettingStarted": "Commencer",
|
||||
|
||||
@@ -104,7 +104,8 @@
|
||||
"end": "Fine",
|
||||
"resetToDefaults": "Ripristina le impostazioni predefinite",
|
||||
"seed": "Seme",
|
||||
"combinatorial": "Combinatorio"
|
||||
"combinatorial": "Combinatorio",
|
||||
"count": "Quantità"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Dimensione dell'immagine",
|
||||
@@ -115,7 +116,6 @@
|
||||
"deleteImage_many": "Elimina {{count}} immagini",
|
||||
"deleteImage_other": "Elimina {{count}} immagini",
|
||||
"deleteImagePermanent": "Le immagini eliminate non possono essere ripristinate.",
|
||||
"assets": "Risorse",
|
||||
"autoAssignBoardOnClick": "Assegna automaticamente la bacheca al clic",
|
||||
"featuresWillReset": "Se elimini questa immagine, quelle funzionalità verranno immediatamente ripristinate.",
|
||||
"loading": "Caricamento in corso",
|
||||
@@ -172,7 +172,8 @@
|
||||
"imagesTab": "Immagini create e salvate in Invoke.",
|
||||
"assetsTab": "File che hai caricato per usarli nei tuoi progetti.",
|
||||
"boardsSettings": "Impostazioni Bacheche",
|
||||
"imagesSettings": "Impostazioni Immagini Galleria"
|
||||
"imagesSettings": "Impostazioni Immagini Galleria",
|
||||
"assets": "Risorse"
|
||||
},
|
||||
"hotkeys": {
|
||||
"searchHotkeys": "Cerca tasti di scelta rapida",
|
||||
@@ -832,7 +833,12 @@
|
||||
"uploadFailedInvalidUploadDesc_withCount_one": "Devi caricare al massimo 1 immagine PNG o JPEG.",
|
||||
"uploadFailedInvalidUploadDesc_withCount_many": "Devi caricare al massimo {{count}} immagini PNG o JPEG.",
|
||||
"uploadFailedInvalidUploadDesc_withCount_other": "Devi caricare al massimo {{count}} immagini PNG o JPEG.",
|
||||
"outOfMemoryErrorDescLocal": "Segui la nostra <LinkComponent>guida per bassa VRAM</LinkComponent> per ridurre gli OOM."
|
||||
"outOfMemoryErrorDescLocal": "Segui la nostra <LinkComponent>guida per bassa VRAM</LinkComponent> per ridurre gli OOM.",
|
||||
"pasteFailed": "Incolla non riuscita",
|
||||
"pasteSuccess": "Incollato su {{destination}}",
|
||||
"unableToCopy": "Impossibile copiare",
|
||||
"unableToCopyDesc": "Il tuo browser non supporta l'accesso agli appunti. Gli utenti di Firefox potrebbero risolvere il problema seguendo ",
|
||||
"unableToCopyDesc_theseSteps": "questi passaggi"
|
||||
},
|
||||
"accessibility": {
|
||||
"invokeProgressBar": "Barra di avanzamento generazione",
|
||||
@@ -1131,7 +1137,11 @@
|
||||
"generation": "Generazione",
|
||||
"other": "Altro",
|
||||
"gallery": "Galleria",
|
||||
"batchSize": "Dimensione del lotto"
|
||||
"batchSize": "Dimensione del lotto",
|
||||
"cancelAllExceptCurrentQueueItemAlertDialog2": "Vuoi davvero annullare tutti gli elementi in coda in sospeso?",
|
||||
"confirm": "Conferma",
|
||||
"cancelAllExceptCurrentQueueItemAlertDialog": "L'annullamento di tutti gli elementi della coda, eccetto quello corrente, interromperà gli elementi in sospeso ma consentirà il completamento di quello in corso.",
|
||||
"cancelAllExceptCurrentTooltip": "Annulla tutto tranne l'elemento corrente"
|
||||
},
|
||||
"models": {
|
||||
"noMatchingModels": "Nessun modello corrispondente",
|
||||
@@ -1962,6 +1972,25 @@
|
||||
"noise_type": "Tipo di rumore",
|
||||
"label": "Aggiungi rumore",
|
||||
"noise_amount": "Quantità"
|
||||
},
|
||||
"adjust_image": {
|
||||
"description": "Regola il canale selezionato di un'immagine.",
|
||||
"alpha": "Alfa (RGBA)",
|
||||
"label": "Regola l'immagine",
|
||||
"blue": "Blu (RGBA)",
|
||||
"luminosity": "Luminosità (LAB)",
|
||||
"channel": "Canale",
|
||||
"value_setting": "Valore",
|
||||
"scale_values": "Scala i valori",
|
||||
"red": "Rosso (RGBA)",
|
||||
"green": "Verde (RGBA)",
|
||||
"cyan": "Ciano (CMYK)",
|
||||
"magenta": "Magenta (CMYK)",
|
||||
"yellow": "Giallo (CMYK)",
|
||||
"black": "Nero (CMYK)",
|
||||
"hue": "Tonalità (HSV)",
|
||||
"saturation": "Saturazione (HSV)",
|
||||
"value": "Valore (HSV)"
|
||||
}
|
||||
},
|
||||
"controlLayers_withCount_hidden": "Livelli di controllo ({{count}} nascosti)",
|
||||
@@ -2069,7 +2098,10 @@
|
||||
"saveCanvasToGallery": "Salva la Tela nella Galleria",
|
||||
"saveToGalleryGroup": "Salva nella Galleria",
|
||||
"newInpaintMask": "Nuova maschera Inpaint",
|
||||
"newRegionalGuidance": "Nuova Guida Regionale"
|
||||
"newRegionalGuidance": "Nuova Guida Regionale",
|
||||
"copyToClipboard": "Copia negli appunti",
|
||||
"copyCanvasToClipboard": "Copia la tela negli appunti",
|
||||
"copyBboxToClipboard": "Copia il riquadro di delimitazione negli appunti"
|
||||
},
|
||||
"newImg2ImgCanvasFromImage": "Nuova Immagine da immagine",
|
||||
"copyRasterLayerTo": "Copia $t(controlLayers.rasterLayer) in",
|
||||
@@ -2132,7 +2164,17 @@
|
||||
"ipAdapterIncompatibleBaseModel": "modello base dell'immagine di riferimento incompatibile",
|
||||
"ipAdapterNoImageSelected": "nessuna immagine di riferimento selezionata",
|
||||
"rgAutoNegativeNotSupported": "Auto-Negativo non supportato per il modello base selezionato"
|
||||
}
|
||||
},
|
||||
"pasteTo": "Incolla su",
|
||||
"pasteToBboxDesc": "Nuovo livello (nel riquadro di delimitazione)",
|
||||
"pasteToAssets": "Risorse",
|
||||
"copyRegionError": "Errore durante la copia di {{region}}",
|
||||
"pasteToAssetsDesc": "Incolla in Risorse",
|
||||
"pasteToBbox": "Riquadro di delimitazione",
|
||||
"pasteToCanvas": "Tela",
|
||||
"pasteToCanvasDesc": "Nuovo livello (nella Tela)",
|
||||
"pastedTo": "Incollato su {{destination}}",
|
||||
"regionCopiedToClipboard": "{{region}} Copiato negli appunti"
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
@@ -2231,10 +2273,12 @@
|
||||
"watchRecentReleaseVideos": "Guarda i video su questa versione",
|
||||
"watchUiUpdatesOverview": "Guarda le novità dell'interfaccia",
|
||||
"items": [
|
||||
"Modalità Bassa-VRAM",
|
||||
"Gestione dinamica della memoria",
|
||||
"Tempi di caricamento del modello più rapidi",
|
||||
"Meno errori di memoria"
|
||||
"Impostazioni predefinite VRAM migliorate",
|
||||
"Cancellazione della cache del modello su richiesta",
|
||||
"Compatibilità estesa FLUX LoRA",
|
||||
"Filtro Regola Immagine su Tela",
|
||||
"Annulla tutto tranne l'elemento della coda corrente",
|
||||
"Copia da e incolla sulla Tela"
|
||||
]
|
||||
},
|
||||
"system": {
|
||||
@@ -2324,5 +2368,10 @@
|
||||
"watch": "Guarda",
|
||||
"studioSessionsDesc1": "Dai un'occhiata a <StudioSessionsPlaylistLink /> per approfondimenti su Invoke.",
|
||||
"studioSessionsDesc2": "Unisciti al nostro <DiscordLink /> per partecipare alle sessioni live e fare domande. Le sessioni vengono caricate sulla playlist la settimana successiva."
|
||||
},
|
||||
"modelCache": {
|
||||
"clear": "Cancella la cache del modello",
|
||||
"clearSucceeded": "Cache del modello cancellata",
|
||||
"clearFailed": "Problema durante la cancellazione della cache del modello"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -106,7 +106,6 @@
|
||||
"featuresWillReset": "この画像を削除すると、これらの機能は即座にリセットされます。",
|
||||
"unstarImage": "スターを外す",
|
||||
"loading": "ロード中",
|
||||
"assets": "アセット",
|
||||
"currentlyInUse": "この画像は現在下記の機能を使用しています:",
|
||||
"drop": "ドロップ",
|
||||
"dropOrUpload": "$t(gallery.drop) またはアップロード",
|
||||
|
||||
@@ -68,7 +68,6 @@
|
||||
"gallerySettings": "갤러리 설정",
|
||||
"deleteSelection": "선택 항목 삭제",
|
||||
"featuresWillReset": "이 이미지를 삭제하면 해당 기능이 즉시 재설정됩니다.",
|
||||
"assets": "자산",
|
||||
"noImagesInGallery": "보여줄 이미지가 없음",
|
||||
"autoSwitchNewImages": "새로운 이미지로 자동 전환",
|
||||
"loading": "불러오는 중",
|
||||
|
||||
@@ -90,7 +90,6 @@
|
||||
"deleteImage_one": "Verwijder afbeelding",
|
||||
"deleteImage_other": "",
|
||||
"deleteImagePermanent": "Verwijderde afbeeldingen kunnen niet worden hersteld.",
|
||||
"assets": "Eigen onderdelen",
|
||||
"autoAssignBoardOnClick": "Ken automatisch bord toe bij klikken",
|
||||
"featuresWillReset": "Als je deze afbeelding verwijdert, dan worden deze functies onmiddellijk teruggezet.",
|
||||
"loading": "Bezig met laden",
|
||||
|
||||
@@ -105,7 +105,6 @@
|
||||
"assetsTab": "Pliki, które wrzuciłeś do użytku w twoich projektach.",
|
||||
"currentlyInUse": "Ten obraz jest obecnie w użyciu przez następujące funkcje:",
|
||||
"boardsSettings": "Ustawienia tablic",
|
||||
"assets": "Aktywy",
|
||||
"autoAssignBoardOnClick": "Automatycznie przypisz tablicę po kliknięciu",
|
||||
"copy": "Kopiuj"
|
||||
},
|
||||
|
||||
@@ -106,7 +106,6 @@
|
||||
"deleteImage_one": "Удалить изображение",
|
||||
"deleteImage_few": "Удалить {{count}} изображения",
|
||||
"deleteImage_many": "Удалить {{count}} изображений",
|
||||
"assets": "Ресурсы",
|
||||
"autoAssignBoardOnClick": "Авто-назначение доски по клику",
|
||||
"deleteSelection": "Удалить выделенное",
|
||||
"featuresWillReset": "Если вы удалите это изображение, эти функции будут немедленно сброшены.",
|
||||
|
||||
@@ -195,7 +195,6 @@
|
||||
},
|
||||
"gallery": {
|
||||
"deleteImagePermanent": "Silinen görseller geri getirilemez.",
|
||||
"assets": "Özkaynaklar",
|
||||
"autoAssignBoardOnClick": "Tıklanan Panoya Otomatik Atama",
|
||||
"loading": "Yükleniyor",
|
||||
"starImage": "Yıldız Koy",
|
||||
|
||||
@@ -86,7 +86,6 @@
|
||||
"bulkDownloadRequestedDesc": "Yêu cầu tải xuống đang được chuẩn bị. Vui lòng chờ trong giây lát.",
|
||||
"starImage": "Gắn Sao Cho Ảnh",
|
||||
"openViewer": "Mở Trình Xem",
|
||||
"assets": "Tài Nguyên",
|
||||
"viewerImage": "Trình Xem Ảnh",
|
||||
"sideBySide": "Cạnh Nhau",
|
||||
"alwaysShowImageSizeBadge": "Luôn Hiển Thị Kích Thước Ảnh",
|
||||
@@ -118,7 +117,8 @@
|
||||
"unstarImage": "Ngừng Gắn Sao Cho Ảnh",
|
||||
"compareHelp2": "Nhấn <Kbd>M</Kbd> để tuần hoàn trong chế độ so sánh.",
|
||||
"boardsSettings": "Thiết Lập Bảng",
|
||||
"imagesSettings": "Cài Đặt Thư Viện Ảnh"
|
||||
"imagesSettings": "Cài Đặt Thư Viện Ảnh",
|
||||
"assets": "Tài Nguyên"
|
||||
},
|
||||
"common": {
|
||||
"ipAdapter": "IP Adapter",
|
||||
@@ -228,7 +228,9 @@
|
||||
"end": "Kết Thúc",
|
||||
"min": "Tối Thiểu",
|
||||
"max": "Tối Đa",
|
||||
"resetToDefaults": "Đặt Lại Về Mặc Định"
|
||||
"resetToDefaults": "Đặt Lại Về Mặc Định",
|
||||
"seed": "Hạt Giống",
|
||||
"combinatorial": "Tổ Hợp"
|
||||
},
|
||||
"prompt": {
|
||||
"addPromptTrigger": "Thêm Prompt Trigger",
|
||||
@@ -302,7 +304,11 @@
|
||||
"completedIn": "Hoàn tất trong",
|
||||
"graphQueued": "Đồ Thị Đã Vào Hàng",
|
||||
"batchQueuedDesc_other": "Thêm {{count}} phiên vào {{direction}} của hàng",
|
||||
"batchSize": "Kích Thước Lô"
|
||||
"batchSize": "Kích Thước Lô",
|
||||
"cancelAllExceptCurrentQueueItemAlertDialog": "Huỷ tất cả mục đang xếp hàng ngoại trừ mục hiện tại, sẽ dừng các mục đang chờ nhưng cho phép các mục đang chạy được hoàn tất.",
|
||||
"cancelAllExceptCurrentQueueItemAlertDialog2": "Bạn có chắc muốn huỷ tất cả mục đang chờ?",
|
||||
"cancelAllExceptCurrentTooltip": "Huỷ Bỏ Tất Cả Ngoại Trừ Mục Hiện Tại",
|
||||
"confirm": "Đồng Ý"
|
||||
},
|
||||
"hotkeys": {
|
||||
"canvas": {
|
||||
@@ -985,7 +991,11 @@
|
||||
"generatorNoValues": "trống",
|
||||
"splitOn": "Tách Ở",
|
||||
"arithmeticSequence": "Cấp Số Cộng",
|
||||
"generatorNRandomValues_other": "{{count}} giá trị ngẫu nhiên"
|
||||
"generatorNRandomValues_other": "{{count}} giá trị ngẫu nhiên",
|
||||
"generatorLoading": "đang tải",
|
||||
"generatorLoadFromFile": "Tải Từ Tệp",
|
||||
"dynamicPromptsRandom": "Dynamic Prompts (Ngẫu Nhiên)",
|
||||
"dynamicPromptsCombinatorial": "Dynamic Prompts (Tổ Hợp)"
|
||||
},
|
||||
"popovers": {
|
||||
"paramCFGRescaleMultiplier": {
|
||||
@@ -1543,11 +1553,12 @@
|
||||
"perPromptLabel": "Một Hạt Giống Mỗi Ảnh",
|
||||
"perIterationLabel": "Hạt Giống Mỗi Lần Lặp Lại"
|
||||
},
|
||||
"loading": "Tạo Sinh Dùng Dynamic Prompt...",
|
||||
"loading": "Tạo Sinh Bằng Dynamic Prompt...",
|
||||
"showDynamicPrompts": "HIện Dynamic Prompt",
|
||||
"maxPrompts": "Số Lệnh Tối Đa",
|
||||
"promptsPreview": "Xem Trước Lệnh",
|
||||
"dynamicPrompts": "Dynamic Prompt"
|
||||
"dynamicPrompts": "Dynamic Prompt",
|
||||
"promptsToGenerate": "Lệnh Để Tạo Sinh"
|
||||
},
|
||||
"settings": {
|
||||
"beta": "Beta",
|
||||
@@ -1791,7 +1802,10 @@
|
||||
"newControlLayer": "Layer Điều Khiển Được Mới",
|
||||
"newRasterLayer": "Layer Dạng Raster Mới",
|
||||
"bboxGroup": "Được Tạo Từ Hộp Giới Hạn",
|
||||
"canvasGroup": "Canvas"
|
||||
"canvasGroup": "Canvas",
|
||||
"copyCanvasToClipboard": "Sao Chép Canvas Vào Clipboard",
|
||||
"copyToClipboard": "Sao Chép Vào Clipboard",
|
||||
"copyBboxToClipboard": "Sao Chép Hộp Giới Hạn Vào Clipboard"
|
||||
},
|
||||
"stagingArea": {
|
||||
"saveToGallery": "Lưu Vào Thư Viện",
|
||||
@@ -1908,6 +1922,30 @@
|
||||
"gaussian_type": "Gaussian",
|
||||
"noise_color": "Màu Nhiễu",
|
||||
"size": "Cỡ Nhiễu"
|
||||
},
|
||||
"adjust_image": {
|
||||
"channel": "Kênh Màu",
|
||||
"cyan": "Lục Lam (Cmyk)",
|
||||
"value_setting": "Giá Trị",
|
||||
"scale_values": "Giá Trị Theo Tỉ Lệ",
|
||||
"red": "Đỏ (Rgba)",
|
||||
"green": "Lục (rGba)",
|
||||
"blue": "Lam (rgBa)",
|
||||
"alpha": "Độ Trong Suốt (rgbA)",
|
||||
"luminosity": "Độ Sáng (Lab)",
|
||||
"magenta": "Hồng Đỏ (cMyk)",
|
||||
"yellow": "Vàng (cmYk)",
|
||||
"description": "Điều chỉnh kênh màu được chọn của ảnh.",
|
||||
"black": "Đen (cmyK)",
|
||||
"cr": "Cr (ycC)",
|
||||
"label": "Điều Chỉnh Ảnh",
|
||||
"value": "Độ Sáng (hsV)",
|
||||
"saturation": "Độ Bão Hoà (hSv)",
|
||||
"hue": "Vùng Màu (Hsv)",
|
||||
"a": "A (lAb)",
|
||||
"b": "B (laB)",
|
||||
"y": "Y (Ycc)",
|
||||
"cb": "Cb (yCc)"
|
||||
}
|
||||
},
|
||||
"transform": {
|
||||
@@ -1986,6 +2024,20 @@
|
||||
"rgReferenceImagesNotSupported": "Ảnh Mẫu Khu Vực không được hỗ trợ cho model cơ sở được chọn",
|
||||
"rgAutoNegativeNotSupported": "Tự Động Đảo Chiều không được hỗ trợ cho model cơ sở được chọn",
|
||||
"rgNoRegion": "không có khu vực được vẽ"
|
||||
},
|
||||
"pasteTo": "Dán Vào",
|
||||
"pasteToAssets": "Tài Nguyên",
|
||||
"pasteToAssetsDesc": "Dán Vào Tài Nguyên",
|
||||
"pasteToBbox": "Hộp Giới Hạn",
|
||||
"pasteToBboxDesc": "Layer Mới (Trong Hộp Giới Hạn)",
|
||||
"pasteToCanvas": "Canvas",
|
||||
"pasteToCanvasDesc": "Layer Mới (Trong Canvas)",
|
||||
"pastedTo": "Dán Vào {{destination}}",
|
||||
"regionCopiedToClipboard": "Sao Chép {{region}} Vào Clipboard",
|
||||
"copyRegionError": "Lỗi khi sao chép {{region}}",
|
||||
"errors": {
|
||||
"unableToLoadImage": "Không Thể Tải Hình Ảnh",
|
||||
"unableToFindImage": "Không Thể Tìm Hình Ảnh"
|
||||
}
|
||||
},
|
||||
"stylePresets": {
|
||||
@@ -2117,7 +2169,12 @@
|
||||
"problemDownloadingImage": "Không Thể Tải Xuống Ảnh",
|
||||
"problemCopyingLayer": "Không Thể Sao Chép Layer",
|
||||
"problemSavingLayer": "Không Thể Lưu Layer",
|
||||
"outOfMemoryErrorDescLocal": "Làm theo <LinkComponent>hướng dẫn VRAM Thấp</LinkComponent> của chúng tôi để hạn chế OOM (Tràn bộ nhớ)."
|
||||
"outOfMemoryErrorDescLocal": "Làm theo <LinkComponent>hướng dẫn VRAM Thấp</LinkComponent> của chúng tôi để hạn chế OOM (Tràn bộ nhớ).",
|
||||
"unableToCopy": "Không Thể Sao Chép",
|
||||
"unableToCopyDesc_theseSteps": "các bước sau",
|
||||
"unableToCopyDesc": "Trình duyệt của bạn không hỗ trợ tính năng clipboard. Người dùng Firefox có thể khắc phục theo ",
|
||||
"pasteSuccess": "Dán Vào {{destination}}",
|
||||
"pasteFailed": "Dán Thất Bại"
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
@@ -2212,10 +2269,12 @@
|
||||
"watchRecentReleaseVideos": "Xem Video Phát Hành Mới Nhất",
|
||||
"watchUiUpdatesOverview": "Xem Tổng Quan Về Những Cập Nhật Cho Giao Diện Người Dùng",
|
||||
"items": [
|
||||
"Chế độ VRAM thấp",
|
||||
"Trình quản lý bộ nhớ động",
|
||||
"Tải model nhanh hơn",
|
||||
"Ít lỗi bộ nhớ hơn"
|
||||
"Cải thiện các thiết lập mặc định của VRAM",
|
||||
"Xoá bộ nhớ đệm của model theo yêu cầu",
|
||||
"Mở rộng khả năng tương thích LoRA trên FLUX",
|
||||
"Bộ lọc điều chỉnh ảnh trên Canvas",
|
||||
"Huỷ tất cả trừ mục đang xếp hàng hiện tại",
|
||||
"Sao chép và dán trên Canvas"
|
||||
]
|
||||
},
|
||||
"upsell": {
|
||||
@@ -2285,5 +2344,10 @@
|
||||
},
|
||||
"controlCanvas": "Điều Khiển Canvas",
|
||||
"watch": "Xem"
|
||||
},
|
||||
"modelCache": {
|
||||
"clearSucceeded": "Cache Model Đã Được Dọn",
|
||||
"clearFailed": "Có Vấn Đề Khi Dọn Cache Model",
|
||||
"clear": "Dọn Cache Model"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -107,7 +107,6 @@
|
||||
"noImagesInGallery": "无图像可用于显示",
|
||||
"deleteImage_other": "删除{{count}}张图片",
|
||||
"deleteImagePermanent": "删除的图片无法被恢复。",
|
||||
"assets": "素材",
|
||||
"autoAssignBoardOnClick": "点击后自动分配面板",
|
||||
"featuresWillReset": "如果您删除该图像,这些功能会立即被重置。",
|
||||
"loading": "加载中",
|
||||
|
||||
@@ -12,10 +12,12 @@ import { useFocusRegionWatcher } from 'common/hooks/focus';
|
||||
import { useClearStorage } from 'common/hooks/useClearStorage';
|
||||
import { useGlobalHotkeys } from 'common/hooks/useGlobalHotkeys';
|
||||
import ChangeBoardModal from 'features/changeBoardModal/components/ChangeBoardModal';
|
||||
import { CanvasPasteModal } from 'features/controlLayers/components/CanvasPasteModal';
|
||||
import {
|
||||
NewCanvasSessionDialog,
|
||||
NewGallerySessionDialog,
|
||||
} from 'features/controlLayers/components/NewSessionConfirmationAlertDialog';
|
||||
import { CanvasManagerProviderGate } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
|
||||
import DeleteImageModal from 'features/deleteImageModal/components/DeleteImageModal';
|
||||
import { FullscreenDropzone } from 'features/dnd/FullscreenDropzone';
|
||||
import { DynamicPromptsModal } from 'features/dynamicPrompts/components/DynamicPromptsPreviewModal';
|
||||
@@ -23,7 +25,9 @@ import DeleteBoardModal from 'features/gallery/components/Boards/DeleteBoardModa
|
||||
import { ImageContextMenu } from 'features/gallery/components/ImageContextMenu/ImageContextMenu';
|
||||
import { useStarterModelsToast } from 'features/modelManagerV2/hooks/useStarterModelsToast';
|
||||
import { ShareWorkflowModal } from 'features/nodes/components/sidePanel/WorkflowListMenu/ShareWorkflowModal';
|
||||
import { CancelAllExceptCurrentQueueItemConfirmationAlertDialog } from 'features/queue/components/CancelAllExceptCurrentQueueItemConfirmationAlertDialog';
|
||||
import { ClearQueueConfirmationsAlertDialog } from 'features/queue/components/ClearQueueConfirmationAlertDialog';
|
||||
import { useReadinessWatcher } from 'features/queue/store/readiness';
|
||||
import { DeleteStylePresetDialog } from 'features/stylePresets/components/DeleteStylePresetDialog';
|
||||
import { StylePresetModal } from 'features/stylePresets/components/StylePresetForm/StylePresetModal';
|
||||
import RefreshAfterResetModal from 'features/system/components/SettingsModal/RefreshAfterResetModal';
|
||||
@@ -50,53 +54,25 @@ interface Props {
|
||||
}
|
||||
|
||||
const App = ({ config = DEFAULT_CONFIG, studioInitAction }: Props) => {
|
||||
const language = useAppSelector(selectLanguage);
|
||||
const logger = useLogger('system');
|
||||
const dispatch = useAppDispatch();
|
||||
const clearStorage = useClearStorage();
|
||||
|
||||
// singleton!
|
||||
useSocketIO();
|
||||
useGlobalModifiersInit();
|
||||
useGlobalHotkeys();
|
||||
useGetOpenAPISchemaQuery();
|
||||
useSyncLoggingConfig();
|
||||
|
||||
const handleReset = useCallback(() => {
|
||||
clearStorage();
|
||||
location.reload();
|
||||
return false;
|
||||
}, [clearStorage]);
|
||||
|
||||
useEffect(() => {
|
||||
i18n.changeLanguage(language);
|
||||
}, [language]);
|
||||
|
||||
useEffect(() => {
|
||||
if (size(config)) {
|
||||
logger.info({ config }, 'Received config');
|
||||
dispatch(configChanged(config));
|
||||
}
|
||||
}, [dispatch, config, logger]);
|
||||
|
||||
useEffect(() => {
|
||||
dispatch(appStarted());
|
||||
}, [dispatch]);
|
||||
|
||||
useStudioInitAction(studioInitAction);
|
||||
useStarterModelsToast();
|
||||
useSyncQueueStatus();
|
||||
useFocusRegionWatcher();
|
||||
|
||||
return (
|
||||
<ErrorBoundary onReset={handleReset} FallbackComponent={AppErrorBoundaryFallback}>
|
||||
<Box id="invoke-app-wrapper" w="100dvw" h="100dvh" position="relative" overflow="hidden">
|
||||
<AppContent />
|
||||
</Box>
|
||||
<HookIsolator config={config} studioInitAction={studioInitAction} />
|
||||
<DeleteImageModal />
|
||||
<ChangeBoardModal />
|
||||
<DynamicPromptsModal />
|
||||
<StylePresetModal />
|
||||
<CancelAllExceptCurrentQueueItemConfirmationAlertDialog />
|
||||
<ClearQueueConfirmationsAlertDialog />
|
||||
<NewWorkflowConfirmationAlertDialog />
|
||||
<DeleteStylePresetDialog />
|
||||
@@ -110,8 +86,51 @@ const App = ({ config = DEFAULT_CONFIG, studioInitAction }: Props) => {
|
||||
<ImageContextMenu />
|
||||
<FullscreenDropzone />
|
||||
<VideosModal />
|
||||
<CanvasManagerProviderGate>
|
||||
<CanvasPasteModal />
|
||||
</CanvasManagerProviderGate>
|
||||
</ErrorBoundary>
|
||||
);
|
||||
};
|
||||
|
||||
export default memo(App);
|
||||
|
||||
// Running these hooks in a separate component ensures we do not inadvertently rerender the entire app when they change.
|
||||
const HookIsolator = memo(
|
||||
({ config, studioInitAction }: { config: PartialAppConfig; studioInitAction?: StudioInitAction }) => {
|
||||
const language = useAppSelector(selectLanguage);
|
||||
const logger = useLogger('system');
|
||||
const dispatch = useAppDispatch();
|
||||
|
||||
// singleton!
|
||||
useReadinessWatcher();
|
||||
useSocketIO();
|
||||
useGlobalModifiersInit();
|
||||
useGlobalHotkeys();
|
||||
useGetOpenAPISchemaQuery();
|
||||
useSyncLoggingConfig();
|
||||
|
||||
useEffect(() => {
|
||||
i18n.changeLanguage(language);
|
||||
}, [language]);
|
||||
|
||||
useEffect(() => {
|
||||
if (size(config)) {
|
||||
logger.info({ config }, 'Received config');
|
||||
dispatch(configChanged(config));
|
||||
}
|
||||
}, [dispatch, config, logger]);
|
||||
|
||||
useEffect(() => {
|
||||
dispatch(appStarted());
|
||||
}, [dispatch]);
|
||||
|
||||
useStudioInitAction(studioInitAction);
|
||||
useStarterModelsToast();
|
||||
useSyncQueueStatus();
|
||||
useFocusRegionWatcher();
|
||||
|
||||
return null;
|
||||
}
|
||||
);
|
||||
HookIsolator.displayName = 'HookIsolator';
|
||||
|
||||
@@ -1,6 +1,7 @@
import { Button, Flex, Heading, Image, Link, Text } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/store/storeHooks';
import { useClipboard } from 'common/hooks/useClipboard';
import { selectConfigSlice } from 'features/system/store/configSlice';
import { toast } from 'features/toast/toast';
import newGithubIssueUrl from 'new-github-issue-url';
@@ -20,15 +21,17 @@ const selectIsLocal = createSelector(selectConfigSlice, (config) => config.isLoc
|
||||
const AppErrorBoundaryFallback = ({ error, resetErrorBoundary }: Props) => {
|
||||
const { t } = useTranslation();
|
||||
const isLocal = useAppSelector(selectIsLocal);
|
||||
const clipboard = useClipboard();
|
||||
|
||||
const handleCopy = useCallback(() => {
|
||||
const text = JSON.stringify(serializeError(error), null, 2);
|
||||
navigator.clipboard.writeText(`\`\`\`\n${text}\n\`\`\``);
|
||||
toast({
|
||||
id: 'ERROR_COPIED',
|
||||
title: t('toast.errorCopied'),
|
||||
clipboard.writeText(`\`\`\`\n${text}\n\`\`\``, () => {
|
||||
toast({
|
||||
id: 'ERROR_COPIED',
|
||||
title: t('toast.errorCopied'),
|
||||
});
|
||||
});
|
||||
}, [error, t]);
|
||||
}, [clipboard, error, t]);
|
||||
|
||||
const url = useMemo(() => {
|
||||
if (isLocal) {
|
||||
|
||||
@@ -1,5 +1,6 @@
import '@fontsource-variable/inter';
import 'overlayscrollbars/overlayscrollbars.css';
import '@xyflow/react/dist/base.css';

import { ChakraProvider, DarkMode, extendTheme, theme as _theme, TOAST_OPTIONS } from '@invoke-ai/ui-library';
import type { ReactNode } from 'react';
@@ -1,6 +1,4 @@
|
||||
import { createDraftSafeSelectorCreator, createSelectorCreator, lruMemoize } from '@reduxjs/toolkit';
|
||||
import type { GetSelectorsOptions } from '@reduxjs/toolkit/dist/entities/state_selectors';
|
||||
import type { RootState } from 'app/store/store';
|
||||
import { isEqual } from 'lodash-es';
|
||||
|
||||
/**
|
||||
@@ -14,11 +12,9 @@ export const createMemoizedSelector = createSelectorCreator({
|
||||
argsMemoize: lruMemoize,
|
||||
});
|
||||
|
||||
export const getSelectorsOptions: GetSelectorsOptions = {
|
||||
export const getSelectorsOptions = {
|
||||
createSelector: createDraftSafeSelectorCreator({
|
||||
memoize: lruMemoize,
|
||||
argsMemoize: lruMemoize,
|
||||
}),
|
||||
};
|
||||
|
||||
export const createMemoizedAppSelector = createMemoizedSelector.withTypes<RootState>();
|
||||
|
||||
@@ -3,7 +3,7 @@ import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'
|
||||
import { zPydanticValidationError } from 'features/system/store/zodSchemas';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { t } from 'i18next';
|
||||
import { truncate, upperFirst } from 'lodash-es';
|
||||
import { truncate } from 'lodash-es';
|
||||
import { serializeError } from 'serialize-error';
|
||||
import { queueApi } from 'services/api/endpoints/queue';
|
||||
import type { JsonObject } from 'type-fest';
|
||||
@@ -52,15 +52,12 @@ export const addBatchEnqueuedListener = (startAppListening: AppStartListening) =
|
||||
const result = zPydanticValidationError.safeParse(response);
|
||||
if (result.success) {
|
||||
result.data.data.detail.map((e) => {
|
||||
const description = truncate(e.msg.replace(/^(Value|Index|Key) error, /i, ''), { length: 256 });
|
||||
toast({
|
||||
id: 'QUEUE_BATCH_FAILED',
|
||||
title: truncate(upperFirst(e.msg), { length: 128 }),
|
||||
title: t('queue.batchFailedToQueue'),
|
||||
status: 'error',
|
||||
description: truncate(
|
||||
`Path:
|
||||
${e.loc.join('.')}`,
|
||||
{ length: 128 }
|
||||
),
|
||||
description,
|
||||
});
|
||||
});
|
||||
} else if (response.status !== 403) {
|
||||
|
||||
@@ -8,12 +8,13 @@ import { imageDeletionConfirmed } from 'features/deleteImageModal/store/actions'
|
||||
import { isModalOpenChanged } from 'features/deleteImageModal/store/slice';
|
||||
import { selectListImagesQueryArgs } from 'features/gallery/store/gallerySelectors';
|
||||
import { imageSelected } from 'features/gallery/store/gallerySlice';
|
||||
import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice';
|
||||
import { isImageFieldInputInstance } from 'features/nodes/types/field';
|
||||
import { fieldImageCollectionValueChanged, fieldImageValueChanged } from 'features/nodes/store/nodesSlice';
|
||||
import { isImageFieldCollectionInputInstance, isImageFieldInputInstance } from 'features/nodes/types/field';
|
||||
import { isInvocationNode } from 'features/nodes/types/invocation';
|
||||
import { forEach, intersectionBy } from 'lodash-es';
|
||||
import { imagesApi } from 'services/api/endpoints/images';
|
||||
import type { ImageDTO } from 'services/api/types';
|
||||
import type { Param0 } from 'tsafe';
|
||||
|
||||
const log = logger('gallery');
|
||||
|
||||
@@ -21,6 +22,7 @@ const log = logger('gallery');
|
||||
|
||||
// Some utils to delete images from different parts of the app
|
||||
const deleteNodesImages = (state: RootState, dispatch: AppDispatch, imageDTO: ImageDTO) => {
|
||||
const actions: Param0<typeof dispatch>[] = [];
|
||||
state.nodes.present.nodes.forEach((node) => {
|
||||
if (!isInvocationNode(node)) {
|
||||
return;
|
||||
@@ -28,16 +30,28 @@ const deleteNodesImages = (state: RootState, dispatch: AppDispatch, imageDTO: Im
|
||||
|
||||
forEach(node.data.inputs, (input) => {
|
||||
if (isImageFieldInputInstance(input) && input.value?.image_name === imageDTO.image_name) {
|
||||
dispatch(
|
||||
actions.push(
|
||||
fieldImageValueChanged({
|
||||
nodeId: node.data.id,
|
||||
fieldName: input.name,
|
||||
value: undefined,
|
||||
})
|
||||
);
|
||||
return;
|
||||
}
|
||||
if (isImageFieldCollectionInputInstance(input)) {
|
||||
actions.push(
|
||||
fieldImageCollectionValueChanged({
|
||||
nodeId: node.data.id,
|
||||
fieldName: input.name,
|
||||
value: input.value?.filter((value) => value?.image_name !== imageDTO.image_name),
|
||||
})
|
||||
);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
actions.forEach(dispatch);
|
||||
};
|
||||
|
||||
const deleteControlLayerImages = (state: RootState, dispatch: AppDispatch, imageDTO: ImageDTO) => {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { $nodeExecutionStates } from 'features/nodes/hooks/useExecutionState';
|
||||
import { $nodeExecutionStates } from 'features/nodes/hooks/useNodeExecutionState';
|
||||
import { workflowLoaded, workflowLoadRequested } from 'features/nodes/store/actions';
|
||||
import { $templates } from 'features/nodes/store/nodesSlice';
|
||||
import { $needsFit } from 'features/nodes/store/reactFlowInstance';
|
||||
|
||||
@@ -23,9 +23,11 @@ export type AppFeature =
| 'pauseQueue'
| 'resumeQueue'
| 'invocationCache'
| 'modelCache'
| 'bulkDownload'
| 'starterModels'
| 'hfToken';
| 'hfToken'
| 'retryQueueItem';
/**
 * A disable-able Stable Diffusion feature
 */
@@ -1,5 +1,6 @@
|
||||
import type { As, ChakraProps, FlexProps } from '@invoke-ai/ui-library';
|
||||
import type { ChakraProps, FlexProps } from '@invoke-ai/ui-library';
|
||||
import { Flex, Icon, Skeleton, Spinner, Text } from '@invoke-ai/ui-library';
|
||||
import type { ElementType } from 'react';
|
||||
import { memo, useMemo } from 'react';
|
||||
import { PiImageBold } from 'react-icons/pi';
|
||||
import type { ImageDTO } from 'services/api/types';
|
||||
@@ -28,7 +29,7 @@ IAILoadingImageFallback.displayName = 'IAILoadingImageFallback';
|
||||
|
||||
type IAINoImageFallbackProps = FlexProps & {
|
||||
label?: string;
|
||||
icon?: As | null;
|
||||
icon?: ElementType | null;
|
||||
boxSize?: ChakraProps['boxSize'];
|
||||
};
|
||||
|
||||
|
||||
@@ -1,39 +0,0 @@
|
||||
import { Box } from '@invoke-ai/ui-library';
|
||||
import { memo, useMemo } from 'react';
|
||||
|
||||
type Props = {
|
||||
isSelected: boolean;
|
||||
isHovered: boolean;
|
||||
};
|
||||
const SelectionOverlay = ({ isSelected, isHovered }: Props) => {
|
||||
const shadow = useMemo(() => {
|
||||
if (isSelected && isHovered) {
|
||||
return 'nodeHoveredSelected';
|
||||
}
|
||||
if (isSelected) {
|
||||
return 'nodeSelected';
|
||||
}
|
||||
if (isHovered) {
|
||||
return 'nodeHovered';
|
||||
}
|
||||
return undefined;
|
||||
}, [isHovered, isSelected]);
|
||||
return (
|
||||
<Box
|
||||
className="selection-box"
|
||||
position="absolute"
|
||||
top={0}
|
||||
insetInlineEnd={0}
|
||||
bottom={0}
|
||||
insetInlineStart={0}
|
||||
borderRadius="base"
|
||||
opacity={isSelected || isHovered ? 1 : 0.5}
|
||||
transitionProperty="common"
|
||||
transitionDuration="0.1s"
|
||||
pointerEvents="none"
|
||||
shadow={shadow}
|
||||
/>
|
||||
);
|
||||
};
|
||||
|
||||
export default memo(SelectionOverlay);
|
||||
@@ -1,238 +0,0 @@
|
||||
import { useToken } from '@invoke-ai/ui-library';
|
||||
|
||||
export const useChakraThemeTokens = () => {
|
||||
const [
|
||||
base50,
|
||||
base100,
|
||||
base150,
|
||||
base200,
|
||||
base250,
|
||||
base300,
|
||||
base350,
|
||||
base400,
|
||||
base450,
|
||||
base500,
|
||||
base550,
|
||||
base600,
|
||||
base650,
|
||||
base700,
|
||||
base750,
|
||||
base800,
|
||||
base850,
|
||||
base900,
|
||||
base950,
|
||||
accent50,
|
||||
accent100,
|
||||
accent150,
|
||||
accent200,
|
||||
accent250,
|
||||
accent300,
|
||||
accent350,
|
||||
accent400,
|
||||
accent450,
|
||||
accent500,
|
||||
accent550,
|
||||
accent600,
|
||||
accent650,
|
||||
accent700,
|
||||
accent750,
|
||||
accent800,
|
||||
accent850,
|
||||
accent900,
|
||||
accent950,
|
||||
baseAlpha50,
|
||||
baseAlpha100,
|
||||
baseAlpha150,
|
||||
baseAlpha200,
|
||||
baseAlpha250,
|
||||
baseAlpha300,
|
||||
baseAlpha350,
|
||||
baseAlpha400,
|
||||
baseAlpha450,
|
||||
baseAlpha500,
|
||||
baseAlpha550,
|
||||
baseAlpha600,
|
||||
baseAlpha650,
|
||||
baseAlpha700,
|
||||
baseAlpha750,
|
||||
baseAlpha800,
|
||||
baseAlpha850,
|
||||
baseAlpha900,
|
||||
baseAlpha950,
|
||||
accentAlpha50,
|
||||
accentAlpha100,
|
||||
accentAlpha150,
|
||||
accentAlpha200,
|
||||
accentAlpha250,
|
||||
accentAlpha300,
|
||||
accentAlpha350,
|
||||
accentAlpha400,
|
||||
accentAlpha450,
|
||||
accentAlpha500,
|
||||
accentAlpha550,
|
||||
accentAlpha600,
|
||||
accentAlpha650,
|
||||
accentAlpha700,
|
||||
accentAlpha750,
|
||||
accentAlpha800,
|
||||
accentAlpha850,
|
||||
accentAlpha900,
|
||||
accentAlpha950,
|
||||
] = useToken('colors', [
|
||||
'base.50',
|
||||
'base.100',
|
||||
'base.150',
|
||||
'base.200',
|
||||
'base.250',
|
||||
'base.300',
|
||||
'base.350',
|
||||
'base.400',
|
||||
'base.450',
|
||||
'base.500',
|
||||
'base.550',
|
||||
'base.600',
|
||||
'base.650',
|
||||
'base.700',
|
||||
'base.750',
|
||||
'base.800',
|
||||
'base.850',
|
||||
'base.900',
|
||||
'base.950',
|
||||
'accent.50',
|
||||
'accent.100',
|
||||
'accent.150',
|
||||
'accent.200',
|
||||
'accent.250',
|
||||
'accent.300',
|
||||
'accent.350',
|
||||
'accent.400',
|
||||
'accent.450',
|
||||
'accent.500',
|
||||
'accent.550',
|
||||
'accent.600',
|
||||
'accent.650',
|
||||
'accent.700',
|
||||
'accent.750',
|
||||
'accent.800',
|
||||
'accent.850',
|
||||
'accent.900',
|
||||
'accent.950',
|
||||
'baseAlpha.50',
|
||||
'baseAlpha.100',
|
||||
'baseAlpha.150',
|
||||
'baseAlpha.200',
|
||||
'baseAlpha.250',
|
||||
'baseAlpha.300',
|
||||
'baseAlpha.350',
|
||||
'baseAlpha.400',
|
||||
'baseAlpha.450',
|
||||
'baseAlpha.500',
|
||||
'baseAlpha.550',
|
||||
'baseAlpha.600',
|
||||
'baseAlpha.650',
|
||||
'baseAlpha.700',
|
||||
'baseAlpha.750',
|
||||
'baseAlpha.800',
|
||||
'baseAlpha.850',
|
||||
'baseAlpha.900',
|
||||
'baseAlpha.950',
|
||||
'accentAlpha.50',
|
||||
'accentAlpha.100',
|
||||
'accentAlpha.150',
|
||||
'accentAlpha.200',
|
||||
'accentAlpha.250',
|
||||
'accentAlpha.300',
|
||||
'accentAlpha.350',
|
||||
'accentAlpha.400',
|
||||
'accentAlpha.450',
|
||||
'accentAlpha.500',
|
||||
'accentAlpha.550',
|
||||
'accentAlpha.600',
|
||||
'accentAlpha.650',
|
||||
'accentAlpha.700',
|
||||
'accentAlpha.750',
|
||||
'accentAlpha.800',
|
||||
'accentAlpha.850',
|
||||
'accentAlpha.900',
|
||||
'accentAlpha.950',
|
||||
]);
|
||||
|
||||
return {
|
||||
base50,
|
||||
base100,
|
||||
base150,
|
||||
base200,
|
||||
base250,
|
||||
base300,
|
||||
base350,
|
||||
base400,
|
||||
base450,
|
||||
base500,
|
||||
base550,
|
||||
base600,
|
||||
base650,
|
||||
base700,
|
||||
base750,
|
||||
base800,
|
||||
base850,
|
||||
base900,
|
||||
base950,
|
||||
accent50,
|
||||
accent100,
|
||||
accent150,
|
||||
accent200,
|
||||
accent250,
|
||||
accent300,
|
||||
accent350,
|
||||
accent400,
|
||||
accent450,
|
||||
accent500,
|
||||
accent550,
|
||||
accent600,
|
||||
accent650,
|
||||
accent700,
|
||||
accent750,
|
||||
accent800,
|
||||
accent850,
|
||||
accent900,
|
||||
accent950,
|
||||
baseAlpha50,
|
||||
baseAlpha100,
|
||||
baseAlpha150,
|
||||
baseAlpha200,
|
||||
baseAlpha250,
|
||||
baseAlpha300,
|
||||
baseAlpha350,
|
||||
baseAlpha400,
|
||||
baseAlpha450,
|
||||
baseAlpha500,
|
||||
baseAlpha550,
|
||||
baseAlpha600,
|
||||
baseAlpha650,
|
||||
baseAlpha700,
|
||||
baseAlpha750,
|
||||
baseAlpha800,
|
||||
baseAlpha850,
|
||||
baseAlpha900,
|
||||
baseAlpha950,
|
||||
accentAlpha50,
|
||||
accentAlpha100,
|
||||
accentAlpha150,
|
||||
accentAlpha200,
|
||||
accentAlpha250,
|
||||
accentAlpha300,
|
||||
accentAlpha350,
|
||||
accentAlpha400,
|
||||
accentAlpha450,
|
||||
accentAlpha500,
|
||||
accentAlpha550,
|
||||
accentAlpha600,
|
||||
accentAlpha650,
|
||||
accentAlpha700,
|
||||
accentAlpha750,
|
||||
accentAlpha800,
|
||||
accentAlpha850,
|
||||
accentAlpha900,
|
||||
accentAlpha950,
|
||||
};
|
||||
};
|
||||
invokeai/frontend/web/src/common/hooks/useClipboard.tsx (new file, 81 lines)
@@ -0,0 +1,81 @@
|
||||
/* eslint-disable no-restricted-properties */
|
||||
|
||||
import { ExternalLink, Text } from '@invoke-ai/ui-library';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { useCallback, useMemo } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import type { Param0 } from 'tsafe';
|
||||
|
||||
const CLIPBOARD_FAQ_URL = 'https://invoke-ai.github.io/InvokeAI/faq/#unable-to-copy-on-firefox';
|
||||
|
||||
export const useClipboard = () => {
|
||||
const { t } = useTranslation();
|
||||
const alertClipboardNotAvailable = useCallback(() => {
|
||||
toast({
|
||||
id: 'CLIPBOARD_UNAVAILABLE',
|
||||
title: t('toast.unableToCopy'),
|
||||
description: (
|
||||
<>
|
||||
<Text fontSize="md">
|
||||
{t('toast.unableToCopyDesc')}
|
||||
<ExternalLink
|
||||
display="inline"
|
||||
fontWeight="semibold"
|
||||
href={CLIPBOARD_FAQ_URL}
|
||||
label={t('toast.unableToCopyDesc_theseSteps')}
|
||||
/>
|
||||
.
|
||||
</Text>
|
||||
</>
|
||||
),
|
||||
status: 'error',
|
||||
});
|
||||
}, [t]);
|
||||
|
||||
const isAvailable = useMemo(() => {
|
||||
if (!navigator.clipboard || !window.ClipboardItem) {
|
||||
return false;
|
||||
}
|
||||
// TODO(psyche): Should we query the permissions API?
|
||||
return true;
|
||||
}, []);
|
||||
|
||||
const writeText = useCallback(
|
||||
(data: Param0<Clipboard['writeText']>, onCopy?: () => void) => {
|
||||
if (!isAvailable) {
|
||||
alertClipboardNotAvailable();
|
||||
return;
|
||||
}
|
||||
navigator.clipboard.writeText(data);
|
||||
onCopy?.();
|
||||
},
|
||||
[alertClipboardNotAvailable, isAvailable]
|
||||
);
|
||||
|
||||
const write = useCallback(
|
||||
(data: Param0<Clipboard['write']>, onCopy?: () => void) => {
|
||||
if (!isAvailable) {
|
||||
alertClipboardNotAvailable();
|
||||
return;
|
||||
}
|
||||
navigator.clipboard.write(data);
|
||||
onCopy?.();
|
||||
},
|
||||
[alertClipboardNotAvailable, isAvailable]
|
||||
);
|
||||
|
||||
const writeImage = useCallback(
|
||||
(blob: Blob, onCopy?: () => void) => {
|
||||
if (!isAvailable) {
|
||||
alertClipboardNotAvailable();
|
||||
return;
|
||||
}
|
||||
const data = [new ClipboardItem({ ['image/png']: blob })];
|
||||
navigator.clipboard.write(data);
|
||||
onCopy?.();
|
||||
},
|
||||
[alertClipboardNotAvailable, isAvailable]
|
||||
);
|
||||
|
||||
return { isAvailable, writeText, write, writeImage };
|
||||
};
|
||||
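For orientation, a minimal usage sketch of the new useClipboard hook follows (hypothetical component and handler names, not part of the diff); the hunks below show the real call sites being migrated to it.

// Hypothetical example, assuming the hook API added above.
import { useClipboard } from 'common/hooks/useClipboard';
import { useCallback } from 'react';

export const CopyTextButton = ({ text }: { text: string }) => {
  const clipboard = useClipboard();
  const onClick = useCallback(() => {
    // If the Clipboard API is unavailable, the hook shows its "unable to copy" toast instead.
    clipboard.writeText(text, () => {
      // The onCopy callback only runs when the write actually happened.
      console.log('copied to clipboard');
    });
  }, [clipboard, text]);
  return <button onClick={onClick}>Copy</button>;
};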
@@ -1,26 +1,15 @@
|
||||
import { useClipboard } from 'common/hooks/useClipboard';
|
||||
import { convertImageUrlToBlob } from 'common/util/convertImageUrlToBlob';
|
||||
import { copyBlobToClipboard } from 'features/system/util/copyBlobToClipboard';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { useCallback, useMemo } from 'react';
|
||||
import { useCallback } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
|
||||
export const useCopyImageToClipboard = () => {
|
||||
const { t } = useTranslation();
|
||||
|
||||
const isClipboardAPIAvailable = useMemo(() => {
|
||||
return Boolean(navigator.clipboard) && Boolean(window.ClipboardItem);
|
||||
}, []);
|
||||
const clipboard = useClipboard();
|
||||
|
||||
const copyImageToClipboard = useCallback(
|
||||
async (image_url: string) => {
|
||||
if (!isClipboardAPIAvailable) {
|
||||
toast({
|
||||
id: 'PROBLEM_COPYING_IMAGE',
|
||||
title: t('toast.problemCopyingImage'),
|
||||
description: "Your browser doesn't support the Clipboard API.",
|
||||
status: 'error',
|
||||
});
|
||||
}
|
||||
try {
|
||||
const blob = await convertImageUrlToBlob(image_url);
|
||||
|
||||
@@ -28,12 +17,12 @@ export const useCopyImageToClipboard = () => {
|
||||
throw new Error('Unable to create Blob');
|
||||
}
|
||||
|
||||
copyBlobToClipboard(blob);
|
||||
|
||||
toast({
|
||||
id: 'IMAGE_COPIED',
|
||||
title: t('toast.imageCopied'),
|
||||
status: 'success',
|
||||
clipboard.writeImage(blob, () => {
|
||||
toast({
|
||||
id: 'IMAGE_COPIED',
|
||||
title: t('toast.imageCopied'),
|
||||
status: 'success',
|
||||
});
|
||||
});
|
||||
} catch (err) {
|
||||
toast({
|
||||
@@ -44,8 +33,8 @@ export const useCopyImageToClipboard = () => {
|
||||
});
|
||||
}
|
||||
},
|
||||
[isClipboardAPIAvailable, t]
|
||||
[clipboard, t]
|
||||
);
|
||||
|
||||
return { isClipboardAPIAvailable, copyImageToClipboard };
|
||||
return copyImageToClipboard;
|
||||
};
|
||||
|
||||
invokeai/frontend/web/src/common/hooks/useEditable.ts (new file, 72 lines)
@@ -0,0 +1,72 @@
|
||||
import type { ChangeEvent, KeyboardEvent, RefObject } from 'react';
|
||||
import { useCallback, useEffect, useState } from 'react';
|
||||
|
||||
type UseEditableArg = {
|
||||
value: string;
|
||||
defaultValue: string;
|
||||
onChange: (value: string) => void;
|
||||
onStartEditing?: () => void;
|
||||
inputRef?: RefObject<HTMLInputElement | HTMLTextAreaElement>;
|
||||
};
|
||||
|
||||
export const useEditable = ({ value, defaultValue, onChange: _onChange, onStartEditing, inputRef }: UseEditableArg) => {
|
||||
const [isEditing, setIsEditing] = useState(false);
|
||||
const [localValue, setLocalValue] = useState(value);
|
||||
|
||||
const onBlur = useCallback(() => {
|
||||
const trimmedValue = localValue.trim();
|
||||
const newValue = trimmedValue || defaultValue;
|
||||
setLocalValue(newValue);
|
||||
if (newValue !== value) {
|
||||
_onChange(newValue);
|
||||
}
|
||||
setIsEditing(false);
|
||||
inputRef?.current?.setSelectionRange(0, 0);
|
||||
}, [localValue, defaultValue, value, inputRef, _onChange]);
|
||||
|
||||
const onChange = useCallback((e: ChangeEvent<HTMLInputElement | HTMLTextAreaElement>) => {
|
||||
setLocalValue(e.target.value);
|
||||
}, []);
|
||||
|
||||
const onKeyDown = useCallback(
|
||||
(e: KeyboardEvent<HTMLInputElement | HTMLTextAreaElement>) => {
|
||||
if (e.key === 'Enter' && !e.shiftKey) {
|
||||
onBlur();
|
||||
} else if (e.key === 'Escape') {
|
||||
setLocalValue(value);
|
||||
_onChange(value);
|
||||
setIsEditing(false);
|
||||
}
|
||||
},
|
||||
[_onChange, onBlur, value]
|
||||
);
|
||||
|
||||
const startEditing = useCallback(() => {
|
||||
setIsEditing(true);
|
||||
onStartEditing?.();
|
||||
}, [onStartEditing]);
|
||||
|
||||
useEffect(() => {
|
||||
// Another component may change the title; sync local title with global state
|
||||
setLocalValue(value);
|
||||
}, [value]);
|
||||
|
||||
useEffect(() => {
|
||||
if (isEditing) {
|
||||
inputRef?.current?.focus();
|
||||
inputRef?.current?.select();
|
||||
}
|
||||
}, [inputRef, isEditing]);
|
||||
|
||||
return {
|
||||
isEditing,
|
||||
startEditing,
|
||||
value: localValue,
|
||||
inputProps: {
|
||||
value: localValue,
|
||||
onChange,
|
||||
onKeyDown,
|
||||
onBlur,
|
||||
},
|
||||
};
|
||||
};
|
||||
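Similarly, a minimal sketch of how useEditable is meant to be wired to an input (hypothetical component; the CanvasEntityEditableTitle change later in this diff is the real consumer).

// Hypothetical example, assuming the hook API added above.
import { Input } from '@invoke-ai/ui-library';
import { useEditable } from 'common/hooks/useEditable';
import { memo, useRef } from 'react';

export const EditableName = memo(({ name, onRename }: { name: string; onRename: (name: string) => void }) => {
  const inputRef = useRef<HTMLInputElement>(null);
  const editable = useEditable({
    value: name,
    defaultValue: 'Untitled', // used when the input is blurred while empty
    onChange: onRename,
    inputRef,
  });

  if (!editable.isEditing) {
    // Double-click switches to the editing state and focuses/selects the input.
    return <span onDoubleClick={editable.startEditing}>{editable.value}</span>;
  }
  return <Input ref={inputRef} {...editable.inputProps} />;
});
EditableName.displayName = 'EditableName';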
@@ -2,6 +2,7 @@ import { Menu, MenuButton, MenuGroup, MenuItem, MenuList } from '@invoke-ai/ui-l
|
||||
import { SubMenuButtonContent, useSubMenu } from 'common/hooks/useSubMenu';
|
||||
import { CanvasContextMenuItemsCropCanvasToBbox } from 'features/controlLayers/components/CanvasContextMenu/CanvasContextMenuItemsCropCanvasToBbox';
|
||||
import { NewLayerIcon } from 'features/controlLayers/components/common/icons';
|
||||
import { useCopyCanvasToClipboard } from 'features/controlLayers/hooks/copyHooks';
|
||||
import {
|
||||
useNewControlLayerFromBbox,
|
||||
useNewGlobalReferenceImageFromBbox,
|
||||
@@ -13,12 +14,13 @@ import {
|
||||
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
|
||||
import { memo } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { PiFloppyDiskBold } from 'react-icons/pi';
|
||||
import { PiCopyBold, PiFloppyDiskBold } from 'react-icons/pi';
|
||||
|
||||
export const CanvasContextMenuGlobalMenuItems = memo(() => {
|
||||
const { t } = useTranslation();
|
||||
const saveSubMenu = useSubMenu();
|
||||
const newSubMenu = useSubMenu();
|
||||
const copySubMenu = useSubMenu();
|
||||
const isBusy = useCanvasIsBusy();
|
||||
const saveCanvasToGallery = useSaveCanvasToGallery();
|
||||
const saveBboxToGallery = useSaveBboxToGallery();
|
||||
@@ -26,6 +28,8 @@ export const CanvasContextMenuGlobalMenuItems = memo(() => {
|
||||
const newGlobalReferenceImageFromBbox = useNewGlobalReferenceImageFromBbox();
|
||||
const newRasterLayerFromBbox = useNewRasterLayerFromBbox();
|
||||
const newControlLayerFromBbox = useNewControlLayerFromBbox();
|
||||
const copyCanvasToClipboard = useCopyCanvasToClipboard('canvas');
|
||||
const copyBboxToClipboard = useCopyCanvasToClipboard('bbox');
|
||||
|
||||
return (
|
||||
<>
|
||||
@@ -67,6 +71,21 @@ export const CanvasContextMenuGlobalMenuItems = memo(() => {
|
||||
</MenuList>
|
||||
</Menu>
|
||||
</MenuItem>
|
||||
<MenuItem {...copySubMenu.parentMenuItemProps} icon={<PiCopyBold />}>
|
||||
<Menu {...copySubMenu.menuProps}>
|
||||
<MenuButton {...copySubMenu.menuButtonProps}>
|
||||
<SubMenuButtonContent label={t('controlLayers.canvasContextMenu.copyToClipboard')} />
|
||||
</MenuButton>
|
||||
<MenuList {...copySubMenu.menuListProps}>
|
||||
<MenuItem icon={<PiCopyBold />} isDisabled={isBusy} onClick={copyCanvasToClipboard}>
|
||||
{t('controlLayers.canvasContextMenu.copyCanvasToClipboard')}
|
||||
</MenuItem>
|
||||
<MenuItem icon={<PiCopyBold />} isDisabled={isBusy} onClick={copyBboxToClipboard}>
|
||||
{t('controlLayers.canvasContextMenu.copyBboxToClipboard')}
|
||||
</MenuItem>
|
||||
</MenuList>
|
||||
</Menu>
|
||||
</MenuItem>
|
||||
</MenuGroup>
|
||||
</>
|
||||
);
|
||||
|
||||
@@ -0,0 +1,150 @@
|
||||
import {
|
||||
Button,
|
||||
Flex,
|
||||
Modal,
|
||||
ModalBody,
|
||||
ModalCloseButton,
|
||||
ModalContent,
|
||||
ModalFooter,
|
||||
ModalHeader,
|
||||
ModalOverlay,
|
||||
} from '@invoke-ai/ui-library';
|
||||
import { useStore } from '@nanostores/react';
|
||||
import { useAppStore } from 'app/store/nanostores/store';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
|
||||
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
|
||||
import { selectAutoAddBoardId } from 'features/gallery/store/gallerySelectors';
|
||||
import { createNewCanvasEntityFromImage } from 'features/imageActions/actions';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { atom } from 'nanostores';
|
||||
import { memo, useCallback } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { PiBoundingBoxBold, PiImageBold } from 'react-icons/pi';
|
||||
import { useUploadImageMutation } from 'services/api/endpoints/images';
|
||||
|
||||
const $imageFile = atom<File | null>(null);
|
||||
export const setFileToPaste = (file: File) => $imageFile.set(file);
|
||||
const clearFileToPaste = () => $imageFile.set(null);
|
||||
|
||||
export const CanvasPasteModal = memo(() => {
|
||||
useAssertSingleton('CanvasPasteModal');
|
||||
const { dispatch, getState } = useAppStore();
|
||||
const { t } = useTranslation();
|
||||
const imageToPaste = useStore($imageFile);
|
||||
const canvasManager = useCanvasManager();
|
||||
const autoAddBoardId = useAppSelector(selectAutoAddBoardId);
|
||||
const [uploadImage, { isLoading }] = useUploadImageMutation({ fixedCacheKey: 'canvasPasteModal' });
|
||||
|
||||
const getPosition = useCallback(
|
||||
(destination: 'canvas' | 'bbox') => {
|
||||
const { x, y } = canvasManager.stateApi.getBbox().rect;
|
||||
if (destination === 'bbox') {
|
||||
return { x, y };
|
||||
}
|
||||
const rasterLayerAdapters = canvasManager.compositor.getVisibleAdaptersOfType('raster_layer');
|
||||
if (rasterLayerAdapters.length === 0) {
|
||||
return { x, y };
|
||||
}
|
||||
{
|
||||
const { x, y } = canvasManager.compositor.getRectOfAdapters(rasterLayerAdapters);
|
||||
return { x, y };
|
||||
}
|
||||
},
|
||||
[canvasManager.compositor, canvasManager.stateApi]
|
||||
);
|
||||
|
||||
const handlePaste = useCallback(
|
||||
async (file: File, destination: 'assets' | 'canvas' | 'bbox') => {
|
||||
try {
|
||||
const is_intermediate = destination !== 'assets';
|
||||
const imageDTO = await uploadImage({
|
||||
file,
|
||||
is_intermediate,
|
||||
image_category: 'user',
|
||||
board_id: autoAddBoardId === 'none' ? undefined : autoAddBoardId,
|
||||
}).unwrap();
|
||||
|
||||
if (destination !== 'assets') {
|
||||
createNewCanvasEntityFromImage({
|
||||
type: 'raster_layer',
|
||||
imageDTO,
|
||||
dispatch,
|
||||
getState,
|
||||
overrides: { position: getPosition(destination) },
|
||||
});
|
||||
}
|
||||
} catch {
|
||||
toast({
|
||||
title: t('toast.pasteFailed'),
|
||||
status: 'error',
|
||||
});
|
||||
} finally {
|
||||
clearFileToPaste();
|
||||
toast({
|
||||
title: t('toast.pasteSuccess', {
|
||||
destination:
|
||||
destination === 'assets'
|
||||
? t('controlLayers.pasteToAssets')
|
||||
: destination === 'bbox'
|
||||
? t('controlLayers.pasteToBbox')
|
||||
: t('controlLayers.pasteToCanvas'),
|
||||
}),
|
||||
status: 'success',
|
||||
});
|
||||
}
|
||||
},
|
||||
[autoAddBoardId, dispatch, getPosition, getState, t, uploadImage]
|
||||
);
|
||||
|
||||
const pasteToAssets = useCallback(() => {
|
||||
if (!imageToPaste) {
|
||||
return;
|
||||
}
|
||||
handlePaste(imageToPaste, 'assets');
|
||||
}, [handlePaste, imageToPaste]);
|
||||
|
||||
const pasteToCanvas = useCallback(() => {
|
||||
if (!imageToPaste) {
|
||||
return;
|
||||
}
|
||||
handlePaste(imageToPaste, 'canvas');
|
||||
}, [handlePaste, imageToPaste]);
|
||||
|
||||
const pasteToBbox = useCallback(() => {
|
||||
if (!imageToPaste) {
|
||||
return;
|
||||
}
|
||||
handlePaste(imageToPaste, 'bbox');
|
||||
}, [handlePaste, imageToPaste]);
|
||||
|
||||
return (
|
||||
<Modal isOpen={imageToPaste !== null} onClose={clearFileToPaste} useInert={false} isCentered size="2xl">
|
||||
<ModalOverlay />
|
||||
<ModalContent>
|
||||
<ModalHeader>{t('controlLayers.pasteTo')}</ModalHeader>
|
||||
<ModalCloseButton />
|
||||
<ModalBody display="flex" justifyContent="center">
|
||||
<Flex flexDir="column" gap={4} w="min-content">
|
||||
<Button size="lg" onClick={pasteToCanvas} isDisabled={isLoading} leftIcon={<PiImageBold />}>
|
||||
{t('controlLayers.pasteToCanvasDesc')}
|
||||
</Button>
|
||||
<Button size="lg" onClick={pasteToBbox} isDisabled={isLoading} leftIcon={<PiBoundingBoxBold />}>
|
||||
{t('controlLayers.pasteToBboxDesc')}
|
||||
</Button>
|
||||
<Button size="lg" onClick={pasteToAssets} isDisabled={isLoading} variant="ghost">
|
||||
{t('controlLayers.pasteToAssetsDesc')}
|
||||
</Button>
|
||||
</Flex>
|
||||
</ModalBody>
|
||||
<ModalFooter>
|
||||
<Button onClick={clearFileToPaste} variant="ghost" isLoading={isLoading}>
|
||||
{t('common.cancel')}
|
||||
</Button>
|
||||
</ModalFooter>
|
||||
</ModalContent>
|
||||
</Modal>
|
||||
);
|
||||
});
|
||||
|
||||
CanvasPasteModal.displayName = 'CanvasPasteModal';
|
||||
@@ -1,5 +1,5 @@
|
||||
import { Flex, IconButton } from '@invoke-ai/ui-library';
|
||||
import { createMemoizedAppSelector } from 'app/store/createMemoizedSelector';
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { useAppStore } from 'app/store/nanostores/store';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import { useImageUploadButton } from 'common/hooks/useImageUploadButton';
|
||||
@@ -34,7 +34,7 @@ import type {
|
||||
} from 'services/api/types';
|
||||
|
||||
const buildSelectControlAdapter = (entityIdentifier: CanvasEntityIdentifier<'control_layer'>) =>
|
||||
createMemoizedAppSelector(selectCanvasSlice, (canvas) => {
|
||||
createSelector(selectCanvasSlice, (canvas) => {
|
||||
const layer = selectEntityOrThrow(canvas, entityIdentifier, 'ControlLayerControlAdapter');
|
||||
return layer.controlAdapter;
|
||||
});
|
||||
|
||||
@@ -0,0 +1,99 @@
|
||||
import type { ComboboxOnChange } from '@invoke-ai/ui-library';
|
||||
import { Combobox, CompositeNumberInput, CompositeSlider, FormControl, FormLabel, Switch } from '@invoke-ai/ui-library';
|
||||
import type { AdjustImageFilterConfig, AjustImageChannels } from 'features/controlLayers/store/filters';
|
||||
import { IMAGE_FILTERS, isAjustImageChannels } from 'features/controlLayers/store/filters';
|
||||
import type { ChangeEvent } from 'react';
|
||||
import { memo, useCallback, useMemo } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
|
||||
import type { FilterComponentProps } from './types';
|
||||
|
||||
type Props = FilterComponentProps<AdjustImageFilterConfig>;
|
||||
const DEFAULTS = IMAGE_FILTERS.adjust_image.buildDefaults();
|
||||
|
||||
export const FilterAdjustImage = memo(({ onChange, config }: Props) => {
|
||||
const { t } = useTranslation();
|
||||
const handleChannelChange = useCallback<ComboboxOnChange>(
|
||||
(v) => {
|
||||
if (!isAjustImageChannels(v?.value)) {
|
||||
return;
|
||||
}
|
||||
onChange({ ...config, channel: v.value });
|
||||
},
|
||||
[config, onChange]
|
||||
);
|
||||
|
||||
const handleValueChange = useCallback(
|
||||
(v: number) => {
|
||||
onChange({ ...config, value: v });
|
||||
},
|
||||
[config, onChange]
|
||||
);
|
||||
|
||||
const handleScaleChange = useCallback(
|
||||
(e: ChangeEvent<HTMLInputElement>) => {
|
||||
onChange({ ...config, scale_values: e.target.checked });
|
||||
},
|
||||
[config, onChange]
|
||||
);
|
||||
|
||||
const options: { label: string; value: AjustImageChannels }[] = useMemo(
|
||||
() => [
|
||||
{ label: t('controlLayers.filter.adjust_image.red'), value: 'Red (RGBA)' },
|
||||
{ label: t('controlLayers.filter.adjust_image.green'), value: 'Green (RGBA)' },
|
||||
{ label: t('controlLayers.filter.adjust_image.blue'), value: 'Blue (RGBA)' },
|
||||
{ label: t('controlLayers.filter.adjust_image.alpha'), value: 'Alpha (RGBA)' },
|
||||
{ label: t('controlLayers.filter.adjust_image.cyan'), value: 'Cyan (CMYK)' },
|
||||
{ label: t('controlLayers.filter.adjust_image.magenta'), value: 'Magenta (CMYK)' },
|
||||
{ label: t('controlLayers.filter.adjust_image.yellow'), value: 'Yellow (CMYK)' },
|
||||
{ label: t('controlLayers.filter.adjust_image.black'), value: 'Black (CMYK)' },
|
||||
{ label: t('controlLayers.filter.adjust_image.hue'), value: 'Hue (HSV)' },
|
||||
{ label: t('controlLayers.filter.adjust_image.saturation'), value: 'Saturation (HSV)' },
|
||||
{ label: t('controlLayers.filter.adjust_image.value'), value: 'Value (HSV)' },
|
||||
{ label: t('controlLayers.filter.adjust_image.luminosity'), value: 'Luminosity (LAB)' },
|
||||
{ label: t('controlLayers.filter.adjust_image.a'), value: 'A (LAB)' },
|
||||
{ label: t('controlLayers.filter.adjust_image.b'), value: 'B (LAB)' },
|
||||
{ label: t('controlLayers.filter.adjust_image.y'), value: 'Y (YCbCr)' },
|
||||
{ label: t('controlLayers.filter.adjust_image.cb'), value: 'Cb (YCbCr)' },
|
||||
{ label: t('controlLayers.filter.adjust_image.cr'), value: 'Cr (YCbCr)' },
|
||||
],
|
||||
[t]
|
||||
);
|
||||
|
||||
const value = useMemo(() => options.filter((o) => o.value === config.channel)[0], [options, config.channel]);
|
||||
|
||||
return (
|
||||
<>
|
||||
<FormControl>
|
||||
<FormLabel m={0}>{t('controlLayers.filter.adjust_image.channel')}</FormLabel>
|
||||
<Combobox value={value} options={options} onChange={handleChannelChange} isSearchable={false} />
|
||||
</FormControl>
|
||||
<FormControl>
|
||||
<FormLabel m={0}>{t('controlLayers.filter.adjust_image.value_setting')}</FormLabel>
|
||||
<CompositeSlider
|
||||
value={config.value}
|
||||
defaultValue={DEFAULTS.value}
|
||||
onChange={handleValueChange}
|
||||
min={0}
|
||||
max={2}
|
||||
step={0.0025}
|
||||
marks
|
||||
/>
|
||||
<CompositeNumberInput
|
||||
value={config.value}
|
||||
defaultValue={DEFAULTS.value}
|
||||
onChange={handleValueChange}
|
||||
min={0}
|
||||
max={255}
|
||||
step={0.0025}
|
||||
/>
|
||||
</FormControl>
|
||||
<FormControl w="max-content">
|
||||
<FormLabel m={0}>{t('controlLayers.filter.adjust_image.scale_values')}</FormLabel>
|
||||
<Switch defaultChecked={DEFAULTS.scale_values} isChecked={config.scale_values} onChange={handleScaleChange} />
|
||||
</FormControl>
|
||||
</>
|
||||
);
|
||||
});
|
||||
|
||||
FilterAdjustImage.displayName = 'FilterAdjustImage';
|
||||
@@ -1,4 +1,5 @@
|
||||
import { IAINoContentFallback } from 'common/components/IAIImageFallback';
|
||||
import { FilterAdjustImage } from 'features/controlLayers/components/Filters/FilterAdjustImage';
|
||||
import { FilterBlur } from 'features/controlLayers/components/Filters/FilterBlur';
|
||||
import { FilterCannyEdgeDetection } from 'features/controlLayers/components/Filters/FilterCannyEdgeDetection';
|
||||
import { FilterColorMap } from 'features/controlLayers/components/Filters/FilterColorMap';
|
||||
@@ -21,8 +22,8 @@ type Props = { filterConfig: FilterConfig; onChange: (filterConfig: FilterConfig
|
||||
export const FilterSettings = memo(({ filterConfig, onChange }: Props) => {
|
||||
const { t } = useTranslation();
|
||||
|
||||
if (filterConfig.type === 'img_blur') {
|
||||
return <FilterBlur config={filterConfig} onChange={onChange} />;
|
||||
if (filterConfig.type === 'adjust_image') {
|
||||
return <FilterAdjustImage config={filterConfig} onChange={onChange} />;
|
||||
}
|
||||
|
||||
if (filterConfig.type === 'canny_edge_detection') {
|
||||
@@ -65,6 +66,10 @@ export const FilterSettings = memo(({ filterConfig, onChange }: Props) => {
|
||||
return <FilterPiDiNetEdgeDetection config={filterConfig} onChange={onChange} />;
|
||||
}
|
||||
|
||||
if (filterConfig.type === 'img_blur') {
|
||||
return <FilterBlur config={filterConfig} onChange={onChange} />;
|
||||
}
|
||||
|
||||
if (filterConfig.type === 'img_noise') {
|
||||
return <FilterNoise config={filterConfig} onChange={onChange} />;
|
||||
}
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
import { MenuItem } from '@invoke-ai/ui-library';
|
||||
import { useEntityAdapterSafe } from 'features/controlLayers/contexts/EntityAdapterContext';
|
||||
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
|
||||
import { useCopyLayerToClipboard } from 'features/controlLayers/hooks/copyHooks';
|
||||
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
|
||||
import { useCopyLayerToClipboard } from 'features/controlLayers/hooks/useCopyLayerToClipboard';
|
||||
import { useEntityIsEmpty } from 'features/controlLayers/hooks/useEntityIsEmpty';
|
||||
import { memo, useCallback } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
|
||||
@@ -1,67 +1,43 @@
|
||||
import { Input } from '@invoke-ai/ui-library';
|
||||
import { useAppDispatch } from 'app/store/storeHooks';
|
||||
import { useBoolean } from 'common/hooks/useBoolean';
|
||||
import { useEditable } from 'common/hooks/useEditable';
|
||||
import { CanvasEntityTitle } from 'features/controlLayers/components/common/CanvasEntityTitle';
|
||||
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
|
||||
import { useEntityTitle } from 'features/controlLayers/hooks/useEntityTitle';
|
||||
import { useEntityName, useEntityTypeName } from 'features/controlLayers/hooks/useEntityTitle';
|
||||
import { entityNameChanged } from 'features/controlLayers/store/canvasSlice';
|
||||
import type { ChangeEvent, KeyboardEvent } from 'react';
|
||||
import { memo, useCallback, useEffect, useRef, useState } from 'react';
|
||||
import { memo, useCallback, useRef } from 'react';
|
||||
|
||||
export const CanvasEntityEditableTitle = memo(() => {
|
||||
const dispatch = useAppDispatch();
|
||||
const entityIdentifier = useEntityIdentifierContext();
|
||||
const title = useEntityTitle(entityIdentifier);
|
||||
const isEditing = useBoolean(false);
|
||||
const [localTitle, setLocalTitle] = useState(title);
|
||||
const ref = useRef<HTMLInputElement>(null);
|
||||
const inputRef = useRef<HTMLInputElement>(null);
|
||||
const name = useEntityName(entityIdentifier);
|
||||
const typeName = useEntityTypeName(entityIdentifier.type);
|
||||
|
||||
const onChange = useCallback((e: ChangeEvent<HTMLInputElement>) => {
|
||||
setLocalTitle(e.target.value);
|
||||
}, []);
|
||||
|
||||
const onBlur = useCallback(() => {
|
||||
const trimmedTitle = localTitle.trim();
|
||||
if (trimmedTitle.length === 0) {
|
||||
dispatch(entityNameChanged({ entityIdentifier, name: null }));
|
||||
} else if (trimmedTitle !== title) {
|
||||
dispatch(entityNameChanged({ entityIdentifier, name: trimmedTitle }));
|
||||
}
|
||||
isEditing.setFalse();
|
||||
}, [dispatch, entityIdentifier, isEditing, localTitle, title]);
|
||||
|
||||
const onKeyDown = useCallback(
|
||||
(e: KeyboardEvent<HTMLInputElement>) => {
|
||||
if (e.key === 'Enter') {
|
||||
onBlur();
|
||||
} else if (e.key === 'Escape') {
|
||||
setLocalTitle(title);
|
||||
isEditing.setFalse();
|
||||
}
|
||||
const onChange = useCallback(
|
||||
(name: string) => {
|
||||
dispatch(entityNameChanged({ entityIdentifier, name }));
|
||||
},
|
||||
[isEditing, onBlur, title]
|
||||
[dispatch, entityIdentifier]
|
||||
);
|
||||
|
||||
useEffect(() => {
|
||||
if (isEditing.isTrue) {
|
||||
ref.current?.focus();
|
||||
ref.current?.select();
|
||||
}
|
||||
}, [isEditing.isTrue]);
|
||||
const editable = useEditable({
|
||||
value: name || typeName,
|
||||
defaultValue: typeName,
|
||||
onChange,
|
||||
inputRef,
|
||||
});
|
||||
|
||||
if (!isEditing.isTrue) {
|
||||
return <CanvasEntityTitle cursor="text" onDoubleClick={isEditing.setTrue} />;
|
||||
if (!editable.isEditing) {
|
||||
return <CanvasEntityTitle cursor="text" onDoubleClick={editable.startEditing} />;
|
||||
}
|
||||
|
||||
return (
|
||||
<Input
|
||||
ref={ref}
|
||||
value={localTitle}
|
||||
onChange={onChange}
|
||||
onBlur={onBlur}
|
||||
onKeyDown={onKeyDown}
|
||||
ref={inputRef}
|
||||
{...editable.inputProps}
|
||||
variant="outline"
|
||||
_focusVisible={{ borderWidth: 1, borderColor: 'invokeBlueAlpha.400', borderRadius: 'base' }}
|
||||
_focusVisible={{ borderRadius: 'base', h: 'unset' }}
|
||||
/>
|
||||
);
|
||||
});
|
||||
|
||||
@@ -0,0 +1,91 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import { useClipboard } from 'common/hooks/useClipboard';
|
||||
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
|
||||
import type { CanvasEntityAdapterControlLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterControlLayer';
|
||||
import type { CanvasEntityAdapterInpaintMask } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterInpaintMask';
|
||||
import type { CanvasEntityAdapterRasterLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterRasterLayer';
|
||||
import type { CanvasEntityAdapterRegionalGuidance } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterRegionalGuidance';
|
||||
import { canvasToBlob } from 'features/controlLayers/konva/util';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { startCase } from 'lodash-es';
|
||||
import { useCallback } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { serializeError } from 'serialize-error';
|
||||
|
||||
const log = logger('canvas');
|
||||
|
||||
export const useCopyLayerToClipboard = () => {
|
||||
const { t } = useTranslation();
|
||||
const clipboard = useClipboard();
|
||||
const copyLayerToCipboard = useCallback(
|
||||
async (
|
||||
adapter:
|
||||
| CanvasEntityAdapterRasterLayer
|
||||
| CanvasEntityAdapterControlLayer
|
||||
| CanvasEntityAdapterInpaintMask
|
||||
| CanvasEntityAdapterRegionalGuidance
|
||||
| null
|
||||
) => {
|
||||
if (!adapter) {
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
const canvas = adapter.getCanvas();
|
||||
const blob = await canvasToBlob(canvas);
|
||||
clipboard.writeImage(blob, () => {
|
||||
log.trace('Layer copied to clipboard');
|
||||
toast({
|
||||
status: 'info',
|
||||
title: t('toast.layerCopiedToClipboard'),
|
||||
});
|
||||
});
|
||||
} catch (error) {
|
||||
log.error({ error: serializeError(error) }, 'Problem copying layer to clipboard');
|
||||
toast({
|
||||
status: 'error',
|
||||
title: t('toast.problemCopyingLayer'),
|
||||
});
|
||||
}
|
||||
},
|
||||
[clipboard, t]
|
||||
);
|
||||
|
||||
return copyLayerToCipboard;
|
||||
};
|
||||
|
||||
export const useCopyCanvasToClipboard = (region: 'canvas' | 'bbox') => {
|
||||
const { t } = useTranslation();
|
||||
const clipboard = useClipboard();
|
||||
const canvasManager = useCanvasManager();
|
||||
const copyCanvasToClipboard = useCallback(async () => {
|
||||
const rect =
|
||||
region === 'bbox'
|
||||
? canvasManager.stateApi.getBbox().rect
|
||||
: canvasManager.compositor.getVisibleRectOfType('raster_layer');
|
||||
|
||||
if (rect.width === 0 || rect.height === 0) {
|
||||
toast({
|
||||
title: t('controlLayers.copyRegionError', { region: startCase(region) }),
|
||||
description: t('controlLayers.regionIsEmpty'),
|
||||
status: 'warning',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
const rasterAdapters = canvasManager.compositor.getVisibleAdaptersOfType('raster_layer');
|
||||
const canvasElement = canvasManager.compositor.getCompositeCanvas(rasterAdapters, rect);
|
||||
const blob = await canvasToBlob(canvasElement);
|
||||
clipboard.writeImage(blob, () => {
|
||||
log.trace('Region copied to clipboard');
|
||||
toast({ title: t('controlLayers.regionCopiedToClipboard', { region: startCase(region) }) });
|
||||
});
|
||||
} catch (error) {
|
||||
log.error({ error: serializeError(error) }, 'Failed to copy region to clipboard');
|
||||
toast({ title: t('controlLayers.copyRegionError', { region: startCase(region) }), status: 'error' });
|
||||
}
|
||||
}, [canvasManager.compositor, canvasManager.stateApi, clipboard, region, t]);
|
||||
|
||||
return copyCanvasToClipboard;
|
||||
};
|
||||
@@ -1,50 +0,0 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { CanvasEntityAdapterControlLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterControlLayer';
|
||||
import type { CanvasEntityAdapterInpaintMask } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterInpaintMask';
|
||||
import type { CanvasEntityAdapterRasterLayer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterRasterLayer';
|
||||
import type { CanvasEntityAdapterRegionalGuidance } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityAdapterRegionalGuidance';
|
||||
import { canvasToBlob } from 'features/controlLayers/konva/util';
|
||||
import { copyBlobToClipboard } from 'features/system/util/copyBlobToClipboard';
|
||||
import { toast } from 'features/toast/toast';
|
||||
import { useCallback } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { serializeError } from 'serialize-error';
|
||||
|
||||
const log = logger('canvas');
|
||||
|
||||
export const useCopyLayerToClipboard = () => {
|
||||
const { t } = useTranslation();
|
||||
const copyLayerToCipboard = useCallback(
|
||||
async (
|
||||
adapter:
|
||||
| CanvasEntityAdapterRasterLayer
|
||||
| CanvasEntityAdapterControlLayer
|
||||
| CanvasEntityAdapterInpaintMask
|
||||
| CanvasEntityAdapterRegionalGuidance
|
||||
| null
|
||||
) => {
|
||||
if (!adapter) {
|
||||
return;
|
||||
}
|
||||
try {
|
||||
const canvas = adapter.getCanvas();
|
||||
const blob = await canvasToBlob(canvas);
|
||||
copyBlobToClipboard(blob);
|
||||
log.trace('Layer copied to clipboard');
|
||||
toast({
|
||||
status: 'info',
|
||||
title: t('toast.layerCopiedToClipboard'),
|
||||
});
|
||||
} catch (error) {
|
||||
log.error({ error: serializeError(error) }, 'Problem copying layer to clipboard');
|
||||
toast({
|
||||
status: 'error',
|
||||
title: t('toast.problemCopyingLayer'),
|
||||
});
|
||||
}
|
||||
},
|
||||
[t]
|
||||
);
|
||||
|
||||
return copyLayerToCipboard;
|
||||
};
|
||||
@@ -15,17 +15,17 @@ const createSelectName = (entityIdentifier: CanvasEntityIdentifier) =>
|
||||
return entity.name;
|
||||
});
|
||||
|
||||
export const useEntityTitle = (entityIdentifier: CanvasEntityIdentifier) => {
|
||||
const { t } = useTranslation();
|
||||
export const useEntityName = (entityIdentifier: CanvasEntityIdentifier) => {
|
||||
const selectName = useMemo(() => createSelectName(entityIdentifier), [entityIdentifier]);
|
||||
const name = useAppSelector(selectName);
|
||||
return name;
|
||||
};
|
||||
|
||||
const title = useMemo(() => {
|
||||
if (name) {
|
||||
return name;
|
||||
}
|
||||
export const useEntityTypeName = (type: CanvasEntityIdentifier['type']) => {
|
||||
const { t } = useTranslation();
|
||||
|
||||
switch (entityIdentifier.type) {
|
||||
const typeName = useMemo(() => {
|
||||
switch (type) {
|
||||
case 'inpaint_mask':
|
||||
return t('controlLayers.inpaintMask');
|
||||
case 'control_layer':
|
||||
@@ -39,7 +39,15 @@ export const useEntityTitle = (entityIdentifier: CanvasEntityIdentifier) => {
|
||||
default:
|
||||
assert(false, 'Unexpected entity type');
|
||||
}
|
||||
}, [entityIdentifier.type, name, t]);
|
||||
}, [type, t]);
|
||||
|
||||
return typeName;
|
||||
};
|
||||
|
||||
export const useEntityTitle = (entityIdentifier: CanvasEntityIdentifier) => {
|
||||
const name = useEntityName(entityIdentifier);
|
||||
const typeName = useEntityTypeName(entityIdentifier.type);
|
||||
const title = useMemo(() => name || typeName, [name, typeName]);
|
||||
|
||||
return title;
|
||||
};
|
||||
|
||||
@@ -59,11 +59,11 @@ export class CanvasEntityAdapterInpaintMask extends CanvasEntityAdapterBase<
|
||||
this.syncOpacity();
|
||||
}
|
||||
if (!prevState || this.state.fill !== prevState.fill) {
|
||||
// On first render, we must force the update
|
||||
this.renderer.updateCompositingRectFill(!prevState);
|
||||
// On first render, or when the fill changes, we must force the update
|
||||
this.renderer.updateCompositingRectFill(true);
|
||||
}
|
||||
if (!prevState) {
|
||||
// On first render, we must force the updates
|
||||
if (!prevState || this.state.objects !== prevState.objects) {
|
||||
// On first render, or when the objects change, we must force the update
|
||||
this.renderer.updateCompositingRectSize(true);
|
||||
this.renderer.updateCompositingRectPosition(true);
|
||||
}
|
||||
|
||||
@@ -59,11 +59,11 @@ export class CanvasEntityAdapterRegionalGuidance extends CanvasEntityAdapterBase
|
||||
this.syncOpacity();
|
||||
}
|
||||
if (!prevState || this.state.fill !== prevState.fill) {
|
||||
// On first render, we must force the update
|
||||
this.renderer.updateCompositingRectFill(!prevState);
|
||||
// On first render, or when the fill changes, we must force the update
|
||||
this.renderer.updateCompositingRectFill(true);
|
||||
}
|
||||
if (!prevState) {
|
||||
// On first render, we must force the updates
|
||||
if (!prevState || this.state.objects !== prevState.objects) {
|
||||
// On first render, or when the objects change, we must force the update
|
||||
this.renderer.updateCompositingRectSize(true);
|
||||
this.renderer.updateCompositingRectPosition(true);
|
||||
}
|
||||
|
||||
@@ -68,7 +68,7 @@ export class CanvasEntityFilterer extends CanvasModuleBase {
|
||||
/**
|
||||
* The config for the filter.
|
||||
*/
|
||||
$filterConfig = atom<FilterConfig>(IMAGE_FILTERS.canny_edge_detection.buildDefaults());
|
||||
$filterConfig = atom<FilterConfig>(IMAGE_FILTERS.adjust_image.buildDefaults());
|
||||
|
||||
/**
|
||||
* The initial filter config, used to reset the filter config.
|
||||
@@ -212,7 +212,7 @@ export class CanvasEntityFilterer extends CanvasModuleBase {
|
||||
return filter.buildDefaults();
|
||||
} else {
|
||||
// Otherwise, use the default filter
|
||||
return IMAGE_FILTERS.canny_edge_detection.buildDefaults();
|
||||
return IMAGE_FILTERS.adjust_image.buildDefaults();
|
||||
}
|
||||
};
|
||||
|
||||
@@ -284,8 +284,8 @@ export class CanvasEntityFilterer extends CanvasModuleBase {
|
||||
this.log.error({ error: serializeError(filterResult.error) }, 'Error filtering');
|
||||
this.$isProcessing.set(false);
|
||||
// Clean up the abort controller as needed
|
||||
if (!this.abortController.signal.aborted) {
|
||||
this.abortController.abort();
|
||||
if (!controller.signal.aborted) {
|
||||
controller.abort();
|
||||
}
|
||||
this.abortController = null;
|
||||
return;
|
||||
@@ -324,8 +324,8 @@ export class CanvasEntityFilterer extends CanvasModuleBase {
|
||||
this.$isProcessing.set(false);
|
||||
|
||||
// Clean up the abort controller as needed
|
||||
if (!this.abortController.signal.aborted) {
|
||||
this.abortController.abort();
|
||||
if (!controller.signal.aborted) {
|
||||
controller.abort();
|
||||
}
|
||||
|
||||
this.abortController = null;
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import { Mutex } from 'async-mutex';
|
||||
import { deepClone } from 'common/util/deepClone';
|
||||
import { withResultAsync } from 'common/util/result';
|
||||
import type { CanvasEntityBufferObjectRenderer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityBufferObjectRenderer';
|
||||
import type { CanvasEntityFilterer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityFilterer';
|
||||
import type { CanvasEntityObjectRenderer } from 'features/controlLayers/konva/CanvasEntity/CanvasEntityObjectRenderer';
|
||||
@@ -7,7 +8,7 @@ import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
|
||||
import { CanvasModuleBase } from 'features/controlLayers/konva/CanvasModuleBase';
|
||||
import type { CanvasSegmentAnythingModule } from 'features/controlLayers/konva/CanvasSegmentAnythingModule';
|
||||
import type { CanvasStagingAreaModule } from 'features/controlLayers/konva/CanvasStagingAreaModule';
|
||||
import { loadImage } from 'features/controlLayers/konva/util';
|
||||
import { getKonvaNodeDebugAttrs, loadImage } from 'features/controlLayers/konva/util';
|
||||
import type { CanvasImageState } from 'features/controlLayers/store/types';
|
||||
import { t } from 'i18next';
|
||||
import Konva from 'konva';
|
||||
@@ -94,36 +95,41 @@ export class CanvasObjectImage extends CanvasModuleBase {
}

updateImageSource = async (imageName: string) => {
try {
this.log.trace({ imageName }, 'Updating image source');
this.log.trace({ imageName }, 'Updating image source');

this.isLoading = true;
this.konva.group.visible(true);
this.isLoading = true;
this.konva.group.visible(true);

if (!this.konva.image) {
this.konva.placeholder.group.visible(false);
this.konva.placeholder.text.text(t('common.loadingImage', 'Loading Image'));
}

const imageDTO = await getImageDTOSafe(imageName);
if (imageDTO === null) {
this.onFailedToLoadImage();
return;
}

this.imageElement = await loadImage(imageDTO.image_url);
await this.updateImageElement();
} catch {
this.onFailedToLoadImage();
if (!this.konva.image) {
this.konva.placeholder.group.visible(false);
this.konva.placeholder.text.text(t('common.loadingImage', 'Loading Image'));
}

const imageDTO = await getImageDTOSafe(imageName);
if (imageDTO === null) {
// ImageDTO not found (or network error)
this.onFailedToLoadImage(t('controlLayers.unableToFindImage', 'Unable to find image'));
return;
}

const imageElementResult = await withResultAsync(() => loadImage(imageDTO.image_url));
if (imageElementResult.isErr()) {
// Image loading failed (e.g. the URL to the "physical" image is invalid)
this.onFailedToLoadImage(t('controlLayers.unableToLoadImage', 'Unable to load image'));
return;
}

this.imageElement = imageElementResult.value;

await this.updateImageElement();
};

onFailedToLoadImage = () => {
this.log({ image: this.state.image }, 'Failed to load image');
onFailedToLoadImage = (message: string) => {
this.log({ image: this.state.image }, message);
this.konva.image?.visible(false);
this.isLoading = false;
this.isError = true;
this.konva.placeholder.text.text(t('common.imageFailedToLoad', 'Image Failed to Load'));
this.konva.placeholder.text.text(message);
this.konva.placeholder.group.visible(true);
};

@@ -140,6 +146,7 @@ export class CanvasObjectImage extends CanvasModuleBase {
image: this.imageElement,
width,
height,
visible: true,
});
} else {
this.log.trace('Creating new Konva image');
@@ -202,6 +209,10 @@ export class CanvasObjectImage extends CanvasModuleBase {
isLoading: this.isLoading,
isError: this.isError,
state: deepClone(this.state),
konva: {
group: getKonvaNodeDebugAttrs(this.konva.group),
image: this.konva.image ? getKonvaNodeDebugAttrs(this.konva.image) : null,
},
};
};
}
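The rewritten `updateImageSource` above replaces the single try/catch with `withResultAsync` from `common/util/result`, so the "image record missing" and "image failed to load" cases can surface different messages. The wrapper's implementation is not part of this diff; a minimal sketch consistent with the `isErr()`/`value` usage seen here might look like:

```ts
// Hypothetical sketch only - the real withResultAsync in common/util/result may differ.
type Result<T, E = unknown> =
  | { isErr: () => false; value: T }
  | { isErr: () => true; error: E };

const withResultAsync = async <T>(fn: () => Promise<T>): Promise<Result<T>> => {
  try {
    // Await the async work and wrap the resolved value in an "ok" result.
    return { isErr: () => false, value: await fn() };
  } catch (error) {
    // Convert the throw into an "err" result so callers can branch without try/catch.
    return { isErr: () => true, error };
  }
};
```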
@@ -75,7 +75,7 @@ export class CanvasStagingAreaModule extends CanvasModuleBase {
this.log.trace('Rendering staging area');
const stagingArea = this.manager.stateApi.runSelector(selectCanvasStagingAreaSlice);

const { x, y, width, height } = this.manager.stateApi.getBbox().rect;
const { x, y } = this.manager.stateApi.getBbox().rect;
const shouldShowStagedImage = this.$shouldShowStagedImage.get();

this.selectedImage = stagingArea.stagedImages[stagingArea.selectedStagedImageIndex] ?? null;
@@ -83,27 +83,51 @@ export class CanvasStagingAreaModule extends CanvasModuleBase {

if (this.selectedImage) {
const { imageDTO } = this.selectedImage;
const image = imageDTOToImageWithDims(imageDTO);

/**
* When the final output image of a generation is received, we should clear that generation's last progress image.
*
* It's possible that we have already rendered the progress image from the next generation before the output image
* from the previous is fully loaded/rendered. This race condition results in a flicker:
* - LAST GENERATION: Render the final progress image
* - LAST GENERATION: Start loading the final output image...
* - NEXT GENERATION: Render the first progress image
* - LAST GENERATION: ...Finish loading the final output image & render it, clearing the progress image <-- Flicker!
* - NEXT GENERATION: Render the next progress image
*
* We can detect the race condition by stashing the session ID of the last progress image when we begin loading
* that session's output image. After we render it, if the progress image's session ID is the same as the one we
* stashed, we know that we have not yet gotten that next generation's first progress image. We can clear the
* progress image without causing a flicker.
*/
const lastProgressEventSessionId = this.manager.progressImage.$lastProgressEvent.get()?.session_id;
const hideProgressIfSameSession = () => {
const currentProgressEventSessionId = this.manager.progressImage.$lastProgressEvent.get()?.session_id;
if (lastProgressEventSessionId === currentProgressEventSessionId) {
this.manager.progressImage.$lastProgressEvent.set(null);
}
};

if (!this.image) {
const { image_name } = imageDTO;
this.image = new CanvasObjectImage(
{
id: 'staging-area-image',
type: 'image',
image: {
image_name: image_name,
width,
height,
},
image,
},
this
);
await this.image.update(this.image.state, true);
this.konva.group.add(this.image.konva.group);
}

if (!this.image.isLoading && !this.image.isError) {
await this.image.update({ ...this.image.state, image: imageDTOToImageWithDims(imageDTO) }, true);
this.manager.progressImage.$lastProgressEvent.set(null);
hideProgressIfSameSession();
} else if (this.image.isLoading) {
// noop - just wait for the image to load
} else if (this.image.state.image.image_name !== image.image_name) {
await this.image.update({ ...this.image.state, image }, true);
hideProgressIfSameSession();
} else if (this.image.isError) {
hideProgressIfSameSession();
}
this.image.konva.group.visible(shouldShowStagedImage);
} else {
@@ -136,6 +160,7 @@ export class CanvasStagingAreaModule extends CanvasModuleBase {
selectedImage: this.selectedImage,
$shouldShowStagedImage: this.$shouldShowStagedImage.get(),
$isStaging: this.$isStaging.get(),
image: this.image?.repr() ?? null,
};
};
}
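The session-ID check above is the core of the flicker fix: the last progress event's session ID is captured before the staged image is (slowly) loaded and rendered, and the progress image is cleared afterwards only if no newer event has replaced it. The same guard can be sketched in isolation (names below are illustrative, not taken from the diff):

```ts
// Illustrative helper: build a "clear" callback that only fires if no newer
// progress event (identified by session ID) has arrived since it was created.
const makeGuardedClear = (getSessionId: () => string | undefined, clear: () => void) => {
  const stashedSessionId = getSessionId(); // session that produced the current progress image
  return () => {
    if (getSessionId() === stashedSessionId) {
      clear(); // nothing newer arrived, so clearing cannot hide the next generation's preview
    }
  };
};
```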
@@ -277,8 +277,11 @@ export class CanvasBrushToolModule extends CanvasModuleBase {

let points: number[];

let isShiftDraw = false;

if (e.evt.shiftKey && lastLinePoint) {
// Create a straight line from the last line point
isShiftDraw = true;
points = [
lastLinePoint.x,
lastLinePoint.y,
@@ -298,15 +301,18 @@ export class CanvasBrushToolModule extends CanvasModuleBase {
points,
strokeWidth: settings.brushWidth,
color: this.manager.stateApi.getCurrentColor(),
clip: this.parent.getClip(selectedEntity.state),
// When shift is held, the line may extend beyond the clip region. No clip for these lines.
clip: isShiftDraw ? null : this.parent.getClip(selectedEntity.state),
});
} else {
const lastLinePoint = getLastPointOfLastLine(selectedEntity.state.objects, 'brush_line');

let points: number[];
let isShiftDraw = false;

if (e.evt.shiftKey && lastLinePoint) {
// Create a straight line from the last line point
isShiftDraw = true;
points = [lastLinePoint.x, lastLinePoint.y, alignedPoint.x, alignedPoint.y];
} else {
// Create a new line with the current point
@@ -319,7 +325,8 @@ export class CanvasBrushToolModule extends CanvasModuleBase {
points,
strokeWidth: settings.brushWidth,
color: this.manager.stateApi.getCurrentColor(),
clip: this.parent.getClip(selectedEntity.state),
// When shift is held, the line may extend beyond the clip region. No clip for these lines.
clip: isShiftDraw ? null : this.parent.getClip(selectedEntity.state),
});
}
};
@@ -3,7 +3,7 @@ import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { CanvasModuleBase } from 'features/controlLayers/konva/CanvasModuleBase';
import type { CanvasToolModule } from 'features/controlLayers/konva/CanvasTool/CanvasToolModule';
import { getColorAtCoordinate, getPrefixedId } from 'features/controlLayers/konva/util';
import type { RgbColor } from 'features/controlLayers/store/types';
import type { RgbaColor } from 'features/controlLayers/store/types';
import { RGBA_BLACK } from 'features/controlLayers/store/types';
import Konva from 'konva';
import type { KonvaEventObject } from 'konva/lib/Node';
@@ -52,6 +52,39 @@ type CanvasColorPickerToolModuleConfig = {
* The color of the crosshair line borders.
*/
CROSSHAIR_BORDER_COLOR: string;
/**
* The color of the RGBA value text.
*/
TEXT_COLOR: string;
/**
* The padding of the RGBA value text within the background rect.
*/
TEXT_PADDING: number;
/**
* The font size of the RGBA value text.
*/
TEXT_FONT_SIZE: number;
/**
* The color of the RGBA value text background rect.
*/
TEXT_BG_COLOR: string;
/**
* The width of the RGBA value text background rect.
*/
TEXT_BG_WIDTH: number;
/**
* The height of the RGBA value text background rect.
*/
TEXT_BG_HEIGHT: number;
/**
* The corner radius of the RGBA value text background rect.
*/
TEXT_BG_CORNER_RADIUS: number;
/**
* The x offset of the RGBA value text background rect from the color picker ring.
*/
TEXT_BG_X_OFFSET: number;
};
const DEFAULT_CONFIG: CanvasColorPickerToolModuleConfig = {
@@ -65,6 +98,14 @@ const DEFAULT_CONFIG: CanvasColorPickerToolModuleConfig = {
CROSSHAIR_LINE_LENGTH: 10,
CROSSHAIR_LINE_COLOR: 'rgba(0,0,0,1)',
CROSSHAIR_BORDER_COLOR: 'rgba(255,255,255,0.8)',
TEXT_COLOR: 'rgba(255,255,255,1)',
TEXT_BG_COLOR: 'rgba(0,0,0,0.8)',
TEXT_BG_HEIGHT: 62,
TEXT_BG_WIDTH: 62,
TEXT_BG_CORNER_RADIUS: 7,
TEXT_PADDING: 8,
TEXT_FONT_SIZE: 12,
TEXT_BG_X_OFFSET: 7,
};

/**
@@ -83,7 +124,7 @@ export class CanvasColorPickerToolModule extends CanvasModuleBase {
/**
* The color currently under the cursor. Only has a value when the color picker tool is active.
*/
$colorUnderCursor = atom<RgbColor>(RGBA_BLACK);
$colorUnderCursor = atom<RgbaColor>(RGBA_BLACK);

/**
* The Konva objects that make up the color picker tool preview:
@@ -105,6 +146,9 @@ export class CanvasColorPickerToolModule extends CanvasModuleBase {
crosshairSouthOuter: Konva.Line;
crosshairWestInner: Konva.Line;
crosshairWestOuter: Konva.Line;
rgbaTextGroup: Konva.Group;
rgbaText: Konva.Text;
rgbaTextBackground: Konva.Rect;
};

constructor(parent: CanvasToolModule) {
@@ -202,8 +246,28 @@ export class CanvasColorPickerToolModule extends CanvasModuleBase {
stroke: this.config.CROSSHAIR_BORDER_COLOR,
perfectDrawEnabled: false,
}),
rgbaTextGroup: new Konva.Group({
listening: false,
name: `${this.type}:color_picker_text_group`,
}),
rgbaText: new Konva.Text({
listening: false,
name: `${this.type}:color_picker_text`,
fill: this.config.TEXT_COLOR,
fontFamily: 'monospace',
align: 'left',
fontStyle: 'bold',
verticalAlign: 'middle',
}),
rgbaTextBackground: new Konva.Rect({
listening: false,
name: `${this.type}:color_picker_text_background`,
fill: this.config.TEXT_BG_COLOR,
}),
};

this.konva.rgbaTextGroup.add(this.konva.rgbaTextBackground, this.konva.rgbaText);

this.konva.group.add(
this.konva.ringCandidateColor,
this.konva.ringCurrentColor,
@@ -216,7 +280,8 @@ export class CanvasColorPickerToolModule extends CanvasModuleBase {
this.konva.crosshairSouthOuter,
this.konva.crosshairSouthInner,
this.konva.crosshairWestOuter,
this.konva.crosshairWestInner
this.konva.crosshairWestInner,
this.konva.rgbaTextGroup
);
}
@@ -233,11 +298,6 @@ export class CanvasColorPickerToolModule extends CanvasModuleBase {
return;
}

if (!this.parent.getCanDraw()) {
this.setVisibility(false);
return;
}

const cursorPos = this.parent.$cursorPos.get();

if (!cursorPos) {
@@ -283,6 +343,24 @@ export class CanvasColorPickerToolModule extends CanvasModuleBase {
outerRadius: colorPickerOuterRadius + twoPixels,
});

const textBgWidth = this.manager.stage.unscale(this.config.TEXT_BG_WIDTH);
const textBgHeight = this.manager.stage.unscale(this.config.TEXT_BG_HEIGHT);

this.konva.rgbaTextBackground.setAttrs({
width: textBgWidth,
height: textBgHeight,
cornerRadius: this.manager.stage.unscale(this.config.TEXT_BG_CORNER_RADIUS),
});
this.konva.rgbaText.setAttrs({
padding: this.manager.stage.unscale(this.config.TEXT_PADDING),
fontSize: this.manager.stage.unscale(this.config.TEXT_FONT_SIZE),
text: `R: ${colorUnderCursor.r}\nG: ${colorUnderCursor.g}\nB: ${colorUnderCursor.b}\nA: ${colorUnderCursor.a}`,
});
this.konva.rgbaTextGroup.setAttrs({
x: x + this.manager.stage.unscale(this.config.RING_OUTER_RADIUS + this.config.TEXT_BG_X_OFFSET),
y: y - textBgHeight / 2,
});

const size = this.manager.stage.unscale(this.config.CROSSHAIR_LINE_LENGTH);
const space = this.manager.stage.unscale(this.config.CROSSHAIR_INNER_RADIUS);
const innerThickness = this.manager.stage.unscale(this.config.CROSSHAIR_LINE_THICKNESS);
@@ -329,11 +407,8 @@ export class CanvasColorPickerToolModule extends CanvasModuleBase {

onStagePointerUp = (_e: KonvaEventObject<PointerEvent>) => {
const color = this.$colorUnderCursor.get();
if (color) {
const settings = this.manager.stateApi.getSettings();
// This will update the color but not the alpha value
this.manager.stateApi.setColor({ ...settings.color, ...color });
}
const settings = this.manager.stateApi.getSettings();
this.manager.stateApi.setColor({ ...settings.color, ...color });
};

onStagePointerMove = (_e: KonvaEventObject<PointerEvent>) => {
@@ -346,7 +421,11 @@ export class CanvasColorPickerToolModule extends CanvasModuleBase {
return;
}

// Hide the background layer so we can get the color under the cursor without the grid interfering
this.manager.background.konva.layer.visible(false);
const color = getColorAtCoordinate(this.manager.stage.konva.stage, cursorPos.absolute);
this.manager.background.konva.layer.visible(true);

if (color) {
this.$colorUnderCursor.set(color);
}
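The repeated `this.manager.stage.unscale(...)` calls size the RGBA readout in stage coordinates so that it renders at a constant pixel size regardless of zoom. The implementation of `unscale` is not shown in this diff; assuming it divides a screen-space size by the current stage scale, the idea is:

```ts
import type Konva from 'konva';

// Assumption: unscale converts a desired on-screen pixel size into stage (world) units.
// At 2x zoom a 12px font becomes 6 world units, which Konva then draws back at 12px on screen.
const unscale = (stage: Konva.Stage, screenPx: number): number => screenPx / stage.scaleX();
```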
@@ -22,6 +22,7 @@ import type {
Coordinate,
Tool,
} from 'features/controlLayers/store/types';
import { isRenderableEntityType } from 'features/controlLayers/store/types';
import Konva from 'konva';
import type { KonvaEventObject } from 'konva/lib/Node';
import { atom } from 'nanostores';
@@ -177,24 +178,26 @@ export class CanvasToolModule extends CanvasModuleBase {
stage.setCursor('not-allowed');
} else if (tool === 'bbox') {
this.tools.bbox.syncCursorStyle();
} else if (this.manager.stateApi.getRenderedEntityCount() === 0) {
stage.setCursor('not-allowed');
} else if (selectedEntityAdapter?.$isDisabled.get()) {
stage.setCursor('not-allowed');
} else if (selectedEntityAdapter?.$isEntityTypeHidden.get()) {
stage.setCursor('not-allowed');
} else if (selectedEntityAdapter?.$isLocked.get()) {
stage.setCursor('not-allowed');
} else if (tool === 'brush') {
this.tools.brush.syncCursorStyle();
} else if (tool === 'eraser') {
this.tools.eraser.syncCursorStyle();
} else if (tool === 'colorPicker') {
this.tools.colorPicker.syncCursorStyle();
} else if (tool === 'move') {
this.tools.move.syncCursorStyle();
} else if (tool === 'rect') {
this.tools.rect.syncCursorStyle();
} else if (selectedEntityAdapter && isRenderableEntityType(selectedEntityAdapter.entityIdentifier.type)) {
if (selectedEntityAdapter.$isDisabled.get()) {
stage.setCursor('not-allowed');
} else if (selectedEntityAdapter.$isEntityTypeHidden.get()) {
stage.setCursor('not-allowed');
} else if (selectedEntityAdapter.$isLocked.get()) {
stage.setCursor('not-allowed');
} else if (tool === 'brush') {
this.tools.brush.syncCursorStyle();
} else if (tool === 'eraser') {
this.tools.eraser.syncCursorStyle();
} else if (tool === 'move') {
this.tools.move.syncCursorStyle();
} else if (tool === 'rect') {
this.tools.rect.syncCursorStyle();
}
} else if (this.manager.stateApi.getRenderedEntityCount() === 0) {
stage.setCursor('not-allowed');
} else {
stage.setCursor('not-allowed');
}
@@ -387,15 +390,17 @@ export class CanvasToolModule extends CanvasModuleBase {
try {
this.$lastPointerType.set(e.evt.pointerType);

if (!this.getCanDraw()) {
return;
}

const tool = this.$tool.get();

if (tool === 'colorPicker') {
this.tools.colorPicker.onStagePointerUp(e);
} else if (tool === 'brush') {
}

if (!this.getCanDraw()) {
return;
}

if (tool === 'brush') {
this.tools.brush.onStagePointerUp(e);
} else if (tool === 'eraser') {
this.tools.eraser.onStagePointerUp(e);
@@ -416,15 +421,17 @@ export class CanvasToolModule extends CanvasModuleBase {
this.$lastPointerType.set(e.evt.pointerType);
this.syncCursorPositions();

if (!this.getCanDraw()) {
return;
}

const tool = this.$tool.get();

if (tool === 'colorPicker') {
this.tools.colorPicker.onStagePointerMove(e);
} else if (tool === 'brush') {
}

if (!this.getCanDraw()) {
return;
}

if (tool === 'brush') {
await this.tools.brush.onStagePointerMove(e);
} else if (tool === 'eraser') {
await this.tools.eraser.onStagePointerMove(e);
@@ -7,6 +7,7 @@ import type {
Coordinate,
CoordinateWithPressure,
Rect,
RgbaColor,
} from 'features/controlLayers/store/types';
import type Konva from 'konva';
import type { KonvaEventObject } from 'konva/lib/Node';
@@ -15,7 +16,6 @@ import { clamp } from 'lodash-es';
import { customAlphabet } from 'nanoid';
import type { StrokeOptions } from 'perfect-freehand';
import getStroke from 'perfect-freehand';
import type { RgbColor } from 'react-colorful';
import { assert } from 'tsafe';

/**
@@ -484,9 +484,10 @@ export function loadImage(src: string): Promise<HTMLImageElement> {
export const nanoid = customAlphabet('0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz', 10);

export function getPrefixedId(
prefix: CanvasEntityIdentifier['type'] | CanvasObjectState['type'] | (string & Record<never, never>)
prefix: CanvasEntityIdentifier['type'] | CanvasObjectState['type'] | (string & Record<never, never>),
separator = ':'
): string {
return `${prefix}:${nanoid()}`;
return `${prefix}${separator}${nanoid()}`;
}

export const getEmptyRect = (): Rect => {
@@ -723,7 +724,7 @@ export const getPointerType = (e: KonvaEventObject<PointerEvent>): 'mouse' | 'pe
* @param coord The coordinate to get the color at. This must be the _absolute_ coordinate on the stage.
* @returns The color under the coordinate, or null if there was a problem getting the color.
*/
export const getColorAtCoordinate = (stage: Konva.Stage, coord: Coordinate): RgbColor | null => {
export const getColorAtCoordinate = (stage: Konva.Stage, coord: Coordinate): RgbaColor | null => {
const ctx = stage
.toCanvas({ x: coord.x, y: coord.y, width: 1, height: 1, imageSmoothingEnabled: false })
.getContext('2d');
@@ -732,13 +733,13 @@ export const getColorAtCoordinate = (stage: Konva.Stage, coord: Coordinate): Rgb
return null;
}

const [r, g, b, _a] = ctx.getImageData(0, 0, 1, 1).data;
const [r, g, b, a] = ctx.getImageData(0, 0, 1, 1).data;

if (r === undefined || g === undefined || b === undefined) {
if (r === undefined || g === undefined || b === undefined || a === undefined) {
return null;
}

return { r, g, b };
return { r, g, b, a };
};

export const roundRect = (rect: Rect): Rect => {
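A short usage sketch of the two updated helpers above; the ID suffixes and coordinates are illustrative, not from the diff:

```ts
import { getColorAtCoordinate, getPrefixedId } from 'features/controlLayers/konva/util';
import type Konva from 'konva';

// The new separator parameter defaults to ':' so existing call sites keep the same ID shape.
const defaultId = getPrefixedId('brush_line');      // e.g. "brush_line:aB3xYz9QwE"
const dashedId = getPrefixedId('brush_line', '-');  // e.g. "brush_line-aB3xYz9QwE"

// The sampled color now includes alpha; null means the 1x1 canvas context was unavailable.
declare const stage: Konva.Stage;
const color = getColorAtCoordinate(stage, { x: 10, y: 20 }); // { r, g, b, a } | null
```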
@@ -6,6 +6,35 @@ import type { ControlLoRAModelConfig, ControlNetModelConfig, T2IAdapterModelConf
import { assert } from 'tsafe';
import { z } from 'zod';

const zAjustImageChannels = z.enum([
'Red (RGBA)',
'Green (RGBA)',
'Blue (RGBA)',
'Alpha (RGBA)',
'Cyan (CMYK)',
'Magenta (CMYK)',
'Yellow (CMYK)',
'Black (CMYK)',
'Hue (HSV)',
'Saturation (HSV)',
'Value (HSV)',
'Luminosity (LAB)',
'A (LAB)',
'B (LAB)',
'Y (YCbCr)',
'Cb (YCbCr)',
'Cr (YCbCr)',
]);
export type AjustImageChannels = z.infer<typeof zAjustImageChannels>;
export const isAjustImageChannels = (v: unknown): v is AjustImageChannels => zAjustImageChannels.safeParse(v).success;
const zAdjustImageFilterConfig = z.object({
type: z.literal('adjust_image'),
channel: zAjustImageChannels,
value: z.number(),
scale_values: z.boolean().optional(),
});
export type AdjustImageFilterConfig = z.infer<typeof zAdjustImageFilterConfig>;

const zCannyEdgeDetectionFilterConfig = z.object({
type: z.literal('canny_edge_detection'),
low_threshold: z.number().int().gte(0).lte(255),
@@ -118,6 +147,7 @@ const zNoiseFilterConfig = z.object({
export type NoiseFilterConfig = z.infer<typeof zNoiseFilterConfig>;

const zFilterConfig = z.discriminatedUnion('type', [
zAdjustImageFilterConfig,
zCannyEdgeDetectionFilterConfig,
zColorMapFilterConfig,
zContentShuffleFilterConfig,
@@ -137,6 +167,7 @@ const zFilterConfig = z.discriminatedUnion('type', [
export type FilterConfig = z.infer<typeof zFilterConfig>;

const zFilterType = z.enum([
'adjust_image',
'canny_edge_detection',
'color_map',
'content_shuffle',
@@ -167,6 +198,42 @@ type ImageFilterData<T extends FilterConfig['type']> = {
};

export const IMAGE_FILTERS: { [key in FilterConfig['type']]: ImageFilterData<key> } = {
adjust_image: {
type: 'adjust_image',
buildDefaults: () => ({
type: 'adjust_image',
channel: 'Luminosity (LAB)',
value: 1,
scale_values: false,
}),
buildGraph: ({ image_name }, { channel, value, scale_values }) => {
const graph = new Graph(getPrefixedId('adjust_image_filter'));
let node;
if (scale_values) {
node = graph.addNode({
id: getPrefixedId('img_channel_multiply'),
type: 'img_channel_multiply',
image: { image_name },
channel,
scale: value,
invert_channel: false,
});
} else {
value = Math.min(value, 2); // Limit value to a maximum of 2
node = graph.addNode({
id: getPrefixedId('img_channel_offset'),
type: 'img_channel_offset',
image: { image_name },
channel,
offset: Math.round(255 * (value - 1)), // value is in range [0, 2], offset is in range [-255, 255]
});
}
return {
graph,
outputNodeId: node.id,
};
},
},
canny_edge_detection: {
type: 'canny_edge_detection',
buildDefaults: () => ({
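In the non-scaling branch above, the UI value in [0, 2] is mapped onto an additive per-channel offset in [-255, 255] via `Math.round(255 * (value - 1))`. A quick standalone check of that mapping (the sample values are just examples):

```ts
// value 1 is neutral; below 1 subtracts from the selected channel, above 1 adds to it.
const toOffset = (value: number): number => Math.round(255 * (Math.min(value, 2) - 1));

toOffset(1.0); // 0    -> channel unchanged
toOffset(1.2); // 51   -> add 20% of the channel range
toOffset(0.8); // -51  -> subtract 20% of the channel range
toOffset(3.0); // 255  -> clamped to 2, then the full channel range
```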
@@ -49,6 +49,8 @@ export type ParamsState = {
optimizedDenoisingEnabled: boolean;
iterations: number;
scheduler: ParameterScheduler;
upscaleScheduler: ParameterScheduler;
upscaleCfgScale: ParameterCFGScale;
seed: ParameterSeed;
shouldRandomizeSeed: boolean;
steps: ParameterSteps;
@@ -96,6 +98,8 @@ const initialState: ParamsState = {
optimizedDenoisingEnabled: true,
iterations: 1,
scheduler: 'dpmpp_3m_k',
upscaleScheduler: 'kdpm_2',
upscaleCfgScale: 2,
seed: 0,
shouldRandomizeSeed: true,
steps: 30,
@@ -139,6 +143,9 @@ export const paramsSlice = createSlice({
setCfgScale: (state, action: PayloadAction<ParameterCFGScale>) => {
state.cfgScale = action.payload;
},
setUpscaleCfgScale: (state, action: PayloadAction<ParameterCFGScale>) => {
state.upscaleCfgScale = action.payload;
},
setGuidance: (state, action: PayloadAction<ParameterGuidance>) => {
state.guidance = action.payload;
},
@@ -148,6 +155,10 @@ export const paramsSlice = createSlice({
setScheduler: (state, action: PayloadAction<ParameterScheduler>) => {
state.scheduler = action.payload;
},
setUpscaleScheduler: (state, action: PayloadAction<ParameterScheduler>) => {
state.upscaleScheduler = action.payload;
},

setSeed: (state, action: PayloadAction<number>) => {
state.seed = action.payload;
state.shouldRandomizeSeed = false;
@@ -315,6 +326,8 @@ export const {
setCfgRescaleMultiplier,
setGuidance,
setScheduler,
setUpscaleScheduler,
setUpscaleCfgScale,
setSeed,
setImg2imgStrength,
setOptimizedDenoisingEnabled,
@@ -409,6 +422,9 @@ export const selectVAEPrecision = createParamsSelector((params) => params.vaePre
export const selectIterations = createParamsSelector((params) => params.iterations);
export const selectShouldUseCPUNoise = createParamsSelector((params) => params.shouldUseCpuNoise);

export const selectUpscaleScheduler = createParamsSelector((params) => params.upscaleScheduler);
export const selectUpscaleCfgScale = createParamsSelector((params) => params.upscaleCfgScale);

export const selectRefinerCFGScale = createParamsSelector((params) => params.refinerCFGScale);
export const selectRefinerModel = createParamsSelector((params) => params.refinerModel);
export const selectIsRefinerModelSelected = createParamsSelector((params) => Boolean(params.refinerModel));
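The new upscale parameters are wired the same way as the existing ones: a reducer plus a matching selector. A usage sketch follows; the component and the import path of the slice are illustrative, not taken from this diff:

```tsx
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; // useAppDispatch location assumed
// Illustrative import path for the params slice:
import { selectUpscaleCfgScale, setUpscaleCfgScale } from 'features/controlLayers/store/paramsSlice';

export const UpscaleCfgScaleExample = () => {
  const dispatch = useAppDispatch();
  const upscaleCfgScale = useAppSelector(selectUpscaleCfgScale);
  // Dispatching the new action writes params.upscaleCfgScale; the selector reads it back.
  const onReset = () => dispatch(setUpscaleCfgScale(2)); // 2 is the initial value in this diff
  return <button onClick={onReset}>CFG {upscaleCfgScale}</button>;
};
```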
@@ -9,7 +9,8 @@ import type { DndListTargetState } from 'features/dnd/types';
*/
const line = {
thickness: 2,
backgroundColor: 'base.500',
backgroundColor: 'red',
// backgroundColor: 'base.500',
};

type DropIndicatorProps = {
@@ -104,7 +105,7 @@ function DndDropIndicatorInternal({ edge, gap = '0px' }: DropIndicatorProps) {
);
}

export const DndListDropIndicator = ({ dndState }: { dndState: DndListTargetState }) => {
export const DndListDropIndicator = ({ dndState, gap }: { dndState: DndListTargetState; gap?: string }) => {
if (dndState.type !== 'is-dragging-over') {
return null;
}
@@ -117,7 +118,7 @@ export const DndListDropIndicator = ({ dndState }: { dndState: DndListTargetStat
<DndDropIndicatorInternal
edge={dndState.closestEdge}
// This is the gap between items in the list, used to calculate the position of the drop indicator
gap="var(--invoke-space-2)"
gap={gap || 'var(--invoke-space-2)'}
/>
);
};
@@ -4,13 +4,17 @@ import { containsFiles, getFiles } from '@atlaskit/pragmatic-drag-and-drop/exter
import { preventUnhandled } from '@atlaskit/pragmatic-drag-and-drop/prevent-unhandled';
import type { SystemStyleObject } from '@invoke-ai/ui-library';
import { Box, Flex, Heading } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { getStore } from 'app/store/nanostores/store';
import { useAppSelector } from 'app/store/storeHooks';
import { setFileToPaste } from 'features/controlLayers/components/CanvasPasteModal';
import { DndDropOverlay } from 'features/dnd/DndDropOverlay';
import type { DndTargetState } from 'features/dnd/types';
import { $imageViewer } from 'features/gallery/components/ImageViewer/useImageViewer';
import { selectAutoAddBoardId } from 'features/gallery/store/gallerySelectors';
import { selectMaxImageUploadCount } from 'features/system/store/configSlice';
import { toast } from 'features/toast/toast';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import { memo, useCallback, useEffect, useRef, useState } from 'react';
import { useTranslation } from 'react-i18next';
import { uploadImages } from 'services/api/endpoints/images';
@@ -71,6 +75,8 @@ export const FullscreenDropzone = memo(() => {
const ref = useRef<HTMLDivElement>(null);
const maxImageUploadCount = useAppSelector(selectMaxImageUploadCount);
const [dndState, setDndState] = useState<DndTargetState>('idle');
const activeTab = useAppSelector(selectActiveTab);
const isImageViewerOpen = useStore($imageViewer);

const validateAndUploadFiles = useCallback(
(files: File[]) => {
@@ -92,6 +98,15 @@ export const FullscreenDropzone = memo(() => {
});
return;
}

// While on the canvas tab and when pasting a single image, canvas may want to create a new layer. Let it handle
// the paste event.
const [firstImageFile] = files;
if (!isImageViewerOpen && activeTab === 'canvas' && files.length === 1 && firstImageFile) {
setFileToPaste(firstImageFile);
return;
}

const autoAddBoardId = selectAutoAddBoardId(getState());

const uploadArgs: UploadImageArg[] = files.map((file, i) => ({
@@ -104,7 +119,7 @@ export const FullscreenDropzone = memo(() => {

uploadImages(uploadArgs);
},
[maxImageUploadCount, t]
[activeTab, isImageViewerOpen, maxImageUploadCount, t]
);

const onPaste = useCallback(
@@ -108,18 +108,6 @@ export const singleCanvasEntityDndSource: DndSource<SingleCanvasEntityDndSourceD
getData: buildGetData(_singleCanvasEntity.key, _singleCanvasEntity.type),
};

const _singleWorkflowField = buildTypeAndKey('single-workflow-field');
type SingleWorkflowFieldDndSourceData = DndData<
typeof _singleWorkflowField.type,
typeof _singleWorkflowField.key,
{ fieldIdentifier: FieldIdentifier }
>;
export const singleWorkflowFieldDndSource: DndSource<SingleWorkflowFieldDndSourceData> = {
..._singleWorkflowField,
typeGuard: buildTypeGuard(_singleWorkflowField.key),
getData: buildGetData(_singleWorkflowField.key, _singleWorkflowField.type),
};

type DndTarget<TargetData extends DndData, SourceData extends DndData> = {
key: symbol;
type: TargetData['type'];
@@ -1,9 +1,9 @@
import { Flex, IconButton, Input, Text } from '@invoke-ai/ui-library';
import { useBoolean } from 'common/hooks/useBoolean';
import { useEditable } from 'common/hooks/useEditable';
import { withResultAsync } from 'common/util/result';
import { toast } from 'features/toast/toast';
import type { ChangeEvent, KeyboardEvent } from 'react';
import { memo, useCallback, useEffect, useRef, useState } from 'react';
import { memo, useCallback, useRef } from 'react';
import { useTranslation } from 'react-i18next';
import { PiPencilBold } from 'react-icons/pi';
import { useUpdateBoardMutation } from 'services/api/endpoints/boards';
@@ -16,85 +16,54 @@ type Props = {
export const BoardEditableTitle = memo(({ board, isSelected }: Props) => {
const { t } = useTranslation();
const isEditing = useBoolean(false);
const [isHovering, setIsHovering] = useState(false);
const [localTitle, setLocalTitle] = useState(board.board_name);
const ref = useRef<HTMLInputElement>(null);
const isHovering = useBoolean(false);
const inputRef = useRef<HTMLInputElement>(null);
const [updateBoard, updateBoardResult] = useUpdateBoardMutation();

const onChange = useCallback((e: ChangeEvent<HTMLInputElement>) => {
setLocalTitle(e.target.value);
}, []);

const onEdit = useCallback(() => {
isEditing.setTrue();
setIsHovering(false);
}, [isEditing]);

const onBlur = useCallback(async () => {
const trimmedTitle = localTitle.trim();
isEditing.setFalse();
if (trimmedTitle.length === 0) {
setLocalTitle(board.board_name);
} else if (trimmedTitle !== board.board_name) {
setLocalTitle(trimmedTitle);
const onChange = useCallback(
async (board_name: string) => {
const result = await withResultAsync(() =>
updateBoard({ board_id: board.board_id, changes: { board_name: trimmedTitle } }).unwrap()
updateBoard({ board_id: board.board_id, changes: { board_name } }).unwrap()
);
if (result.isErr()) {
setLocalTitle(board.board_name);
toast({
status: 'error',
title: t('boards.updateBoardError'),
});
} else {
setLocalTitle(result.value.board_name);
}
}
}, [board.board_id, board.board_name, isEditing, localTitle, updateBoard, t]);

const onKeyDown = useCallback(
(e: KeyboardEvent<HTMLInputElement>) => {
if (e.key === 'Enter') {
onBlur();
} else if (e.key === 'Escape') {
setLocalTitle(board.board_name);
isEditing.setFalse();
}
},
[board.board_name, isEditing, onBlur]
[board.board_id, t, updateBoard]
);
const handleMouseOver = useCallback(() => {
setIsHovering(true);
}, []);
const editable = useEditable({
value: board.board_name,
defaultValue: board.board_name,
onChange,
inputRef,
onStartEditing: isHovering.setTrue,
});

const handleMouseOut = useCallback(() => {
setIsHovering(false);
}, []);

useEffect(() => {
if (isEditing.isTrue) {
ref.current?.focus();
ref.current?.select();
}
}, [isEditing.isTrue]);

if (!isEditing.isTrue) {
if (!editable.isEditing) {
return (
<Flex alignItems="center" gap={3} onMouseOver={handleMouseOver} onMouseOut={handleMouseOut}>
<Flex alignItems="center" gap={3} onMouseOver={isHovering.setTrue} onMouseOut={isHovering.setFalse}>
<Text
size="sm"
fontWeight="semibold"
userSelect="none"
color={isSelected ? 'base.100' : 'base.300'}
onDoubleClick={onEdit}
onDoubleClick={editable.startEditing}
cursor="text"
>
{localTitle}
{editable.value}
</Text>
{isHovering && (
<IconButton aria-label="edit name" icon={<PiPencilBold />} size="sm" variant="ghost" onClick={onEdit} />
{isHovering.isTrue && (
<IconButton
aria-label="edit name"
icon={<PiPencilBold />}
size="sm"
variant="ghost"
onClick={editable.startEditing}
/>
)}
</Flex>
);
@@ -102,11 +71,8 @@ export const BoardEditableTitle = memo(({ board, isSelected }: Props) => {

return (
<Input
ref={ref}
value={localTitle}
onChange={onChange}
onBlur={onBlur}
onKeyDown={onKeyDown}
ref={inputRef}
{...editable.inputProps}
variant="outline"
isDisabled={updateBoardResult.isLoading}
_focusVisible={{ borderWidth: 1, borderColor: 'invokeBlueAlpha.400', borderRadius: 'base' }}
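The rewritten component delegates its edit-state bookkeeping to `useEditable` from `common/hooks/useEditable`, whose implementation is not included in this diff. From the call site above, its contract appears to be roughly the following hypothetical reconstruction (field names inferred from usage; the real hook may differ):

```ts
// Hypothetical shape inferred from how BoardEditableTitle uses the hook.
import type { ChangeEvent, FocusEvent, KeyboardEvent, RefObject } from 'react';

type UseEditableArg = {
  value: string;                                      // committed value from the server/store
  defaultValue: string;                               // fallback when the edit is emptied
  onChange: (value: string) => void | Promise<void>;  // called with the value to commit
  inputRef: RefObject<HTMLInputElement>;              // focused/selected when editing starts
  onStartEditing?: () => void;
};

type UseEditableReturn = {
  isEditing: boolean;
  value: string;            // what to render while not editing
  startEditing: () => void;
  inputProps: {
    value: string;
    onChange: (e: ChangeEvent<HTMLInputElement>) => void;
    onBlur: (e: FocusEvent<HTMLInputElement>) => void;
    onKeyDown: (e: KeyboardEvent<HTMLInputElement>) => void;
  };
};

declare function useEditable(arg: UseEditableArg): UseEditableReturn;
```

If the hook does own the Enter/Escape/blur handling and the local title state, that explains why the old onBlur, onKeyDown, and focus-on-edit useEffect logic could be deleted from the component.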