Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-15 09:18:00 -05:00)
Compare commits
265 Commits
README.md
@@ -2,21 +2,102 @@
|
||||
|
||||

|
||||
|
||||
# Invoke - Professional Creative AI Tools for Visual Media
|
||||
## To learn more about Invoke, or implement our Business solutions, visit [invoke.com](https://www.invoke.com/about)
|
||||
|
||||
# Invoke - Professional Creative AI Tools for Visual Media
|
||||
|
||||
#### To learn more about Invoke, or implement our Business solutions, visit [invoke.com]
|
||||
|
||||
[![discord badge]][discord link]
|
||||
[![discord badge]][discord link] [![latest release badge]][latest release link] [![github stars badge]][github stars link] [![github forks badge]][github forks link] [![CI checks on main badge]][CI checks on main link] [![latest commit to main badge]][latest commit to main link] [![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link] [![translation status badge]][translation status link]
|
||||
|
||||
[![latest release badge]][latest release link] [![github stars badge]][github stars link] [![github forks badge]][github forks link]
|
||||
</div>
|
||||
|
||||
[![CI checks on main badge]][CI checks on main link] [![latest commit to main badge]][latest commit to main link]
|
||||
Invoke is a leading creative engine built to empower professionals and enthusiasts alike. Generate and create stunning visual media using the latest AI-driven technologies. Invoke offers an industry leading web-based UI, and serves as the foundation for multiple commercial products.
|
||||
|
||||
[![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link] [![translation status badge]][translation status link]
|
||||
[Installation and Updates][installation docs] - [Documentation and Tutorials][docs home] - [Bug Reports][github issues] - [Contributing][contributing docs]
|
||||
|
||||
<div align="center">
|
||||
|
||||

|
||||
|
||||
</div>
|
||||
|
||||
## Quick Start
|
||||
|
||||
1. Download and unzip the installer from the bottom of the [latest release][latest release link].
|
||||
2. Run the installer script.
|
||||
|
||||
- **Windows**: Double-click on the `install.bat` script.
|
||||
- **macOS**: Open a Terminal window, drag the file `install.sh` from Finder into the Terminal, and press enter.
|
||||
- **Linux**: Run `install.sh`.
|
||||
|
||||
3. When prompted, enter a location for the install and select your GPU type.
|
||||
4. Once the install finishes, find the directory you selected during install. The default location is `C:\Users\Username\invokeai` for Windows or `~/invokeai` for Linux/macOS.
|
||||
5. Run the launcher script (`invoke.bat` for Windows, `invoke.sh` for macOS and Linux) the same way you ran the installer script in step 2.
|
||||
6. Select option 1 to start the application. Once it starts up, open your browser and go to <http://localhost:9090>.
|
||||
7. Open the model manager tab to install a starter model and then you'll be ready to generate.
|
||||
|
||||
More detail, including hardware requirements and manual install instructions, is available in the [installation documentation][installation docs].
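As a concrete illustration, on Linux the steps above reduce to a handful of shell commands. The archive and folder names below are examples; yours will match the release you downloaded and the install location you chose.

```sh
# Steps 1-2: unzip the downloaded installer and run it,
# answering the prompts for install location and GPU type
unzip InvokeAI-installer-*.zip   # example archive name from the release page
cd InvokeAI-Installer            # example name of the unpacked folder
./install.sh

# Steps 5-6: run the launcher from the directory chosen during install
cd ~/invokeai                    # default location on Linux/macOS
./invoke.sh                      # choose option 1, then open http://localhost:9090
```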
|
||||
|
||||
## Troubleshooting, FAQ and Support
|
||||
|
||||
Please review our [FAQ][faq] for solutions to common installation problems and other issues.
|
||||
|
||||
For more help, please join our [Discord][discord link].
|
||||
|
||||
## Features
|
||||
|
||||
Full details on features can be found in [our documentation][features docs].
|
||||
|
||||
### Web Server & UI
|
||||
|
||||
Invoke runs a locally hosted web server & React UI with an industry-leading user experience.
|
||||
|
||||
### Unified Canvas
|
||||
|
||||
The Unified Canvas is a fully integrated canvas implementation with support for all core generation capabilities, in/out-painting, brush tools, and more. This creative tool unlocks the capability for artists to create with AI as a creative collaborator, and can be used to augment AI-generated imagery, sketches, photography, renders, and more.
|
||||
|
||||
### Workflows & Nodes
|
||||
|
||||
Invoke offers a fully featured workflow management solution, enabling users to combine the power of node-based workflows with the ease of a UI. This allows for customizable generation pipelines to be developed and shared by users looking to create specific workflows to support their production use-cases.
|
||||
|
||||
### Board & Gallery Management
|
||||
|
||||
Invoke features an organized gallery system for easily storing, accessing, and remixing your content in the Invoke workspace. Images can be dragged/dropped onto any image-based UI element in the application, and rich metadata within the image allows for easy recall of key prompts or settings used in your workflow.
|
||||
|
||||
### Other features
|
||||
|
||||
- Support for both ckpt and diffusers models
|
||||
- SD1.5, SD2.0, and SDXL support
|
||||
- Upscaling Tools
|
||||
- Embedding Manager & Support
|
||||
- Model Manager & Support
|
||||
- Workflow creation & management
|
||||
- Node-Based Architecture
|
||||
|
||||
## Contributing
|
||||
|
||||
Anyone who wishes to contribute to this project - whether documentation, features, bug fixes, code cleanup, testing, or code reviews - is very much encouraged to do so.
|
||||
|
||||
Get started with contributing by reading our [contribution documentation][contributing docs], joining the [#dev-chat] or the GitHub discussion board.
|
||||
|
||||
We hope you enjoy using Invoke as much as we enjoy creating it, and we hope you will elect to become part of our community.
|
||||
|
||||
## Thanks
|
||||
|
||||
Invoke is a combined effort of [passionate and talented people from across the world][contributors]. We thank them for their time, hard work and effort.
|
||||
|
||||
Original portions of the software are Copyright © 2024 by respective contributors.
|
||||
|
||||
[features docs]: https://invoke-ai.github.io/InvokeAI/features/
|
||||
[faq]: https://invoke-ai.github.io/InvokeAI/help/FAQ/
|
||||
[contributors]: https://invoke-ai.github.io/InvokeAI/other/CONTRIBUTORS/
|
||||
[invoke.com]: https://www.invoke.com/about
|
||||
[github issues]: https://github.com/invoke-ai/InvokeAI/issues
|
||||
[docs home]: https://invoke-ai.github.io/InvokeAI
|
||||
[installation docs]: https://invoke-ai.github.io/InvokeAI/installation/INSTALLATION/
|
||||
[#dev-chat]: https://discord.com/channels/1020123559063990373/1049495067846524939
|
||||
[contributing docs]: https://invoke-ai.github.io/InvokeAI/contributing/CONTRIBUTING/
|
||||
[CI checks on main badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
|
||||
[CI checks on main link]:https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Amain
|
||||
[CI checks on main link]: https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Amain
|
||||
[discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord
|
||||
[discord link]: https://discord.gg/ZmtBAhwWhy
|
||||
[github forks badge]: https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github
|
||||
@@ -30,402 +111,6 @@
|
||||
[latest commit to main badge]: https://flat.badgen.net/github/last-commit/invoke-ai/InvokeAI/main?icon=github&color=yellow&label=last%20dev%20commit&cache=900
|
||||
[latest commit to main link]: https://github.com/invoke-ai/InvokeAI/commits/main
|
||||
[latest release badge]: https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
|
||||
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases
|
||||
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases/latest
|
||||
[translation status badge]: https://hosted.weblate.org/widgets/invokeai/-/svg-badge.svg
|
||||
[translation status link]: https://hosted.weblate.org/engage/invokeai/
|
||||
|
||||
</div>
|
||||
|
||||
InvokeAI is a leading creative engine built to empower professionals
|
||||
and enthusiasts alike. Generate and create stunning visual media using
|
||||
the latest AI-driven technologies. InvokeAI offers an industry leading
|
||||
Web Interface, interactive Command Line Interface, and also serves as
|
||||
the foundation for multiple commercial products.
|
||||
|
||||
**Quick links**: [[How to
|
||||
Install](https://invoke-ai.github.io/InvokeAI/installation/INSTALLATION/)] [<a
|
||||
href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a
|
||||
href="https://invoke-ai.github.io/InvokeAI/">Documentation and
|
||||
Tutorials</a>]
|
||||
[<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>]
|
||||
[<a
|
||||
href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion,
|
||||
Ideas & Q&A</a>]
|
||||
[<a
|
||||
href="https://invoke-ai.github.io/InvokeAI/contributing/CONTRIBUTING/">Contributing</a>]
|
||||
|
||||
<div align="center">
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
</div>
|
||||
|
||||
## Table of Contents
|
||||
|
||||
Table of Contents 📝
|
||||
|
||||
**Getting Started**
|
||||
1. 🏁 [Quick Start](#quick-start)
|
||||
2. 🖥️ [Hardware Requirements](#hardware-requirements)
|
||||
|
||||
**More About Invoke**
|
||||
1. 🌟 [Features](#features)
|
||||
2. 📣 [Latest Changes](#latest-changes)
|
||||
3. 🛠️ [Troubleshooting](#troubleshooting)
|
||||
|
||||
**Supporting the Project**
|
||||
1. 🤝 [Contributing](#contributing)
|
||||
2. 👥 [Contributors](#contributors)
|
||||
3. 💕 [Support](#support)
|
||||
|
||||
## Quick Start
|
||||
|
||||
For full installation and upgrade instructions, please see:
|
||||
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/INSTALLATION/)
|
||||
|
||||
If upgrading from version 2.3, please read [Migrating a 2.3 root
|
||||
directory to 3.0](#migrating-to-3) first.
|
||||
|
||||
### Automatic Installer (suggested for 1st time users)
|
||||
|
||||
1. Go to the bottom of the [Latest Release Page](https://github.com/invoke-ai/InvokeAI/releases/latest)
|
||||
|
||||
2. Download the .zip file for your OS (Windows/macOS/Linux).
|
||||
|
||||
3. Unzip the file.
|
||||
|
||||
4. **Windows:** double-click on the `install.bat` script. **macOS:** Open a Terminal window, drag the file `install.sh` from Finder
|
||||
into the Terminal, and press return. **Linux:** run `install.sh`.
|
||||
|
||||
5. You'll be asked to confirm the location of the folder in which
|
||||
to install InvokeAI and its image generation model files. Pick a
|
||||
location with at least 15 GB of free disk space. More if you plan on
|
||||
installing lots of models.
|
||||
|
||||
6. Wait while the installer does its thing. After installing the software,
|
||||
the installer will launch a script that lets you configure InvokeAI and
|
||||
select a set of starting image generation models.
|
||||
|
||||
7. Find the folder that InvokeAI was installed into (it is not the
|
||||
same as the unpacked zip file directory!) The default location of this
|
||||
folder (if you didn't change it in step 5) is `~/invokeai` on
|
||||
Linux/Mac systems, and `C:\Users\YourName\invokeai` on Windows. This directory will contain launcher scripts named `invoke.sh` and `invoke.bat`.
|
||||
|
||||
8. On Windows systems, double-click on the `invoke.bat` file. On
|
||||
macOS, open a Terminal window, drag `invoke.sh` from the folder into
|
||||
the Terminal, and press return. On Linux, run `invoke.sh`
|
||||
|
||||
9. Press 2 to open the "browser-based UI", press enter/return, wait a
|
||||
minute or two for Stable Diffusion to start up, then open your browser
|
||||
and go to http://localhost:9090.
|
||||
|
||||
10. Type `banana sushi` in the box on the top left and click `Invoke`
|
||||
|
||||
### Command-Line Installation (for developers and users familiar with Terminals)
|
||||
|
||||
You must have Python 3.10 through 3.11 installed on your machine. Earlier or
|
||||
later versions are not supported.
|
||||
Node.js also needs to be installed along with `pnpm` (can be installed with
|
||||
the command `npm install -g pnpm` if needed)
|
||||
|
||||
1. Open a command-line window on your machine. PowerShell is recommended for Windows.
|
||||
2. Create a directory to install InvokeAI into. You'll need at least 15 GB of free space:
|
||||
|
||||
```terminal
mkdir invokeai
```
|
||||
|
||||
3. Create a virtual environment named `.venv` inside this directory and activate it:
|
||||
|
||||
```terminal
cd invokeai
python -m venv .venv --prompt InvokeAI
```
|
||||
|
||||
4. Activate the virtual environment (do it every time you run InvokeAI)
|
||||
|
||||
_For Linux/Mac users:_
|
||||
|
||||
```sh
source .venv/bin/activate
```
|
||||
|
||||
_For Windows users:_
|
||||
|
||||
```ps
.venv\Scripts\activate
```
|
||||
|
||||
5. Install the InvokeAI module and its dependencies. Choose the command suited for your platform & GPU.
|
||||
|
||||
_For Windows/Linux with an NVIDIA GPU:_
|
||||
|
||||
```terminal
pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu121
```
|
||||
|
||||
_For Linux with an AMD GPU:_
|
||||
|
||||
```sh
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.6
```
|
||||
|
||||
_For non-GPU systems:_
|
||||
```terminal
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
```
|
||||
|
||||
_For Macintoshes, either Intel or M1/M2/M3:_
|
||||
|
||||
```sh
pip install InvokeAI --use-pep517
```
|
||||
|
||||
6. Configure InvokeAI and install a starting set of image generation models (you only need to do this once):
|
||||
|
||||
```terminal
invokeai-configure --root .
```
|
||||
Don't miss the dot at the end!
|
||||
|
||||
7. Launch the web server (do it every time you run InvokeAI):
|
||||
|
||||
```terminal
invokeai-web
```
|
||||
|
||||
8. Point your browser to http://localhost:9090 to bring up the web interface.
|
||||
|
||||
9. Type `banana sushi` in the box on the top left and click `Invoke`.
|
||||
|
||||
Be sure to activate the virtual environment each time before re-launching InvokeAI,
|
||||
using `source .venv/bin/activate` or `.venv\Scripts\activate`.
|
||||
|
||||
## Detailed Installation Instructions
|
||||
|
||||
This fork is supported across Linux, Windows and Macintosh. Linux
|
||||
users can use either an Nvidia-based card (with CUDA support) or an
|
||||
AMD card (using the ROCm driver). For full installation and upgrade
|
||||
instructions, please see:
|
||||
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_SOURCE/)
|
||||
|
||||
<a name="migrating-to-3"></a>
|
||||
### Migrating a v2.3 InvokeAI root directory
|
||||
|
||||
The InvokeAI root directory is where the InvokeAI startup file,
|
||||
installed models, and generated images are stored. It is ordinarily
|
||||
named `invokeai` and located in your home directory. The contents and
|
||||
layout of this directory have changed between versions 2.3 and 3.0 and
|
||||
cannot be used directly.
|
||||
|
||||
We currently recommend that you use the installer to create a new root
|
||||
directory named differently from the 2.3 one, e.g. `invokeai-3` and
|
||||
then use a migration script to copy your 2.3 models into the new
|
||||
location. However, if you choose, you can upgrade this directory in
|
||||
place. This section gives both recipes.
|
||||
|
||||
#### Creating a new root directory and migrating old models
|
||||
|
||||
This is the safer recipe because it leaves your old root directory in
|
||||
place to fall back on.
|
||||
|
||||
1. Follow the instructions above to create and install InvokeAI in a
|
||||
directory that has a different name from the 2.3 invokeai directory.
|
||||
In this example, we will use "invokeai-3"
|
||||
|
||||
2. When you are prompted to select models to install, select a minimal
|
||||
set of models, such as stable-diffusion-v1.5 only.
|
||||
|
||||
3. After installation is complete, launch `invokeai.sh` (Linux/Mac) or
|
||||
`invokeai.bat` and select option 8 "Open the developers console". This
|
||||
will take you to the command line.
|
||||
|
||||
4. Issue the command `invokeai-migrate3 --from /path/to/v2.3-root --to
|
||||
/path/to/invokeai-3-root`. Provide the correct `--from` and `--to`
|
||||
paths for your v2.3 and v3.0 root directories respectively.
|
||||
|
||||
This will copy and convert your old models from 2.3 format to 3.0
|
||||
format and create a new `models` directory in the 3.0 directory. The
|
||||
old models directory (which contains the models selected at install
|
||||
time) will be renamed `models.orig` and can be deleted once you have
|
||||
confirmed that the migration was successful.
|
||||
|
||||
If you wish, you can pass the 2.3 root directory to both `--from` and
|
||||
`--to` in order to update in place. Warning: this directory will no
|
||||
longer be usable with InvokeAI 2.3.
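As a minimal sketch of step 4, assume the old 2.3 root is `~/invokeai` and the new 3.0 root is `~/invokeai-3` (substitute your actual paths):

```sh
# Copy and convert the 2.3-format models into the new 3.0 root
invokeai-migrate3 --from ~/invokeai --to ~/invokeai-3

# In-place variant: pass the same 2.3 root to both flags
# (afterwards the directory can no longer be used with InvokeAI 2.3)
invokeai-migrate3 --from ~/invokeai --to ~/invokeai
```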
|
||||
|
||||
#### Migrating in place
|
||||
|
||||
For the adventurous, you may do an in-place upgrade from 2.3 to 3.0
|
||||
without touching the command line. **This recipe does not work on
|
||||
Windows platforms due to a bug in the Windows version of the 2.3
|
||||
upgrade script.** See the next section for a Windows recipe.
|
||||
|
||||
##### For Mac and Linux Users:
|
||||
|
||||
1. Launch the InvokeAI launcher script in your current v2.3 root directory.
|
||||
|
||||
2. Select option [9] "Update InvokeAI" to bring up the updater dialog.
|
||||
|
||||
3. Select option [1] to upgrade to the latest release.
|
||||
|
||||
4. Once the upgrade is finished you will be returned to the launcher
|
||||
menu. Select option [6] "Re-run the configure script to fix a broken
|
||||
install or to complete a major upgrade".
|
||||
|
||||
This will run the configure script against the v2.3 directory and
|
||||
update it to the 3.0 format. The following files will be replaced:
|
||||
|
||||
- The invokeai.init file, replaced by invokeai.yaml
|
||||
- The models directory
|
||||
- The configs/models.yaml model index
|
||||
|
||||
The original versions of these files will be saved with the suffix
|
||||
".orig" appended to the end. Once you have confirmed that the upgrade
|
||||
worked, you can safely remove these files. Alternatively you can
|
||||
restore a working v2.3 directory by removing the new files and
|
||||
restoring the ".orig" files' original names.
|
||||
|
||||
##### For Windows Users:
|
||||
|
||||
Windows users can upgrade with the following steps:
|
||||
|
||||
1. Enter the 2.3 root directory you wish to upgrade
|
||||
2. Launch `invoke.sh` or `invoke.bat`
|
||||
3. Select the "Developer's console" option [8]
|
||||
4. Type the following commands
|
||||
|
||||
```
pip install "invokeai @ https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v3.0.0" --use-pep517 --upgrade
invokeai-configure --root .
```
|
||||
(Replace `v3.0.0` with the current release number if this document is out of date).
|
||||
|
||||
The first command will install and upgrade new software to run
|
||||
InvokeAI. The second will prepare the 2.3 directory for use with 3.0.
|
||||
You may now launch the WebUI in the usual way, by selecting option [1]
|
||||
from the launcher script.
|
||||
|
||||
#### Migrating Images
|
||||
|
||||
The migration script will migrate your invokeai settings and models,
|
||||
including textual inversion models, LoRAs and merges that you may have
|
||||
installed previously. However it does **not** migrate the generated
|
||||
images stored in your 2.3-format outputs directory. To do this, you
|
||||
need to run an additional step:
|
||||
|
||||
1. From a working InvokeAI 3.0 root directory, start the launcher and
|
||||
enter menu option [8] to open the "developer's console".
|
||||
|
||||
2. At the developer's console command line, type the command:
|
||||
|
||||
```bash
invokeai-import-images
```
|
||||
|
||||
3. This will lead you through the process of confirming the desired
|
||||
source and destination for the imported images. The images will
|
||||
appear in the gallery board of your choice, and contain the
|
||||
original prompt, model name, and other parameters used to generate
|
||||
the image.
|
||||
|
||||
(Many kudos to **techjedi** for contributing this script.)
|
||||
|
||||
## Hardware Requirements
|
||||
|
||||
InvokeAI is supported across Linux, Windows and macOS. Linux
|
||||
users can use either an Nvidia-based card (with CUDA support) or an
|
||||
AMD card (using the ROCm driver).
|
||||
|
||||
### System
|
||||
|
||||
You will need one of the following:
|
||||
|
||||
- An NVIDIA-based graphics card with 4 GB or more of VRAM. 6-8 GB
|
||||
of VRAM is highly recommended for rendering using the Stable
|
||||
Diffusion XL models
|
||||
- An Apple computer with an M1 chip.
|
||||
- An AMD-based graphics card with 4 GB or more of VRAM (Linux
|
||||
only), 6-8 GB for XL rendering.
|
||||
|
||||
We do not recommend the GTX 1650 or 1660 series video cards. They are
|
||||
unable to run in half-precision mode and do not have sufficient VRAM
|
||||
to render 512x512 images.
|
||||
|
||||
**Memory** - At least 12 GB Main Memory RAM.
|
||||
|
||||
**Disk** - At least 12 GB of free disk space for the machine learning model, Python, and all its dependencies.
|
||||
|
||||
## Features
|
||||
|
||||
Feature documentation can be reviewed by navigating to [the InvokeAI Documentation page](https://invoke-ai.github.io/InvokeAI/features/)
|
||||
|
||||
### *Web Server & UI*
|
||||
|
||||
InvokeAI offers a locally hosted Web Server & React Frontend, with an industry leading user experience. The Web-based UI allows for simple and intuitive workflows, and is responsive for use on mobile devices and tablets accessing the web server.
|
||||
|
||||
### *Unified Canvas*
|
||||
|
||||
The Unified Canvas is a fully integrated canvas implementation with support for all core generation capabilities, in/outpainting, brush tools, and more. This creative tool unlocks the capability for artists to create with AI as a creative collaborator, and can be used to augment AI-generated imagery, sketches, photography, renders, and more.
|
||||
|
||||
### *Workflows & Nodes*
|
||||
|
||||
InvokeAI offers a fully featured workflow management solution, enabling users to combine the power of node-based workflows with the ease of a UI. This allows for customizable generation pipelines to be developed and shared by users looking to create specific workflows to support their production use-cases.
|
||||
|
||||
### *Board & Gallery Management*
|
||||
|
||||
Invoke AI provides an organized gallery system for easily storing, accessing, and remixing your content in the Invoke workspace. Images can be dragged/dropped onto any image-based UI element in the application, and rich metadata within the image allows for easy recall of key prompts or settings used in your workflow.
|
||||
|
||||
### Other features
|
||||
|
||||
- *Support for both ckpt and diffusers models*
|
||||
- *SD 2.0, 2.1, XL support*
|
||||
- *Upscaling Tools*
|
||||
- *Embedding Manager & Support*
|
||||
- *Model Manager & Support*
|
||||
- *Workflow creation & management*
|
||||
- *Node-Based Architecture*
|
||||
|
||||
|
||||
### Latest Changes
|
||||
|
||||
For our latest changes, view our [Release
|
||||
Notes](https://github.com/invoke-ai/InvokeAI/releases) and the
|
||||
[CHANGELOG](docs/CHANGELOG.md).
|
||||
|
||||
### Troubleshooting / FAQ
|
||||
|
||||
Please check out our **[FAQ](https://invoke-ai.github.io/InvokeAI/help/FAQ/)** to get solutions for common installation
|
||||
problems and other issues. For more help, please join our [Discord][discord link]
|
||||
|
||||
## Contributing
|
||||
|
||||
Anyone who wishes to contribute to this project, whether documentation, features, bug fixes, code
|
||||
cleanup, testing, or code reviews, is very much encouraged to do so.
|
||||
|
||||
Get started with contributing by reading our [Contribution documentation](https://invoke-ai.github.io/InvokeAI/contributing/CONTRIBUTING/), joining the [#dev-chat](https://discord.com/channels/1020123559063990373/1049495067846524939) or the GitHub discussion board.
|
||||
|
||||
If you are unfamiliar with how
|
||||
to contribute to GitHub projects, we have a new contributor checklist you can follow to get started contributing:
|
||||
[New Contributor Checklist](https://invoke-ai.github.io/InvokeAI/contributing/contribution_guides/newContributorChecklist/).
|
||||
|
||||
We hope you enjoy using our software as much as we enjoy creating it,
|
||||
and we hope that some of those of you who are reading this will elect
|
||||
to become part of our community.
|
||||
|
||||
Welcome to InvokeAI!
|
||||
|
||||
### Contributors
|
||||
|
||||
This fork is a combined effort of various people from across the world.
|
||||
[Check out the list of all these amazing people](https://invoke-ai.github.io/InvokeAI/other/CONTRIBUTORS/). We thank them for
|
||||
their time, hard work and effort.
|
||||
|
||||
### Support
|
||||
|
||||
For support, please use this repository's GitHub Issues tracking service, or join the [Discord][discord link].
|
||||
|
||||
Original portions of the software are Copyright (c) 2023 by respective contributors.
|
||||
|
||||
|
||||
@@ -51,13 +51,11 @@ The settings in this file will override the defaults. You only need
|
||||
to change this file if the default for a particular setting doesn't
|
||||
work for you.
|
||||
|
||||
You'll find an example file next to `invokeai.yaml` that shows the default values.
|
||||
|
||||
Some settings, like [Model Marketplace API Keys], require the YAML
|
||||
to be formatted correctly. Here is a [basic guide to YAML files].
|
||||
|
||||
You can fix a broken `invokeai.yaml` by deleting it and running the
|
||||
configuration script again -- option [6] in the launcher, "Re-run the
|
||||
configure script".
|
||||
|
||||
#### Custom Config File Location
|
||||
|
||||
You can use any config file with the `--config` CLI arg. Pass in the path to the `invokeai.yaml` file you want to use.
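For example, assuming an alternate config stored at `/data/invoke/invokeai.yaml` (an illustrative path), the web server could be started with:

```sh
invokeai-web --config /data/invoke/invokeai.yaml
```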
|
||||
|
||||
@@ -4,278 +4,6 @@ title: Training
|
||||
|
||||
# :material-file-document: Training
|
||||
|
||||
# Textual Inversion Training
|
||||
## **Personalizing Text-to-Image Generation**
|
||||
Invoke Training has moved to its own repository, with a dedicated UI for accessing common scripts like Textual Inversion and LoRA training.
|
||||
|
||||
You may personalize the generated images to provide your own styles or objects
|
||||
by training a new LDM checkpoint and introducing a new vocabulary to the fixed
|
||||
model as a (.pt) embeddings file. Alternatively, you may use or train
|
||||
HuggingFace Concepts embeddings files (.bin) from
|
||||
<https://huggingface.co/sd-concepts-library> and its associated
|
||||
notebooks.
|
||||
|
||||
## **Hardware and Software Requirements**
|
||||
|
||||
You will need a GPU to perform training in a reasonable length of
|
||||
time, and at least 12 GB of VRAM. We recommend using the [`xformers`
|
||||
library](../installation/070_INSTALL_XFORMERS.md) to accelerate the
|
||||
training process further. During training, about 8 GB is temporarily
|
||||
needed in order to store intermediate models, checkpoints and logs.
|
||||
|
||||
## **Preparing for Training**
|
||||
|
||||
To train, prepare a folder that contains 3-5 images that illustrate
|
||||
the object or concept. It is good to provide a variety of examples or
|
||||
poses to avoid overtraining the system. Format these images as PNG
|
||||
(preferred) or JPG. You do not need to resize or crop the images in
|
||||
advance, but for more control you may wish to do so.
|
||||
|
||||
Place the training images in a directory on the machine InvokeAI runs
|
||||
on. We recommend placing them in a subdirectory of the
|
||||
`text-inversion-training-data` folder located in the InvokeAI root
|
||||
directory, ordinarily `~/invokeai` (Linux/Mac), or
|
||||
`C:\Users\your_name\invokeai` (Windows). For example, to create an
|
||||
embedding for the "psychedelic" style, you'd place the training images
|
||||
into the directory
|
||||
`~/invokeai/text-inversion-training-data/psychedelic`.
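For instance, to set up the "psychedelic" example above, you might create the folder and copy your training images into it (the source file names are illustrative):

```sh
mkdir -p ~/invokeai/text-inversion-training-data/psychedelic
cp ~/Pictures/psychedelic-*.png ~/invokeai/text-inversion-training-data/psychedelic/
```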
|
||||
|
||||
## **Launching Training Using the Console Front End**
|
||||
|
||||
InvokeAI 2.3 and higher comes with a text console-based training front
|
||||
end. From within the `invoke.sh`/`invoke.bat` Invoke launcher script,
|
||||
start the training tool by selecting choice (3):
|
||||
|
||||
```sh
1 "Generate images with a browser-based interface"
2 "Explore InvokeAI nodes using a command-line interface"
3 "Textual inversion training"
4 "Merge models (diffusers type only)"
5 "Download and install models"
6 "Change InvokeAI startup options"
7 "Re-run the configure script to fix a broken install or to complete a major upgrade"
8 "Open the developer console"
9 "Update InvokeAI"
```
|
||||
|
||||
Alternatively, you can select option (8) to open the developer console, or, from the command line with the InvokeAI virtual environment active,
|
||||
launch the front end with the command `invokeai-ti --gui`.
|
||||
|
||||
This will launch a text-based front end that will look like this:
|
||||
|
||||
<figure markdown>
|
||||

|
||||
</figure>
|
||||
|
||||
The interface is keyboard-based. Move from field to field using
|
||||
control-N (^N) to move to the next field and control-P (^P) to the
|
||||
previous one. <Tab> and <shift-TAB> work as well. Once a field is
|
||||
active, use the cursor keys. In a checkbox group, use the up and down
|
||||
cursor keys to move from choice to choice, and <space> to select a
|
||||
choice. In a scrollbar, use the left and right cursor keys to increase
|
||||
and decrease the value of the scroll. In textfields, type the desired
|
||||
values.
|
||||
|
||||
The number of parameters may look intimidating, but in most cases the
|
||||
predefined defaults work fine. The red circled fields in the above
|
||||
illustration are the ones you will adjust most frequently.
|
||||
|
||||
### Model Name
|
||||
|
||||
This will list all the diffusers models that are currently
|
||||
installed. Select the one you wish to use as the basis for your
|
||||
embedding. Be aware that if you use a SD-1.X-based model for your
|
||||
training, you will only be able to use this embedding with other
|
||||
SD-1.X-based models. Similarly, if you train on SD-2.X, you will only
|
||||
be able to use the embeddings with models based on SD-2.X.
|
||||
|
||||
### Trigger Term
|
||||
|
||||
This is the prompt term you will use to trigger the embedding. Type a
|
||||
single word or phrase you wish to use as the trigger, example
|
||||
"psychedelic" (without angle brackets). Within InvokeAI, you will then
|
||||
be able to activate the trigger using the syntax `<psychedelic>`.
|
||||
|
||||
### Initializer
|
||||
|
||||
This is a single character that is used internally during the training
|
||||
process as a placeholder for the trigger term. It defaults to "*" and
|
||||
can usually be left alone.
|
||||
|
||||
### Resume from last saved checkpoint
|
||||
|
||||
As training proceeds, textual inversion will write a series of
|
||||
intermediate files that can be used to resume training from where it
|
||||
was left off in the case of an interruption. This checkbox will be
|
||||
automatically selected if you provide a previously used trigger term
|
||||
and at least one checkpoint file is found on disk.
|
||||
|
||||
Note that as of 20 January 2023, resume does not seem to be working
|
||||
properly due to an issue with the upstream code.
|
||||
|
||||
### Data Training Directory
|
||||
|
||||
This is the location of the images to be used for training. When you
|
||||
select a trigger term like "my-trigger", the frontend will prepopulate
|
||||
this field with `~/invokeai/text-inversion-training-data/my-trigger`,
|
||||
but you can change the path to wherever you want.
|
||||
|
||||
### Output Destination Directory
|
||||
|
||||
This is the location of the logs, checkpoint files, and embedding
|
||||
files created during training. When you select a trigger term like
|
||||
"my-trigger", the frontend will prepopulate this field with
|
||||
`~/invokeai/text-inversion-output/my-trigger`, but you can change the
|
||||
path to wherever you want.
|
||||
|
||||
### Image resolution
|
||||
|
||||
The images in the training directory will be automatically scaled to
|
||||
the value you use here. For best results, you will want to use the
|
||||
same default resolution of the underlying model (512 pixels for
|
||||
SD-1.5, 768 for the larger version of SD-2.1).
|
||||
|
||||
### Center crop images
|
||||
|
||||
If this is selected, your images will be center cropped to make them
|
||||
square before resizing them to the desired resolution. Center cropping
|
||||
can indiscriminately cut off the top of subjects' heads for portrait
|
||||
aspect images, so if you have images like this, you may wish to use a
|
||||
photoeditor to manually crop them to a square aspect ratio.
|
||||
|
||||
### Mixed precision
|
||||
|
||||
Select the floating point precision for the embedding. "no" will
|
||||
result in a full 32-bit precision, "fp16" will provide 16-bit
|
||||
precision, and "bf16" will provide mixed precision (only available
|
||||
when XFormers is used).
|
||||
|
||||
### Max training steps
|
||||
|
||||
How many steps the training will take before the model converges. Most
|
||||
training sets will converge with 2000-3000 steps.
|
||||
|
||||
### Batch size
|
||||
|
||||
This adjusts how many training images are processed simultaneously in
|
||||
each step. Higher values will cause the training process to run more
|
||||
quickly, but use more memory. The default size will run with GPUs with
|
||||
as little as 12 GB.
|
||||
|
||||
### Learning rate
|
||||
|
||||
The rate at which the system adjusts its internal weights during
|
||||
training. Higher values risk overtraining (getting the same image each
|
||||
time), and lower values will take more steps to train a good
|
||||
model. The default of 0.0005 is conservative; you may wish to increase
|
||||
it to 0.005 to speed up training.
|
||||
|
||||
### Scale learning rate by number of GPUs, steps and batch size
|
||||
|
||||
If this is selected (the default) the system will adjust the provided
|
||||
learning rate to improve performance.
|
||||
|
||||
### Use xformers acceleration
|
||||
|
||||
This will activate XFormers memory-efficient attention. You need to
|
||||
have XFormers installed for this to have an effect.
|
||||
|
||||
### Learning rate scheduler
|
||||
|
||||
This adjusts how the learning rate changes over the course of
|
||||
training. The default "constant" means to use a constant learning rate
|
||||
for the entire training session. The other values scale the learning
|
||||
rate according to various formulas.
|
||||
|
||||
Only "constant" is supported by the XFormers library.
|
||||
|
||||
### Gradient accumulation steps
|
||||
|
||||
This is a parameter that allows you to use bigger batch sizes than
|
||||
your GPU's VRAM would ordinarily accommodate, at the cost of some
|
||||
performance.
|
||||
|
||||
### Warmup steps
|
||||
|
||||
If "constant_with_warmup" is selected in the learning rate scheduler,
|
||||
then this provides the number of warmup steps. Warmup steps have a
|
||||
very low learning rate, and are one way of preventing early
|
||||
overtraining.
|
||||
|
||||
## The training run
|
||||
|
||||
Start the training run by advancing to the OK button (bottom right)
|
||||
and pressing <enter>. A series of progress messages will be displayed
|
||||
as the training process proceeds. This may take an hour or two,
|
||||
depending on settings and the speed of your system. Various log and
|
||||
checkpoint files will be written into the output directory (ordinarily
|
||||
`~/invokeai/text-inversion-output/my-model/`)
|
||||
|
||||
At the end of successful training, the system will copy the file
|
||||
`learned_embeds.bin` into the InvokeAI root directory's `embeddings`
|
||||
directory, using a subdirectory named after the trigger token. For
|
||||
example, if the trigger token was `psychedelic`, then look for the
|
||||
embeddings file in
|
||||
`~/invokeai/embeddings/psychedelic/learned_embeds.bin`
|
||||
|
||||
You may now launch InvokeAI and try out a prompt that uses the trigger
|
||||
term. For example `a plate of banana sushi in <psychedelic> style`.
|
||||
|
||||
## **Training with the Command-Line Script**
|
||||
|
||||
Training can also be done using a traditional command-line script. It
|
||||
can be launched from within the "developer's console", or from the
|
||||
command line after activating InvokeAI's virtual environment.
|
||||
|
||||
It accepts a large number of arguments, which can be summarized by
|
||||
passing the `--help` argument:
|
||||
|
||||
```sh
invokeai-ti --help
```
|
||||
|
||||
Typical usage is shown here:
|
||||
```sh
invokeai-ti \
  --model=stable-diffusion-1.5 \
  --resolution=512 \
  --learnable_property=style \
  --initializer_token='*' \
  --placeholder_token='<psychedelic>' \
  --train_data_dir=/home/lstein/invokeai/training-data/psychedelic \
  --output_dir=/home/lstein/invokeai/text-inversion-training/psychedelic \
  --scale_lr \
  --train_batch_size=8 \
  --gradient_accumulation_steps=4 \
  --max_train_steps=3000 \
  --learning_rate=0.0005 \
  --resume_from_checkpoint=latest \
  --lr_scheduler=constant \
  --mixed_precision=fp16 \
  --only_save_embeds
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### `Cannot load embedding for <trigger>. It was trained on a model with token dimension 1024, but the current model has token dimension 768`
|
||||
|
||||
Messages like this indicate you trained the embedding on a different base model than the currently selected one.
|
||||
|
||||
For example, in the error above, the training was done on SD2.1 (768x768) but it was used on SD1.5 (512x512).
|
||||
|
||||
## Reading
|
||||
|
||||
For more information on textual inversion, please see the following
|
||||
resources:
|
||||
|
||||
* The [textual inversion repository](https://github.com/rinongal/textual_inversion) and
|
||||
associated paper for details and limitations.
|
||||
* [HuggingFace's textual inversion training
|
||||
page](https://huggingface.co/docs/diffusers/training/text_inversion)
|
||||
* [HuggingFace example script
|
||||
documentation](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion)
|
||||
(Note that this script is similar, but not identical, to
|
||||
`textual_inversion`, but produces embed files that are completely compatible.)
|
||||
|
||||
---
|
||||
|
||||
copyright (c) 2023, Lincoln Stein and the InvokeAI Development Team
|
||||
You can find more by visiting the repo at https://github.com/invoke-ai/invoke-training
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
# Automatic Install
|
||||
# Automatic Install & Updates
|
||||
|
||||
The installer is used for both new installs and updates.
|
||||
**The same packaged installer file can be used for both new installs and updates.**
|
||||
Using the installer for updates will leave everything you've added since installation, and just update the core libraries used to run Invoke.
|
||||
Simply use the same path you installed to originally.
|
||||
|
||||
Both release and pre-release versions can be installed using it. It also supports install a wheel if needed.
|
||||
Both release and pre-release versions can be installed using the installer. It also supports installing from a wheel if needed.
|
||||
|
||||
Be sure to review the [installation requirements] and ensure your system has everything it needs to install Invoke.
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Installation Overview
|
||||
# Installation and Updating Overview
|
||||
|
||||
Before installing, review the [installation requirements] to ensure your system is set up properly.
|
||||
|
||||
@@ -6,14 +6,21 @@ See the [FAQ] for frequently-encountered installation issues.
|
||||
|
||||
If you need more help, join our [discord] or [create an issue].
|
||||
|
||||
<h2>Automatic Install</h2>
|
||||
<h2>Automatic Install & Updates </h2>
|
||||
|
||||
✅ The automatic install is the best way to run InvokeAI. Check out the [installation guide] to get started.
|
||||
|
||||
⬆️ The same installer is also the best way to update InvokeAI: simply rerun it and choose the same folder you originally installed to.
|
||||
|
||||
The installation process simply manages installation for the core libraries & application dependencies that run Invoke.
|
||||
Any models, images, or other assets in the Invoke root folder won't be affected by the installation process.
|
||||
|
||||
<h2>Manual Install</h2>
|
||||
|
||||
If you are familiar with python and want more control over the packages that are installed, you can [install InvokeAI manually via PyPI].
|
||||
|
||||
Updates are managed by reinstalling the latest version through PyPI.
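As a minimal sketch of such an update, run the following inside the activated InvokeAI virtual environment; add the same `--extra-index-url` you used for the original install if your GPU requires one:

```sh
pip install --upgrade --use-pep517 InvokeAI
```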
|
||||
|
||||
<h2>Developer Install</h2>
|
||||
|
||||
If you want to contribute to InvokeAI, consult the [developer install guide].
|
||||
|
||||
@@ -37,13 +37,13 @@ Invoke runs best with a dedicated GPU, but will fall back to running on CPU, alb
|
||||
=== "Nvidia"
|
||||
|
||||
```
Any GPU with at least 8GB VRAM. Linux only.
Any GPU with at least 8GB VRAM.
```
|
||||
|
||||
=== "AMD"
|
||||
|
||||
```
Any GPU with at least 16GB VRAM.
Any GPU with at least 16GB VRAM. Linux only.
```
|
||||
|
||||
=== "Mac"
|
||||
|
||||
@@ -13,7 +13,6 @@ from pydantic import BaseModel, Field
|
||||
from invokeai.app.invocations.upscale import ESRGAN_MODELS
|
||||
from invokeai.app.services.invocation_cache.invocation_cache_common import InvocationCacheStatus
|
||||
from invokeai.backend.image_util.infill_methods.patchmatch import PatchMatch
|
||||
from invokeai.backend.image_util.safety_checker import SafetyChecker
|
||||
from invokeai.backend.util.logging import logging
|
||||
from invokeai.version import __version__
|
||||
|
||||
@@ -109,9 +108,7 @@ async def get_config() -> AppConfig:
|
||||
upscaling_models.append(str(Path(model).stem))
|
||||
upscaler = Upscaler(upscaling_method="esrgan", upscaling_models=upscaling_models)
|
||||
|
||||
nsfw_methods = []
|
||||
if SafetyChecker.safety_checker_available():
|
||||
nsfw_methods.append("nsfw_checker")
|
||||
nsfw_methods = ["nsfw_checker"]
|
||||
|
||||
watermarking_methods = ["invisible_watermark"]
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@ import pathlib
|
||||
import shutil
|
||||
import traceback
|
||||
from copy import deepcopy
|
||||
from typing import Any, Dict, List, Optional
|
||||
from typing import Any, Dict, List, Optional, Type
|
||||
|
||||
from fastapi import Body, Path, Query, Response, UploadFile
|
||||
from fastapi.responses import FileResponse
|
||||
@@ -16,6 +16,7 @@ from pydantic import AnyHttpUrl, BaseModel, ConfigDict, Field
|
||||
from starlette.exceptions import HTTPException
|
||||
from typing_extensions import Annotated
|
||||
|
||||
from invokeai.app.services.model_images.model_images_common import ModelImageFileNotFoundException
|
||||
from invokeai.app.services.model_install import ModelInstallJob
|
||||
from invokeai.app.services.model_records import (
|
||||
DuplicateModelException,
|
||||
@@ -52,6 +53,13 @@ class ModelsList(BaseModel):
|
||||
model_config = ConfigDict(use_enum_values=True)
|
||||
|
||||
|
||||
def add_cover_image_to_model_config(config: AnyModelConfig, dependencies: Type[ApiDependencies]) -> AnyModelConfig:
    """Add a cover image URL to a model configuration."""
    cover_image = dependencies.invoker.services.model_images.get_url(config.key)
    config.cover_image = cover_image
    return config
|
||||
|
||||
|
||||
##############################################################################
|
||||
# These are example inputs and outputs that are used in places where Swagger
|
||||
# is unable to generate a correct example.
|
||||
@@ -118,8 +126,7 @@ async def list_model_records(
|
||||
record_store.search_by_attr(model_type=model_type, model_name=model_name, model_format=model_format)
|
||||
)
|
||||
for model in found_models:
|
||||
cover_image = ApiDependencies.invoker.services.model_images.get_url(model.key)
|
||||
model.cover_image = cover_image
|
||||
model = add_cover_image_to_model_config(model, ApiDependencies)
|
||||
return ModelsList(models=found_models)
|
||||
|
||||
|
||||
@@ -160,12 +167,9 @@ async def get_model_record(
|
||||
key: str = Path(description="Key of the model record to fetch."),
|
||||
) -> AnyModelConfig:
|
||||
"""Get a model record"""
|
||||
record_store = ApiDependencies.invoker.services.model_manager.store
|
||||
try:
|
||||
config: AnyModelConfig = record_store.get_model(key)
|
||||
cover_image = ApiDependencies.invoker.services.model_images.get_url(key)
|
||||
config.cover_image = cover_image
|
||||
return config
|
||||
config = ApiDependencies.invoker.services.model_manager.store.get_model(key)
|
||||
return add_cover_image_to_model_config(config, ApiDependencies)
|
||||
except UnknownModelException as e:
|
||||
raise HTTPException(status_code=404, detail=str(e))
|
||||
|
||||
@@ -294,14 +298,15 @@ async def update_model_record(
|
||||
installer = ApiDependencies.invoker.services.model_manager.install
|
||||
try:
|
||||
record_store.update_model(key, changes=changes)
|
||||
model_response: AnyModelConfig = installer.sync_model_path(key)
|
||||
config = installer.sync_model_path(key)
|
||||
config = add_cover_image_to_model_config(config, ApiDependencies)
|
||||
logger.info(f"Updated model: {key}")
|
||||
except UnknownModelException as e:
|
||||
raise HTTPException(status_code=404, detail=str(e))
|
||||
except ValueError as e:
|
||||
logger.error(str(e))
|
||||
raise HTTPException(status_code=409, detail=str(e))
|
||||
return model_response
|
||||
return config
|
||||
|
||||
|
||||
@model_manager_router.get(
|
||||
@@ -648,6 +653,14 @@ async def convert_model(
|
||||
logger.error(str(e))
|
||||
raise HTTPException(status_code=409, detail=str(e))
|
||||
|
||||
# Update the model image if the model had one
|
||||
try:
|
||||
model_image = ApiDependencies.invoker.services.model_images.get(key)
|
||||
ApiDependencies.invoker.services.model_images.save(model_image, new_key)
|
||||
ApiDependencies.invoker.services.model_images.delete(key)
|
||||
except ModelImageFileNotFoundException:
|
||||
pass
|
||||
|
||||
# delete the original safetensors file
|
||||
installer.delete(key)
|
||||
|
||||
@@ -655,7 +668,8 @@ async def convert_model(
|
||||
shutil.rmtree(cache_path)
|
||||
|
||||
# return the config record for the new diffusers directory
|
||||
new_config: AnyModelConfig = store.get_model(new_key)
|
||||
new_config = store.get_model(new_key)
|
||||
new_config = add_cover_image_to_model_config(new_config, ApiDependencies)
|
||||
return new_config
|
||||
|
||||
|
||||
|
||||
@@ -35,22 +35,16 @@ from invokeai.app.invocations.model import ModelIdentifierField
|
||||
from invokeai.app.invocations.primitives import ImageOutput
|
||||
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.app.util.controlnet_utils import CONTROLNET_MODE_VALUES, CONTROLNET_RESIZE_VALUES, heuristic_resize
|
||||
from invokeai.backend.image_util.canny import get_canny_edges
|
||||
from invokeai.backend.image_util.depth_anything import DepthAnythingDetector
|
||||
from invokeai.backend.image_util.dw_openpose import DWOpenposeDetector
|
||||
from invokeai.backend.image_util.hed import HEDProcessor
|
||||
from invokeai.backend.image_util.lineart import LineartProcessor
|
||||
from invokeai.backend.image_util.lineart_anime import LineartAnimeProcessor
|
||||
from invokeai.backend.image_util.util import np_to_pil, pil_to_np
|
||||
|
||||
from .baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
|
||||
|
||||
CONTROLNET_MODE_VALUES = Literal["balanced", "more_prompt", "more_control", "unbalanced"]
|
||||
CONTROLNET_RESIZE_VALUES = Literal[
    "just_resize",
    "crop_resize",
    "fill_resize",
    "just_resize_simple",
]
|
||||
from .baseinvocation import BaseInvocation, BaseInvocationOutput, Classification, invocation, invocation_output
|
||||
|
||||
|
||||
class ControlField(BaseModel):
|
||||
@@ -171,13 +165,13 @@ class ImageProcessorInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
title="Canny Processor",
|
||||
tags=["controlnet", "canny"],
|
||||
category="controlnet",
|
||||
version="1.3.2",
|
||||
version="1.3.3",
|
||||
)
|
||||
class CannyImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Canny edge detection for ControlNet"""
|
||||
|
||||
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
low_threshold: int = InputField(
|
||||
default=100, ge=0, le=255, description="The low threshold of the Canny pixel gradient (0-255)"
|
||||
)
|
||||
@@ -205,13 +199,13 @@ class CannyImageProcessorInvocation(ImageProcessorInvocation):
|
||||
title="HED (softedge) Processor",
|
||||
tags=["controlnet", "hed", "softedge"],
|
||||
category="controlnet",
|
||||
version="1.2.2",
|
||||
version="1.2.3",
|
||||
)
|
||||
class HedImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies HED edge detection to image"""
|
||||
|
||||
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
# safe not supported in controlnet_aux v0.0.3
|
||||
# safe: bool = InputField(default=False, description=FieldDescriptions.safe_mode)
|
||||
scribble: bool = InputField(default=False, description=FieldDescriptions.scribble_mode)
|
||||
@@ -234,13 +228,13 @@ class HedImageProcessorInvocation(ImageProcessorInvocation):
|
||||
title="Lineart Processor",
|
||||
tags=["controlnet", "lineart"],
|
||||
category="controlnet",
|
||||
version="1.2.2",
|
||||
version="1.2.3",
|
||||
)
|
||||
class LineartImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies line art processing to image"""
|
||||
|
||||
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
coarse: bool = InputField(default=False, description="Whether to use coarse mode")
|
||||
|
||||
def run_processor(self, image: Image.Image) -> Image.Image:
|
||||
@@ -256,13 +250,13 @@ class LineartImageProcessorInvocation(ImageProcessorInvocation):
|
||||
title="Lineart Anime Processor",
|
||||
tags=["controlnet", "lineart", "anime"],
|
||||
category="controlnet",
|
||||
version="1.2.2",
|
||||
version="1.2.3",
|
||||
)
|
||||
class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies line art anime processing to image"""
|
||||
|
||||
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
|
||||
def run_processor(self, image: Image.Image) -> Image.Image:
|
||||
processor = LineartAnimeProcessor()
|
||||
@@ -279,15 +273,15 @@ class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation):
|
||||
title="Midas Depth Processor",
|
||||
tags=["controlnet", "midas"],
|
||||
category="controlnet",
|
||||
version="1.2.3",
|
||||
version="1.2.4",
|
||||
)
|
||||
class MidasDepthImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies Midas depth processing to image"""
|
||||
|
||||
a_mult: float = InputField(default=2.0, ge=0, description="Midas parameter `a_mult` (a = a_mult * PI)")
|
||||
bg_th: float = InputField(default=0.1, ge=0, description="Midas parameter `bg_th`")
|
||||
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
# depth_and_normal not supported in controlnet_aux v0.0.3
|
||||
# depth_and_normal: bool = InputField(default=False, description="whether to use depth and normal mode")
|
||||
|
||||
@@ -310,13 +304,13 @@ class MidasDepthImageProcessorInvocation(ImageProcessorInvocation):
|
||||
title="Normal BAE Processor",
|
||||
tags=["controlnet"],
|
||||
category="controlnet",
|
||||
version="1.2.2",
|
||||
version="1.2.3",
|
||||
)
|
||||
class NormalbaeImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies NormalBae processing to image"""
|
||||
|
||||
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
|
||||
def run_processor(self, image):
|
||||
normalbae_processor = NormalBaeDetector.from_pretrained("lllyasviel/Annotators")
|
||||
@@ -327,13 +321,13 @@ class NormalbaeImageProcessorInvocation(ImageProcessorInvocation):
|
||||
|
||||
|
||||
@invocation(
|
||||
"mlsd_image_processor", title="MLSD Processor", tags=["controlnet", "mlsd"], category="controlnet", version="1.2.2"
|
||||
"mlsd_image_processor", title="MLSD Processor", tags=["controlnet", "mlsd"], category="controlnet", version="1.2.3"
|
||||
)
|
||||
class MlsdImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies MLSD processing to image"""
|
||||
|
||||
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
thr_v: float = InputField(default=0.1, ge=0, description="MLSD parameter `thr_v`")
|
||||
thr_d: float = InputField(default=0.1, ge=0, description="MLSD parameter `thr_d`")
|
||||
|
||||
@@ -350,13 +344,13 @@ class MlsdImageProcessorInvocation(ImageProcessorInvocation):
|
||||
|
||||
|
||||
@invocation(
|
||||
"pidi_image_processor", title="PIDI Processor", tags=["controlnet", "pidi"], category="controlnet", version="1.2.2"
|
||||
"pidi_image_processor", title="PIDI Processor", tags=["controlnet", "pidi"], category="controlnet", version="1.2.3"
|
||||
)
|
||||
class PidiImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies PIDI processing to image"""
|
||||
|
||||
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
safe: bool = InputField(default=False, description=FieldDescriptions.safe_mode)
|
||||
scribble: bool = InputField(default=False, description=FieldDescriptions.scribble_mode)
|
||||
|
||||
@@ -377,13 +371,13 @@ class PidiImageProcessorInvocation(ImageProcessorInvocation):
|
||||
title="Content Shuffle Processor",
|
||||
tags=["controlnet", "contentshuffle"],
|
||||
category="controlnet",
|
||||
version="1.2.2",
|
||||
version="1.2.3",
|
||||
)
|
||||
class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies content shuffle processing to image"""
|
||||
|
||||
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
h: int = InputField(default=512, ge=0, description="Content shuffle `h` parameter")
|
||||
w: int = InputField(default=512, ge=0, description="Content shuffle `w` parameter")
|
||||
f: int = InputField(default=256, ge=0, description="Content shuffle `f` parameter")
|
||||
@@ -407,7 +401,7 @@ class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation):
|
||||
title="Zoe (Depth) Processor",
|
||||
tags=["controlnet", "zoe", "depth"],
|
||||
category="controlnet",
|
||||
version="1.2.2",
|
||||
version="1.2.3",
|
||||
)
|
||||
class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies Zoe depth processing to image"""
|
||||
@@ -423,15 +417,15 @@ class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation):
|
||||
title="Mediapipe Face Processor",
|
||||
tags=["controlnet", "mediapipe", "face"],
|
||||
category="controlnet",
|
||||
version="1.2.3",
|
||||
version="1.2.4",
|
||||
)
|
||||
class MediapipeFaceProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies mediapipe face processing to image"""
|
||||
|
||||
max_faces: int = InputField(default=1, ge=1, description="Maximum number of faces to detect")
|
||||
min_confidence: float = InputField(default=0.5, ge=0, le=1, description="Minimum confidence for face detection")
|
||||
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
|
||||
def run_processor(self, image):
|
||||
mediapipe_face_processor = MediapipeFaceDetector()
|
||||
@@ -450,7 +444,7 @@ class MediapipeFaceProcessorInvocation(ImageProcessorInvocation):
|
||||
title="Leres (Depth) Processor",
|
||||
tags=["controlnet", "leres", "depth"],
|
||||
category="controlnet",
|
||||
version="1.2.2",
|
||||
version="1.2.3",
|
||||
)
|
||||
class LeresImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies leres processing to image"""
|
||||
@@ -458,8 +452,8 @@ class LeresImageProcessorInvocation(ImageProcessorInvocation):
|
||||
thr_a: float = InputField(default=0, description="Leres parameter `thr_a`")
|
||||
thr_b: float = InputField(default=0, description="Leres parameter `thr_b`")
|
||||
boost: bool = InputField(default=False, description="Whether to use boost mode")
|
||||
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
|
||||
def run_processor(self, image):
|
||||
leres_processor = LeresDetector.from_pretrained("lllyasviel/Annotators")
|
||||
@@ -479,7 +473,7 @@ class LeresImageProcessorInvocation(ImageProcessorInvocation):
|
||||
title="Tile Resample Processor",
|
||||
tags=["controlnet", "tile"],
|
||||
category="controlnet",
|
||||
version="1.2.2",
|
||||
version="1.2.3",
|
||||
)
|
||||
class TileResamplerProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Tile resampler processor"""
|
||||
@@ -519,13 +513,13 @@ class TileResamplerProcessorInvocation(ImageProcessorInvocation):
|
||||
title="Segment Anything Processor",
|
||||
tags=["controlnet", "segmentanything"],
|
||||
category="controlnet",
|
||||
version="1.2.3",
|
||||
version="1.2.4",
|
||||
)
|
||||
class SegmentAnythingProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies segment anything processing to image"""
|
||||
|
||||
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
|
||||
def run_processor(self, image):
|
||||
# segment_anything_processor = SamDetector.from_pretrained("ybelkada/segment-anything", subfolder="checkpoints")
|
||||
@@ -566,12 +560,12 @@ class SamDetectorReproducibleColors(SamDetector):
|
||||
title="Color Map Processor",
|
||||
tags=["controlnet"],
|
||||
category="controlnet",
|
||||
version="1.2.2",
|
||||
version="1.2.3",
|
||||
)
|
||||
class ColorMapImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Generates a color map from the provided image"""
|
||||
|
||||
color_map_tile_size: int = InputField(default=64, ge=0, description=FieldDescriptions.tile_size)
|
||||
color_map_tile_size: int = InputField(default=64, ge=1, description=FieldDescriptions.tile_size)
|
||||
|
||||
def run_processor(self, image: Image.Image):
|
||||
np_image = np.array(image, dtype=np.uint8)
|
||||
@@ -598,7 +592,7 @@ DEPTH_ANYTHING_MODEL_SIZES = Literal["large", "base", "small"]
|
||||
title="Depth Anything Processor",
|
||||
tags=["controlnet", "depth", "depth anything"],
|
||||
category="controlnet",
|
||||
version="1.1.1",
|
||||
version="1.1.2",
|
||||
)
|
||||
class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Generates a depth map based on the Depth Anything algorithm"""
|
||||
@@ -606,7 +600,7 @@ class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
|
||||
model_size: DEPTH_ANYTHING_MODEL_SIZES = InputField(
|
||||
default="small", description="The size of the depth model to use"
|
||||
)
|
||||
resolution: int = InputField(default=512, ge=64, multiple_of=64, description=FieldDescriptions.image_res)
|
||||
resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
|
||||
def run_processor(self, image: Image.Image):
|
||||
depth_anything_detector = DepthAnythingDetector()
|
||||
@@ -621,7 +615,7 @@ class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
|
||||
title="DW Openpose Image Processor",
|
||||
tags=["controlnet", "dwpose", "openpose"],
|
||||
category="controlnet",
|
||||
version="1.1.0",
|
||||
version="1.1.1",
|
||||
)
|
||||
class DWOpenposeImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Generates an openpose pose from an image using DWPose"""
|
||||
@@ -629,7 +623,7 @@ class DWOpenposeImageProcessorInvocation(ImageProcessorInvocation):
|
||||
draw_body: bool = InputField(default=True)
|
||||
draw_face: bool = InputField(default=False)
|
||||
draw_hands: bool = InputField(default=False)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
|
||||
def run_processor(self, image: Image.Image):
|
||||
dw_openpose = DWOpenposeDetector()
|
||||
@@ -641,3 +635,27 @@ class DWOpenposeImageProcessorInvocation(ImageProcessorInvocation):
|
||||
resolution=self.image_resolution,
|
||||
)
|
||||
return processed_image
|
||||
|
||||
|
||||
@invocation(
|
||||
"heuristic_resize",
|
||||
title="Heuristic Resize",
|
||||
tags=["image, controlnet"],
|
||||
category="image",
|
||||
version="1.0.1",
|
||||
classification=Classification.Prototype,
|
||||
)
|
||||
class HeuristicResizeInvocation(BaseInvocation):
|
||||
"""Resize an image using a heuristic method. Preserves edge maps."""
|
||||
|
||||
image: ImageField = InputField(description="The image to resize")
|
||||
width: int = InputField(default=512, ge=1, description="The width to resize to (px)")
|
||||
height: int = InputField(default=512, ge=1, description="The height to resize to (px)")
|
||||
|
||||
def invoke(self, context: InvocationContext) -> ImageOutput:
|
||||
image = context.images.get_pil(self.image.image_name, "RGB")
|
||||
np_img = pil_to_np(image)
|
||||
np_resized = heuristic_resize(np_img, (self.width, self.height))
|
||||
resized = np_to_pil(np_resized)
|
||||
image_dto = context.images.save(image=resized)
|
||||
return ImageOutput.build(image_dto)
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Literal, Optional
|
||||
|
||||
import cv2
|
||||
@@ -504,7 +503,7 @@ class ImageInverseLerpInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
title="Blur NSFW Image",
|
||||
tags=["image", "nsfw"],
|
||||
category="image",
|
||||
version="1.2.2",
|
||||
version="1.2.3",
|
||||
)
|
||||
class ImageNSFWBlurInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Add blur to NSFW-flagged images"""
|
||||
@@ -516,23 +515,12 @@ class ImageNSFWBlurInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
|
||||
logger = context.logger
|
||||
logger.debug("Running NSFW checker")
|
||||
if SafetyChecker.has_nsfw_concept(image):
|
||||
logger.info("A potentially NSFW image has been detected. Image will be blurred.")
|
||||
blurry_image = image.filter(filter=ImageFilter.GaussianBlur(radius=32))
|
||||
caution = self._get_caution_img()
|
||||
blurry_image.paste(caution, (0, 0), caution)
|
||||
image = blurry_image
|
||||
image = SafetyChecker.blur_if_nsfw(image)
|
||||
|
||||
image_dto = context.images.save(image=image)
|
||||
|
||||
return ImageOutput.build(image_dto)
|
||||
|
||||
def _get_caution_img(self) -> Image.Image:
|
||||
import invokeai.app.assets.images as image_assets
|
||||
|
||||
caution = Image.open(Path(image_assets.__path__[0]) / "caution.png")
|
||||
return caution.resize((caution.width // 2, caution.height // 2))
|
||||
|
||||
|
||||
@invocation(
|
||||
"img_watermark",
|
||||
|
||||
@@ -3,7 +3,7 @@ import inspect
|
||||
import math
|
||||
from contextlib import ExitStack
|
||||
from functools import singledispatchmethod
|
||||
from typing import Any, Iterator, List, Literal, Optional, Tuple, Union
|
||||
from typing import Any, Dict, Iterator, List, Literal, Optional, Tuple, Union
|
||||
|
||||
import einops
|
||||
import numpy as np
|
||||
@@ -11,7 +11,6 @@ import numpy.typing as npt
|
||||
import torch
|
||||
import torchvision
|
||||
import torchvision.transforms as T
|
||||
from diffusers import AutoencoderKL, AutoencoderTiny
|
||||
from diffusers.configuration_utils import ConfigMixin
|
||||
from diffusers.image_processor import VaeImageProcessor
|
||||
from diffusers.models.adapter import T2IAdapter
|
||||
@@ -21,9 +20,12 @@ from diffusers.models.attention_processor import (
|
||||
LoRAXFormersAttnProcessor,
|
||||
XFormersAttnProcessor,
|
||||
)
|
||||
from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL
|
||||
from diffusers.models.autoencoders.autoencoder_tiny import AutoencoderTiny
|
||||
from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
|
||||
from diffusers.schedulers import DPMSolverSDEScheduler
|
||||
from diffusers.schedulers import SchedulerMixin as Scheduler
|
||||
from diffusers.schedulers.scheduling_dpmsolver_sde import DPMSolverSDEScheduler
|
||||
from diffusers.schedulers.scheduling_tcd import TCDScheduler
|
||||
from diffusers.schedulers.scheduling_utils import SchedulerMixin as Scheduler
|
||||
from PIL import Image, ImageFilter
|
||||
from pydantic import field_validator
|
||||
from torchvision.transforms.functional import resize as tv_resize
|
||||
@@ -341,7 +343,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
|
||||
)
|
||||
steps: int = InputField(default=10, gt=0, description=FieldDescriptions.steps)
|
||||
cfg_scale: Union[float, List[float]] = InputField(
|
||||
default=7.5, ge=1, description=FieldDescriptions.cfg_scale, title="CFG Scale"
|
||||
default=7.5, description=FieldDescriptions.cfg_scale, title="CFG Scale"
|
||||
)
|
||||
denoising_start: float = InputField(
|
||||
default=0.0,
|
||||
@@ -521,9 +523,10 @@ class DenoiseLatentsInvocation(BaseInvocation):
|
||||
)
|
||||
|
||||
if is_sdxl:
|
||||
return SDXLConditioningInfo(
|
||||
embeds=text_embedding, pooled_embeds=pooled_embedding, add_time_ids=add_time_ids
|
||||
), regions
|
||||
return (
|
||||
SDXLConditioningInfo(embeds=text_embedding, pooled_embeds=pooled_embedding, add_time_ids=add_time_ids),
|
||||
regions,
|
||||
)
|
||||
return BasicConditioningInfo(embeds=text_embedding), regions
|
||||
|
||||
def get_conditioning_data(
|
||||
@@ -563,6 +566,11 @@ class DenoiseLatentsInvocation(BaseInvocation):
|
||||
dtype=unet.dtype,
|
||||
)
|
||||
|
||||
if isinstance(self.cfg_scale, list):
|
||||
assert (
|
||||
len(self.cfg_scale) == self.steps
|
||||
), "cfg_scale (list) must have the same length as the number of steps"
|
||||
|
||||
conditioning_data = TextConditioningData(
|
||||
uncond_text=uncond_text_embedding,
|
||||
cond_text=cond_text_embedding,
|
||||
@@ -578,13 +586,6 @@ class DenoiseLatentsInvocation(BaseInvocation):
|
||||
unet: UNet2DConditionModel,
|
||||
scheduler: Scheduler,
|
||||
) -> StableDiffusionGeneratorPipeline:
|
||||
# TODO:
|
||||
# configure_model_padding(
|
||||
# unet,
|
||||
# self.seamless,
|
||||
# self.seamless_axes,
|
||||
# )
|
||||
|
||||
class FakeVae:
|
||||
class FakeVaeConfig:
|
||||
def __init__(self) -> None:
|
||||
@@ -820,7 +821,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
|
||||
denoising_start: float,
|
||||
denoising_end: float,
|
||||
seed: int,
|
||||
) -> Tuple[int, List[int], int]:
|
||||
) -> Tuple[int, List[int], int, Dict[str, Any]]:
|
||||
assert isinstance(scheduler, ConfigMixin)
|
||||
if scheduler.config.get("cpu_only", False):
|
||||
scheduler.set_timesteps(steps, device="cpu")
|
||||
@@ -848,13 +849,15 @@ class DenoiseLatentsInvocation(BaseInvocation):
|
||||
timesteps = timesteps[t_start_idx : t_start_idx + t_end_idx]
|
||||
num_inference_steps = len(timesteps) // scheduler.order
|
||||
|
||||
scheduler_step_kwargs = {}
|
||||
scheduler_step_kwargs: Dict[str, Any] = {}
|
||||
scheduler_step_signature = inspect.signature(scheduler.step)
|
||||
if "generator" in scheduler_step_signature.parameters:
|
||||
# At some point, someone decided that schedulers that accept a generator should use the original seed with
|
||||
# all bits flipped. I don't know the original rationale for this, but now we must keep it like this for
|
||||
# reproducibility.
|
||||
scheduler_step_kwargs = {"generator": torch.Generator(device=device).manual_seed(seed ^ 0xFFFFFFFF)}
|
||||
scheduler_step_kwargs.update({"generator": torch.Generator(device=device).manual_seed(seed ^ 0xFFFFFFFF)})
|
||||
if isinstance(scheduler, TCDScheduler):
|
||||
scheduler_step_kwargs.update({"eta": 1.0})
|
||||
|
||||
return num_inference_steps, timesteps, init_timestep, scheduler_step_kwargs
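The kwargs assembled above are meant to be splatted into every scheduler.step() call during denoising. A minimal sketch of that pattern, assuming the usual diffusers step API; the unet, latents and text_embeds names are illustrative and not taken from this change:

    # Illustrative denoising loop: forward the per-scheduler kwargs to each step() call.
    for t in timesteps:
        noise_pred = unet(latents, t, encoder_hidden_states=text_embeds).sample
        latents = scheduler.step(noise_pred, t, latents, **scheduler_step_kwargs).prev_sample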
|
||||
|
||||
|
||||
@@ -3,7 +3,6 @@ from typing import Any, Literal, Optional, Union
|
||||
from pydantic import BaseModel, ConfigDict, Field
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
|
||||
from invokeai.app.invocations.controlnet_image_processors import CONTROLNET_MODE_VALUES, CONTROLNET_RESIZE_VALUES
|
||||
from invokeai.app.invocations.fields import (
|
||||
FieldDescriptions,
|
||||
ImageField,
|
||||
@@ -14,6 +13,7 @@ from invokeai.app.invocations.fields import (
|
||||
)
|
||||
from invokeai.app.invocations.model import ModelIdentifierField
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.app.util.controlnet_utils import CONTROLNET_MODE_VALUES, CONTROLNET_RESIZE_VALUES
|
||||
|
||||
from ...version import __version__
|
||||
|
||||
|
||||
@@ -8,11 +8,11 @@ from invokeai.app.invocations.baseinvocation import (
|
||||
invocation,
|
||||
invocation_output,
|
||||
)
|
||||
from invokeai.app.invocations.controlnet_image_processors import CONTROLNET_RESIZE_VALUES
|
||||
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, Input, InputField, OutputField, UIType
|
||||
from invokeai.app.invocations.model import ModelIdentifierField
|
||||
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.app.util.controlnet_utils import CONTROLNET_RESIZE_VALUES
|
||||
|
||||
|
||||
class T2IAdapterField(BaseModel):
|
||||
|
||||
@@ -318,10 +318,8 @@ class DownloadQueueService(DownloadQueueServiceBase):
|
||||
in_progress_path.rename(job.download_path)
|
||||
|
||||
def _validate_filename(self, directory: str, filename: str) -> bool:
|
||||
pc_name_max = os.pathconf(directory, "PC_NAME_MAX") if hasattr(os, "pathconf") else 260 # hardcoded for windows
|
||||
pc_path_max = (
|
||||
os.pathconf(directory, "PC_PATH_MAX") if hasattr(os, "pathconf") else 32767
|
||||
) # hardcoded for windows with long names enabled
|
||||
pc_name_max = get_pc_name_max(directory)
|
||||
pc_path_max = get_pc_path_max(directory)
|
||||
if "/" in filename:
|
||||
return False
|
||||
if filename.startswith(".."):
|
||||
@@ -419,6 +417,26 @@ class DownloadQueueService(DownloadQueueServiceBase):
|
||||
self._logger.warning(excp)
|
||||
|
||||
|
||||
def get_pc_name_max(directory: str) -> int:
|
||||
if hasattr(os, "pathconf"):
|
||||
try:
|
||||
return os.pathconf(directory, "PC_NAME_MAX")
|
||||
except OSError:
|
||||
# macOS with external drives raises OSError
|
||||
pass
|
||||
return 260 # hardcoded for windows
|
||||
|
||||
|
||||
def get_pc_path_max(directory: str) -> int:
|
||||
if hasattr(os, "pathconf"):
|
||||
try:
|
||||
return os.pathconf(directory, "PC_PATH_MAX")
|
||||
except OSError:
|
||||
# some platforms may not have this value
|
||||
pass
|
||||
return 32767 # hardcoded for windows with long names enabled
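A minimal sketch of how these fallbacks might back a filename check like _validate_filename above; the directory and filename values are illustrative:

    import os

    # Illustrative: reject names or paths longer than the filesystem reports (or the Windows fallbacks).
    directory, filename = "/tmp/downloads", "model.safetensors"
    if len(filename) > get_pc_name_max(directory):
        raise ValueError(f"Filename '{filename}' is too long for {directory}")
    if len(os.path.join(directory, filename)) > get_pc_path_max(directory):
        raise ValueError(f"Resulting path is too long for {directory}")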
|
||||
|
||||
|
||||
# Example on_progress event handler to display a TQDM status bar
|
||||
# Activate with:
|
||||
# download_service.download(DownloadJob('http://foo.bar/baz', '/tmp', on_progress=TqdmProgress().update))
|
||||
|
||||
@@ -1,13 +1,21 @@
|
||||
from typing import Union
|
||||
from typing import Any, Literal, Union
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
import torch
|
||||
from controlnet_aux.util import HWC3
|
||||
from diffusers.utils import PIL_INTERPOLATION
|
||||
from einops import rearrange
|
||||
from PIL import Image
|
||||
|
||||
from invokeai.backend.image_util.util import nms, normalize_image_channel_count
|
||||
|
||||
CONTROLNET_RESIZE_VALUES = Literal[
|
||||
"just_resize",
|
||||
"crop_resize",
|
||||
"fill_resize",
|
||||
"just_resize_simple",
|
||||
]
|
||||
CONTROLNET_MODE_VALUES = Literal["balanced", "more_prompt", "more_control", "unbalanced"]
|
||||
|
||||
###################################################################
|
||||
# Copy of scripts/lvminthin.py from Mikubill/sd-webui-controlnet
|
||||
###################################################################
|
||||
@@ -68,17 +76,6 @@ def lvmin_thin(x, prunings=True):
|
||||
return y
|
||||
|
||||
|
||||
def nake_nms(x):
|
||||
f1 = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8)
|
||||
f2 = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8)
|
||||
f3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8)
|
||||
f4 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8)
|
||||
y = np.zeros_like(x)
|
||||
for f in [f1, f2, f3, f4]:
|
||||
np.putmask(y, cv2.dilate(x, kernel=f) == x, x)
|
||||
return y
|
||||
|
||||
|
||||
################################################################################
|
||||
# copied from Mikubill/sd-webui-controlnet external_code.py and modified for InvokeAI
|
||||
################################################################################
|
||||
@@ -134,98 +131,122 @@ def pixel_perfect_resolution(
|
||||
return int(np.round(estimation))
|
||||
|
||||
|
||||
def clone_contiguous(x: np.ndarray[Any, Any]) -> np.ndarray[Any, Any]:
|
||||
"""Get a memory-contiguous clone of the given numpy array, as a safety measure and to improve computation efficiency."""
|
||||
return np.ascontiguousarray(x).copy()
|
||||
|
||||
|
||||
def np_img_to_torch(np_img: np.ndarray[Any, Any], device: torch.device) -> torch.Tensor:
|
||||
"""Convert a numpy image to a PyTorch tensor. The image is normalized to 0-1, rearranged to BCHW format and sent to
|
||||
the specified device."""
|
||||
|
||||
torch_img = torch.from_numpy(np_img)
|
||||
normalized = torch_img.float() / 255.0
|
||||
bchw = rearrange(normalized, "h w c -> 1 c h w")
|
||||
on_device = bchw.to(device)
|
||||
return on_device.clone()
|
||||
|
||||
|
||||
def heuristic_resize(np_img: np.ndarray[Any, Any], size: tuple[int, int]) -> np.ndarray[Any, Any]:
|
||||
"""Resizes an image using a heuristic to choose the best resizing strategy.
|
||||
|
||||
- If the image appears to be an edge map, special handling will be applied to ensure the edges are not distorted.
|
||||
- Single-pixel edge maps use NMS and thinning to keep the edges as single-pixel lines.
|
||||
- Low-color-count images are resized with nearest-neighbor to preserve color information (e.g. segmentation maps).
|
||||
- The alpha channel is handled separately to ensure it is resized correctly.
|
||||
|
||||
Args:
|
||||
np_img (np.ndarray): The input image.
|
||||
size (tuple[int, int]): The target size for the image.
|
||||
|
||||
Returns:
|
||||
np.ndarray: The resized image.
|
||||
|
||||
Adapted from https://github.com/Mikubill/sd-webui-controlnet.
|
||||
"""
|
||||
|
||||
# Return early if the image is already at the requested size
|
||||
if np_img.shape[0] == size[1] and np_img.shape[1] == size[0]:
|
||||
return np_img
|
||||
|
||||
# If the image has an alpha channel, separate it for special handling later.
|
||||
inpaint_mask = None
|
||||
if np_img.ndim == 3 and np_img.shape[2] == 4:
|
||||
inpaint_mask = np_img[:, :, 3]
|
||||
np_img = np_img[:, :, 0:3]
|
||||
|
||||
new_size_is_smaller = (size[0] * size[1]) < (np_img.shape[0] * np_img.shape[1])
|
||||
new_size_is_bigger = (size[0] * size[1]) > (np_img.shape[0] * np_img.shape[1])
|
||||
unique_color_count = np.unique(np_img.reshape(-1, np_img.shape[2]), axis=0).shape[0]
|
||||
is_one_pixel_edge = False
|
||||
is_binary = False
|
||||
|
||||
if unique_color_count == 2:
|
||||
# If the image has only two colors, it is likely binary. Check if the image has one-pixel edges.
|
||||
is_binary = np.min(np_img) < 16 and np.max(np_img) > 240
|
||||
if is_binary:
|
||||
eroded = cv2.erode(np_img, np.ones(shape=(3, 3), dtype=np.uint8), iterations=1)
|
||||
dilated = cv2.dilate(eroded, np.ones(shape=(3, 3), dtype=np.uint8), iterations=1)
|
||||
one_pixel_edge_count = np.where(dilated < np_img)[0].shape[0]
|
||||
all_edge_count = np.where(np_img > 127)[0].shape[0]
|
||||
is_one_pixel_edge = one_pixel_edge_count * 2 > all_edge_count
|
||||
|
||||
if 2 < unique_color_count < 200:
|
||||
# With a low color count, we assume this is a map where exact colors are important. Nearest-neighbor preserves
|
||||
# the colors as needed.
|
||||
interpolation = cv2.INTER_NEAREST
|
||||
elif new_size_is_smaller:
|
||||
# This works best for downscaling
|
||||
interpolation = cv2.INTER_AREA
|
||||
else:
|
||||
# Fall back for other cases
|
||||
interpolation = cv2.INTER_CUBIC # Must be CUBIC because we now use nms. NEVER CHANGE THIS
|
||||
|
||||
# This may be further transformed depending on the binary nature of the image.
|
||||
resized = cv2.resize(np_img, size, interpolation=interpolation)
|
||||
|
||||
if inpaint_mask is not None:
|
||||
# Resize the inpaint mask to match the resized image using the same interpolation method.
|
||||
inpaint_mask = cv2.resize(inpaint_mask, size, interpolation=interpolation)
|
||||
|
||||
# If the image is binary, we will perform some additional processing to ensure the edges are preserved.
|
||||
if is_binary:
|
||||
resized = np.mean(resized.astype(np.float32), axis=2).clip(0, 255).astype(np.uint8)
|
||||
if is_one_pixel_edge:
|
||||
# Use NMS and thinning to keep the edges as single-pixel lines.
|
||||
resized = nms(resized)
|
||||
_, resized = cv2.threshold(resized, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
|
||||
resized = lvmin_thin(resized, prunings=new_size_is_bigger)
|
||||
else:
|
||||
_, resized = cv2.threshold(resized, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
|
||||
resized = np.stack([resized] * 3, axis=2)
|
||||
|
||||
# Restore the alpha channel if it was present.
|
||||
if inpaint_mask is not None:
|
||||
inpaint_mask = (inpaint_mask > 127).astype(np.float32) * 255.0
|
||||
inpaint_mask = inpaint_mask[:, :, None].clip(0, 255).astype(np.uint8)
|
||||
resized = np.concatenate([resized, inpaint_mask], axis=2)
|
||||
|
||||
return resized
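A minimal usage sketch, mirroring the PIL-to-numpy round trip the Heuristic Resize invocation performs above; the file paths are illustrative:

    import numpy as np
    from PIL import Image

    # Illustrative: resize an edge map to 768x512 while keeping single-pixel edges intact.
    pil_img = Image.open("lineart.png").convert("RGB")
    np_resized = heuristic_resize(np.array(pil_img), (768, 512))  # size is (width, height)
    Image.fromarray(np_resized).save("lineart_768x512.png")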
|
||||
|
||||
|
||||
###########################################################################
|
||||
# Copied from detectmap_proc method in scripts/detectmap_proc.py in Mikubill/sd-webui-controlnet
|
||||
# modified for InvokeAI
|
||||
###########################################################################
|
||||
# def detectmap_proc(detected_map, module, resize_mode, h, w):
|
||||
def np_img_resize(np_img: np.ndarray, resize_mode: str, h: int, w: int, device: torch.device = torch.device("cpu")):
|
||||
# if 'inpaint' in module:
|
||||
# np_img = np_img.astype(np.float32)
|
||||
# else:
|
||||
# np_img = HWC3(np_img)
|
||||
np_img = HWC3(np_img)
|
||||
def np_img_resize(
|
||||
np_img: np.ndarray,
|
||||
resize_mode: CONTROLNET_RESIZE_VALUES,
|
||||
h: int,
|
||||
w: int,
|
||||
device: torch.device = torch.device("cpu"),
|
||||
) -> tuple[torch.Tensor, np.ndarray[Any, Any]]:
|
||||
np_img = normalize_image_channel_count(np_img)
|
||||
|
||||
def safe_numpy(x):
|
||||
# A very safe method to make sure that Apple/Mac works
|
||||
y = x
|
||||
|
||||
# below is very boring but do not change these. If you change these Apple or Mac may fail.
|
||||
y = y.copy()
|
||||
y = np.ascontiguousarray(y)
|
||||
y = y.copy()
|
||||
return y
|
||||
|
||||
def get_pytorch_control(x):
|
||||
# A very safe method to make sure that Apple/Mac works
|
||||
y = x
|
||||
|
||||
# below is very boring but do not change these. If you change these Apple or Mac may fail.
|
||||
y = torch.from_numpy(y)
|
||||
y = y.float() / 255.0
|
||||
y = rearrange(y, "h w c -> 1 c h w")
|
||||
y = y.clone()
|
||||
# y = y.to(devices.get_device_for("controlnet"))
|
||||
y = y.to(device)
|
||||
y = y.clone()
|
||||
return y
|
||||
|
||||
def high_quality_resize(x: np.ndarray, size):
|
||||
# Written by lvmin
|
||||
# Super high-quality control map up-scaling, considering binary, seg, and one-pixel edges
|
||||
inpaint_mask = None
|
||||
if x.ndim == 3 and x.shape[2] == 4:
|
||||
inpaint_mask = x[:, :, 3]
|
||||
x = x[:, :, 0:3]
|
||||
|
||||
new_size_is_smaller = (size[0] * size[1]) < (x.shape[0] * x.shape[1])
|
||||
new_size_is_bigger = (size[0] * size[1]) > (x.shape[0] * x.shape[1])
|
||||
unique_color_count = np.unique(x.reshape(-1, x.shape[2]), axis=0).shape[0]
|
||||
is_one_pixel_edge = False
|
||||
is_binary = False
|
||||
if unique_color_count == 2:
|
||||
is_binary = np.min(x) < 16 and np.max(x) > 240
|
||||
if is_binary:
|
||||
xc = x
|
||||
xc = cv2.erode(xc, np.ones(shape=(3, 3), dtype=np.uint8), iterations=1)
|
||||
xc = cv2.dilate(xc, np.ones(shape=(3, 3), dtype=np.uint8), iterations=1)
|
||||
one_pixel_edge_count = np.where(xc < x)[0].shape[0]
|
||||
all_edge_count = np.where(x > 127)[0].shape[0]
|
||||
is_one_pixel_edge = one_pixel_edge_count * 2 > all_edge_count
|
||||
|
||||
if 2 < unique_color_count < 200:
|
||||
interpolation = cv2.INTER_NEAREST
|
||||
elif new_size_is_smaller:
|
||||
interpolation = cv2.INTER_AREA
|
||||
else:
|
||||
interpolation = cv2.INTER_CUBIC # Must be CUBIC because we now use nms. NEVER CHANGE THIS
|
||||
|
||||
y = cv2.resize(x, size, interpolation=interpolation)
|
||||
if inpaint_mask is not None:
|
||||
inpaint_mask = cv2.resize(inpaint_mask, size, interpolation=interpolation)
|
||||
|
||||
if is_binary:
|
||||
y = np.mean(y.astype(np.float32), axis=2).clip(0, 255).astype(np.uint8)
|
||||
if is_one_pixel_edge:
|
||||
y = nake_nms(y)
|
||||
_, y = cv2.threshold(y, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
|
||||
y = lvmin_thin(y, prunings=new_size_is_bigger)
|
||||
else:
|
||||
_, y = cv2.threshold(y, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
|
||||
y = np.stack([y] * 3, axis=2)
|
||||
|
||||
if inpaint_mask is not None:
|
||||
inpaint_mask = (inpaint_mask > 127).astype(np.float32) * 255.0
|
||||
inpaint_mask = inpaint_mask[:, :, None].clip(0, 255).astype(np.uint8)
|
||||
y = np.concatenate([y, inpaint_mask], axis=2)
|
||||
|
||||
return y
|
||||
|
||||
# if resize_mode == external_code.ResizeMode.RESIZE:
|
||||
if resize_mode == "just_resize": # RESIZE
|
||||
np_img = high_quality_resize(np_img, (w, h))
|
||||
np_img = safe_numpy(np_img)
|
||||
return get_pytorch_control(np_img), np_img
|
||||
np_img = heuristic_resize(np_img, (w, h))
|
||||
np_img = clone_contiguous(np_img)
|
||||
return np_img_to_torch(np_img, device), np_img
|
||||
|
||||
old_h, old_w, _ = np_img.shape
|
||||
old_w = float(old_w)
|
||||
@@ -236,7 +257,6 @@ def np_img_resize(np_img: np.ndarray, resize_mode: str, h: int, w: int, device:
|
||||
def safeint(x: Union[int, float]) -> int:
|
||||
return int(np.round(x))
|
||||
|
||||
# if resize_mode == external_code.ResizeMode.OUTER_FIT:
|
||||
if resize_mode == "fill_resize": # OUTER_FIT
|
||||
k = min(k0, k1)
|
||||
borders = np.concatenate([np_img[0, :, :], np_img[-1, :, :], np_img[:, 0, :], np_img[:, -1, :]], axis=0)
|
||||
@@ -245,23 +265,23 @@ def np_img_resize(np_img: np.ndarray, resize_mode: str, h: int, w: int, device:
|
||||
# Inpaint hijack
|
||||
high_quality_border_color[3] = 255
|
||||
high_quality_background = np.tile(high_quality_border_color[None, None], [h, w, 1])
|
||||
np_img = high_quality_resize(np_img, (safeint(old_w * k), safeint(old_h * k)))
|
||||
np_img = heuristic_resize(np_img, (safeint(old_w * k), safeint(old_h * k)))
|
||||
new_h, new_w, _ = np_img.shape
|
||||
pad_h = max(0, (h - new_h) // 2)
|
||||
pad_w = max(0, (w - new_w) // 2)
|
||||
high_quality_background[pad_h : pad_h + new_h, pad_w : pad_w + new_w] = np_img
|
||||
np_img = high_quality_background
|
||||
np_img = safe_numpy(np_img)
|
||||
return get_pytorch_control(np_img), np_img
|
||||
np_img = clone_contiguous(np_img)
|
||||
return np_img_to_torch(np_img, device), np_img
|
||||
else: # resize_mode == "crop_resize" (INNER_FIT)
|
||||
k = max(k0, k1)
|
||||
np_img = high_quality_resize(np_img, (safeint(old_w * k), safeint(old_h * k)))
|
||||
np_img = heuristic_resize(np_img, (safeint(old_w * k), safeint(old_h * k)))
|
||||
new_h, new_w, _ = np_img.shape
|
||||
pad_h = max(0, (new_h - h) // 2)
|
||||
pad_w = max(0, (new_w - w) // 2)
|
||||
np_img = np_img[pad_h : pad_h + h, pad_w : pad_w + w]
|
||||
np_img = safe_numpy(np_img)
|
||||
return get_pytorch_control(np_img), np_img
|
||||
np_img = clone_contiguous(np_img)
|
||||
return np_img_to_torch(np_img, device), np_img
|
||||
|
||||
|
||||
def prepare_control_image(
|
||||
@@ -269,12 +289,12 @@ def prepare_control_image(
|
||||
width: int,
|
||||
height: int,
|
||||
num_channels: int = 3,
|
||||
device="cuda",
|
||||
dtype=torch.float16,
|
||||
do_classifier_free_guidance=True,
|
||||
control_mode="balanced",
|
||||
resize_mode="just_resize_simple",
|
||||
):
|
||||
device: str = "cuda",
|
||||
dtype: torch.dtype = torch.float16,
|
||||
control_mode: CONTROLNET_MODE_VALUES = "balanced",
|
||||
resize_mode: CONTROLNET_RESIZE_VALUES = "just_resize_simple",
|
||||
do_classifier_free_guidance: bool = True,
|
||||
) -> torch.Tensor:
|
||||
"""Pre-process images for ControlNets or T2I-Adapters.
|
||||
|
||||
Args:
|
||||
@@ -292,26 +312,15 @@ def prepare_control_image(
|
||||
resize_mode (str, optional): Defaults to "just_resize_simple".
|
||||
|
||||
Raises:
|
||||
NotImplementedError: If resize_mode == "crop_resize_simple".
|
||||
NotImplementedError: If resize_mode == "fill_resize_simple".
|
||||
ValueError: If `resize_mode` is not recognized.
|
||||
ValueError: If `num_channels` is out of range.
|
||||
|
||||
Returns:
|
||||
torch.Tensor: The pre-processed input tensor.
|
||||
"""
|
||||
if (
|
||||
resize_mode == "just_resize_simple"
|
||||
or resize_mode == "crop_resize_simple"
|
||||
or resize_mode == "fill_resize_simple"
|
||||
):
|
||||
if resize_mode == "just_resize_simple":
|
||||
image = image.convert("RGB")
|
||||
if resize_mode == "just_resize_simple":
|
||||
image = image.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])
|
||||
elif resize_mode == "crop_resize_simple":
|
||||
raise NotImplementedError(f"prepare_control_image is not implemented for resize_mode='{resize_mode}'.")
|
||||
elif resize_mode == "fill_resize_simple":
|
||||
raise NotImplementedError(f"prepare_control_image is not implemented for resize_mode='{resize_mode}'.")
|
||||
image = image.resize((width, height), resample=Image.LANCZOS)
|
||||
nimage = np.array(image)
|
||||
nimage = nimage[None, :]
|
||||
nimage = np.concatenate([nimage], axis=0)
|
||||
@@ -328,8 +337,7 @@ def prepare_control_image(
|
||||
resize_mode=resize_mode,
|
||||
h=height,
|
||||
w=width,
|
||||
# device=torch.device('cpu')
|
||||
device=device,
|
||||
device=torch.device(device),
|
||||
)
|
||||
else:
|
||||
raise ValueError(f"Unsupported resize_mode: '{resize_mode}'.")
|
||||
|
||||
@@ -4,5 +4,4 @@ Initialization file for invokeai.backend.image_util methods.
|
||||
|
||||
from .infill_methods.patchmatch import PatchMatch # noqa: F401
|
||||
from .pngwriter import PngWriter, PromptFormatter, retrieve_metadata, write_metadata # noqa: F401
|
||||
from .seamless import configure_model_padding # noqa: F401
|
||||
from .util import InitImageResizer, make_grid # noqa: F401
|
||||
|
||||
@@ -8,7 +8,7 @@ from huggingface_hub import hf_hub_download
|
||||
from PIL import Image
|
||||
|
||||
from invokeai.backend.image_util.util import (
|
||||
non_maximum_suppression,
|
||||
nms,
|
||||
normalize_image_channel_count,
|
||||
np_to_pil,
|
||||
pil_to_np,
|
||||
@@ -134,7 +134,7 @@ class HEDProcessor:
|
||||
detected_map = cv2.resize(detected_map, (width, height), interpolation=cv2.INTER_LINEAR)
|
||||
|
||||
if scribble:
|
||||
detected_map = non_maximum_suppression(detected_map, 127, 3.0)
|
||||
detected_map = nms(detected_map, 127, 3.0)
|
||||
detected_map = cv2.GaussianBlur(detected_map, (0, 0), 3.0)
|
||||
detected_map[detected_map > 4] = 255
|
||||
detected_map[detected_map < 255] = 0
|
||||
|
||||
@@ -8,7 +8,7 @@ from pathlib import Path
|
||||
|
||||
import numpy as np
|
||||
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
|
||||
from PIL import Image
|
||||
from PIL import Image, ImageFilter
|
||||
from transformers import AutoFeatureExtractor
|
||||
|
||||
import invokeai.backend.util.logging as logger
|
||||
@@ -16,6 +16,7 @@ from invokeai.app.services.config.config_default import get_config
|
||||
from invokeai.backend.util.devices import TorchDevice
|
||||
from invokeai.backend.util.silence_warnings import SilenceWarnings
|
||||
|
||||
repo_id = "CompVis/stable-diffusion-safety-checker"
|
||||
CHECKER_PATH = "core/convert/stable-diffusion-safety-checker"
|
||||
|
||||
|
||||
@@ -24,30 +25,30 @@ class SafetyChecker:
|
||||
Wrapper around SafetyChecker model.
|
||||
"""
|
||||
|
||||
safety_checker = None
|
||||
feature_extractor = None
|
||||
tried_load: bool = False
|
||||
safety_checker = None
|
||||
|
||||
@classmethod
|
||||
def _load_safety_checker(cls):
|
||||
if cls.tried_load:
|
||||
if cls.safety_checker is not None and cls.feature_extractor is not None:
|
||||
return
|
||||
|
||||
try:
|
||||
cls.safety_checker = StableDiffusionSafetyChecker.from_pretrained(get_config().models_path / CHECKER_PATH)
|
||||
cls.feature_extractor = AutoFeatureExtractor.from_pretrained(get_config().models_path / CHECKER_PATH)
|
||||
model_path = get_config().models_path / CHECKER_PATH
|
||||
if model_path.exists():
|
||||
cls.feature_extractor = AutoFeatureExtractor.from_pretrained(model_path)
|
||||
cls.safety_checker = StableDiffusionSafetyChecker.from_pretrained(model_path)
|
||||
else:
|
||||
model_path.mkdir(parents=True, exist_ok=True)
|
||||
cls.feature_extractor = AutoFeatureExtractor.from_pretrained(repo_id)
|
||||
cls.feature_extractor.save_pretrained(model_path, safe_serialization=True)
|
||||
cls.safety_checker = StableDiffusionSafetyChecker.from_pretrained(repo_id)
|
||||
cls.safety_checker.save_pretrained(model_path, safe_serialization=True)
|
||||
except Exception as e:
|
||||
logger.warning(f"Could not load NSFW checker: {str(e)}")
|
||||
cls.tried_load = True
|
||||
|
||||
@classmethod
|
||||
def safety_checker_available(cls) -> bool:
|
||||
return Path(get_config().models_path, CHECKER_PATH).exists()
|
||||
|
||||
@classmethod
|
||||
def has_nsfw_concept(cls, image: Image.Image) -> bool:
|
||||
if not cls.safety_checker_available() and cls.tried_load:
|
||||
return False
|
||||
cls._load_safety_checker()
|
||||
if cls.safety_checker is None or cls.feature_extractor is None:
|
||||
return False
|
||||
@@ -60,3 +61,24 @@ class SafetyChecker:
|
||||
with SilenceWarnings():
|
||||
checked_image, has_nsfw_concept = cls.safety_checker(images=x_image, clip_input=features.pixel_values)
|
||||
return has_nsfw_concept[0]
|
||||
|
||||
@classmethod
|
||||
def blur_if_nsfw(cls, image: Image.Image) -> Image.Image:
|
||||
if cls.has_nsfw_concept(image):
|
||||
logger.warning("A potentially NSFW image has been detected. Image will be blurred.")
|
||||
blurry_image = image.filter(filter=ImageFilter.GaussianBlur(radius=32))
|
||||
caution = cls._get_caution_img()
|
||||
# Center the caution image on the blurred image
|
||||
x = (blurry_image.width - caution.width) // 2
|
||||
y = (blurry_image.height - caution.height) // 2
|
||||
blurry_image.paste(caution, (x, y), caution)
|
||||
image = blurry_image
|
||||
|
||||
return image
|
||||
|
||||
@classmethod
|
||||
def _get_caution_img(cls) -> Image.Image:
|
||||
import invokeai.app.assets.images as image_assets
|
||||
|
||||
caution = Image.open(Path(image_assets.__path__[0]) / "caution.png")
|
||||
return caution.resize((caution.width // 2, caution.height // 2))
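With blur_if_nsfw in place, the check-blur-paste sequence that used to live in ImageNSFWBlurInvocation collapses to a single call. A minimal sketch; the file paths are illustrative:

    from PIL import Image

    # Illustrative: the image is returned unchanged unless the checker flags it.
    image = Image.open("generation.png")
    image = SafetyChecker.blur_if_nsfw(image)
    image.save("generation_checked.png")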
|
||||
|
||||
@@ -1,52 +0,0 @@
|
||||
import torch.nn as nn
|
||||
|
||||
|
||||
def _conv_forward_asymmetric(self, input, weight, bias):
|
||||
"""
|
||||
Patch for Conv2d._conv_forward that supports asymmetric padding
|
||||
"""
|
||||
working = nn.functional.pad(input, self.asymmetric_padding["x"], mode=self.asymmetric_padding_mode["x"])
|
||||
working = nn.functional.pad(working, self.asymmetric_padding["y"], mode=self.asymmetric_padding_mode["y"])
|
||||
return nn.functional.conv2d(
|
||||
working,
|
||||
weight,
|
||||
bias,
|
||||
self.stride,
|
||||
nn.modules.utils._pair(0),
|
||||
self.dilation,
|
||||
self.groups,
|
||||
)
|
||||
|
||||
|
||||
def configure_model_padding(model, seamless, seamless_axes):
|
||||
"""
|
||||
Modifies the 2D convolution layers to use a circular padding mode based on
|
||||
the `seamless` and `seamless_axes` options.
|
||||
"""
|
||||
# TODO: get an explicit interface for this in diffusers: https://github.com/huggingface/diffusers/issues/556
|
||||
for m in model.modules():
|
||||
if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
|
||||
if seamless:
|
||||
m.asymmetric_padding_mode = {}
|
||||
m.asymmetric_padding = {}
|
||||
m.asymmetric_padding_mode["x"] = "circular" if ("x" in seamless_axes) else "constant"
|
||||
m.asymmetric_padding["x"] = (
|
||||
m._reversed_padding_repeated_twice[0],
|
||||
m._reversed_padding_repeated_twice[1],
|
||||
0,
|
||||
0,
|
||||
)
|
||||
m.asymmetric_padding_mode["y"] = "circular" if ("y" in seamless_axes) else "constant"
|
||||
m.asymmetric_padding["y"] = (
|
||||
0,
|
||||
0,
|
||||
m._reversed_padding_repeated_twice[2],
|
||||
m._reversed_padding_repeated_twice[3],
|
||||
)
|
||||
m._conv_forward = _conv_forward_asymmetric.__get__(m, nn.Conv2d)
|
||||
else:
|
||||
m._conv_forward = nn.Conv2d._conv_forward.__get__(m, nn.Conv2d)
|
||||
if hasattr(m, "asymmetric_padding_mode"):
|
||||
del m.asymmetric_padding_mode
|
||||
if hasattr(m, "asymmetric_padding"):
|
||||
del m.asymmetric_padding
|
||||
@@ -1,4 +1,5 @@
|
||||
from math import ceil, floor, sqrt
|
||||
from typing import Optional
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
@@ -143,20 +144,21 @@ def resize_image_to_resolution(input_image: np.ndarray, resolution: int) -> np.n
|
||||
h = float(input_image.shape[0])
|
||||
w = float(input_image.shape[1])
|
||||
scaling_factor = float(resolution) / min(h, w)
|
||||
h *= scaling_factor
|
||||
w *= scaling_factor
|
||||
h = int(np.round(h / 64.0)) * 64
|
||||
w = int(np.round(w / 64.0)) * 64
|
||||
h = int(h * scaling_factor)
|
||||
w = int(w * scaling_factor)
|
||||
if scaling_factor > 1:
|
||||
return cv2.resize(input_image, (w, h), interpolation=cv2.INTER_LANCZOS4)
|
||||
else:
|
||||
return cv2.resize(input_image, (w, h), interpolation=cv2.INTER_AREA)
|
||||
|
||||
|
||||
def non_maximum_suppression(image: np.ndarray, threshold: int, sigma: float):
|
||||
def nms(np_img: np.ndarray, threshold: Optional[int] = None, sigma: Optional[float] = None) -> np.ndarray:
|
||||
"""
|
||||
Apply non-maximum suppression to an image.
|
||||
|
||||
If both threshold and sigma are provided, the image will be blurred before the suppression and thresholded afterwards,
|
||||
resulting in a binary output image.
|
||||
|
||||
This function is adapted from https://github.com/lllyasviel/ControlNet.
|
||||
|
||||
Args:
|
||||
@@ -166,23 +168,36 @@ def non_maximum_suppression(image: np.ndarray, threshold: int, sigma: float):
|
||||
|
||||
Returns:
|
||||
The image after non-maximum suppression.
|
||||
|
||||
Raises:
|
||||
ValueError: If only one of threshold and sigma is provided.
|
||||
"""
|
||||
|
||||
image = cv2.GaussianBlur(image.astype(np.float32), (0, 0), sigma)
|
||||
# Raise a value error if only one of threshold and sigma is provided
|
||||
if (threshold is None) != (sigma is None):
|
||||
raise ValueError("Both threshold and sigma must be provided if one is provided.")
|
||||
|
||||
if sigma is not None and threshold is not None:
|
||||
# Blurring the image can help to thin out features
|
||||
np_img = cv2.GaussianBlur(np_img.astype(np.float32), (0, 0), sigma)
|
||||
|
||||
filter_1 = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8)
|
||||
filter_2 = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8)
|
||||
filter_3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8)
|
||||
filter_4 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8)
|
||||
|
||||
y = np.zeros_like(image)
|
||||
nms_img = np.zeros_like(np_img)
|
||||
|
||||
for f in [filter_1, filter_2, filter_3, filter_4]:
|
||||
np.putmask(y, cv2.dilate(image, kernel=f) == image, image)
|
||||
np.putmask(nms_img, cv2.dilate(np_img, kernel=f) == np_img, np_img)
|
||||
|
||||
z = np.zeros_like(y, dtype=np.uint8)
|
||||
z[y > threshold] = 255
|
||||
return z
|
||||
if sigma is not None and threshold is not None:
|
||||
# We blurred - now threshold to get a binary image
|
||||
thresholded = np.zeros_like(nms_img, dtype=np.uint8)
|
||||
thresholded[nms_img > threshold] = 255
|
||||
return thresholded
|
||||
|
||||
return nms_img
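A minimal sketch of the two calling modes the docstring describes: raw suppression, and the blur-then-threshold binary output used by the HED scribble path; the input file is illustrative:

    import numpy as np
    from PIL import Image

    edge_map = np.array(Image.open("hed_edges.png").convert("L"))  # illustrative input
    raw = nms(edge_map)                 # suppression only
    scribble = nms(edge_map, 127, 3.0)  # blur with sigma=3.0, then threshold at 127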
|
||||
|
||||
|
||||
def safe_step(x: np.ndarray, step: int = 2) -> np.ndarray:
|
||||
|
||||
@@ -51,6 +51,7 @@ LEGACY_CONFIGS: Dict[BaseModelType, Dict[ModelVariantType, Union[str, Dict[Sched
|
||||
},
|
||||
BaseModelType.StableDiffusionXL: {
|
||||
ModelVariantType.Normal: "sd_xl_base.yaml",
|
||||
ModelVariantType.Inpaint: "sd_xl_inpaint.yaml",
|
||||
},
|
||||
BaseModelType.StableDiffusionXLRefiner: {
|
||||
ModelVariantType.Normal: "sd_xl_refiner.yaml",
|
||||
|
||||
@@ -13,6 +13,7 @@ from diffusers import (
|
||||
LCMScheduler,
|
||||
LMSDiscreteScheduler,
|
||||
PNDMScheduler,
|
||||
TCDScheduler,
|
||||
UniPCMultistepScheduler,
|
||||
)
|
||||
|
||||
@@ -40,4 +41,5 @@ SCHEDULER_MAP = {
|
||||
"dpmpp_sde_k": (DPMSolverSDEScheduler, {"use_karras_sigmas": True, "noise_sampler_seed": 0}),
|
||||
"unipc": (UniPCMultistepScheduler, {"cpu_only": True}),
|
||||
"lcm": (LCMScheduler, {}),
|
||||
"tcd": (TCDScheduler, {}),
|
||||
}
|
||||
|
||||
@@ -1,89 +1,51 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from contextlib import contextmanager
|
||||
from typing import Callable, List, Union
|
||||
from typing import Callable, List, Optional, Tuple, Union
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL
|
||||
from diffusers.models.autoencoders.autoencoder_tiny import AutoencoderTiny
|
||||
from diffusers.models.lora import LoRACompatibleConv
|
||||
from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
|
||||
|
||||
|
||||
def _conv_forward_asymmetric(self, input, weight, bias):
|
||||
"""
|
||||
Patch for Conv2d._conv_forward that supports asymmetric padding
|
||||
"""
|
||||
working = nn.functional.pad(input, self.asymmetric_padding["x"], mode=self.asymmetric_padding_mode["x"])
|
||||
working = nn.functional.pad(working, self.asymmetric_padding["y"], mode=self.asymmetric_padding_mode["y"])
|
||||
return nn.functional.conv2d(
|
||||
working,
|
||||
weight,
|
||||
bias,
|
||||
self.stride,
|
||||
nn.modules.utils._pair(0),
|
||||
self.dilation,
|
||||
self.groups,
|
||||
)
|
||||
|
||||
|
||||
@contextmanager
|
||||
def set_seamless(model: Union[UNet2DConditionModel, AutoencoderKL, AutoencoderTiny], seamless_axes: List[str]):
|
||||
if not seamless_axes:
|
||||
yield
|
||||
return
|
||||
|
||||
# Callable: (input: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor
|
||||
to_restore: list[tuple[nn.Conv2d | nn.ConvTranspose2d, Callable]] = []
|
||||
# override conv_forward
|
||||
# https://github.com/huggingface/diffusers/issues/556#issuecomment-1993287019
|
||||
def _conv_forward_asymmetric(self, input: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor] = None):
|
||||
self.paddingX = (self._reversed_padding_repeated_twice[0], self._reversed_padding_repeated_twice[1], 0, 0)
|
||||
self.paddingY = (0, 0, self._reversed_padding_repeated_twice[2], self._reversed_padding_repeated_twice[3])
|
||||
working = torch.nn.functional.pad(input, self.paddingX, mode=x_mode)
|
||||
working = torch.nn.functional.pad(working, self.paddingY, mode=y_mode)
|
||||
return torch.nn.functional.conv2d(
|
||||
working, weight, bias, self.stride, torch.nn.modules.utils._pair(0), self.dilation, self.groups
|
||||
)
|
||||
|
||||
original_layers: List[Tuple[nn.Conv2d, Callable]] = []
|
||||
|
||||
try:
|
||||
# Hard coded to skip down block layers, allowing for seamless tiling at the expense of prompt adherence
|
||||
skipped_layers = 1
|
||||
for m_name, m in model.named_modules():
|
||||
if not isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
|
||||
continue
|
||||
x_mode = "circular" if "x" in seamless_axes else "constant"
|
||||
y_mode = "circular" if "y" in seamless_axes else "constant"
|
||||
|
||||
if isinstance(model, UNet2DConditionModel) and m_name.startswith("down_blocks.") and ".resnets." in m_name:
|
||||
# down_blocks.1.resnets.1.conv1
|
||||
_, block_num, _, resnet_num, submodule_name = m_name.split(".")
|
||||
block_num = int(block_num)
|
||||
resnet_num = int(resnet_num)
|
||||
conv_layers: List[torch.nn.Conv2d] = []
|
||||
|
||||
if block_num >= len(model.down_blocks) - skipped_layers:
|
||||
continue
|
||||
for module in model.modules():
|
||||
if isinstance(module, torch.nn.Conv2d):
|
||||
conv_layers.append(module)
|
||||
|
||||
# Skip the second resnet (could be configurable)
|
||||
if resnet_num > 0:
|
||||
continue
|
||||
|
||||
# Skip Conv2d layers (could be configurable)
|
||||
if submodule_name == "conv2":
|
||||
continue
|
||||
|
||||
m.asymmetric_padding_mode = {}
|
||||
m.asymmetric_padding = {}
|
||||
m.asymmetric_padding_mode["x"] = "circular" if ("x" in seamless_axes) else "constant"
|
||||
m.asymmetric_padding["x"] = (
|
||||
m._reversed_padding_repeated_twice[0],
|
||||
m._reversed_padding_repeated_twice[1],
|
||||
0,
|
||||
0,
|
||||
)
|
||||
m.asymmetric_padding_mode["y"] = "circular" if ("y" in seamless_axes) else "constant"
|
||||
m.asymmetric_padding["y"] = (
|
||||
0,
|
||||
0,
|
||||
m._reversed_padding_repeated_twice[2],
|
||||
m._reversed_padding_repeated_twice[3],
|
||||
)
|
||||
|
||||
to_restore.append((m, m._conv_forward))
|
||||
m._conv_forward = _conv_forward_asymmetric.__get__(m, nn.Conv2d)
|
||||
for layer in conv_layers:
|
||||
if isinstance(layer, LoRACompatibleConv) and layer.lora_layer is None:
|
||||
layer.lora_layer = lambda *x: 0
|
||||
original_layers.append((layer, layer._conv_forward))
|
||||
layer._conv_forward = _conv_forward_asymmetric.__get__(layer, torch.nn.Conv2d)
|
||||
|
||||
yield
|
||||
|
||||
finally:
|
||||
for module, orig_conv_forward in to_restore:
|
||||
module._conv_forward = orig_conv_forward
|
||||
if hasattr(module, "asymmetric_padding_mode"):
|
||||
del module.asymmetric_padding_mode
|
||||
if hasattr(module, "asymmetric_padding"):
|
||||
del module.asymmetric_padding
|
||||
for layer, orig_conv_forward in original_layers:
|
||||
layer._conv_forward = orig_conv_forward
|
||||
|
||||
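set_seamless is a context manager, so the circular-padding patch only applies inside the with-block and the saved conv forwards are restored on exit. A minimal usage sketch, assuming a loaded diffusers AutoencoderKL; the vae and latents variables are illustrative:

    # Illustrative: decode latents with seamless tiling on both axes, then restore the original convs.
    with set_seamless(vae, ["x", "y"]):
        image_tensor = vae.decode(latents).sample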
98
invokeai/configs/stable-diffusion/sd_xl_inpaint.yaml
Normal file
@@ -0,0 +1,98 @@
model:
  target: sgm.models.diffusion.DiffusionEngine
  params:
    scale_factor: 0.13025
    disable_first_stage_autocast: True

    denoiser_config:
      target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
      params:
        num_idx: 1000

        weighting_config:
          target: sgm.modules.diffusionmodules.denoiser_weighting.EpsWeighting
        scaling_config:
          target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling
        discretization_config:
          target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization

    network_config:
      target: sgm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        adm_in_channels: 2816
        num_classes: sequential
        use_checkpoint: True
        in_channels: 9
        out_channels: 4
        model_channels: 320
        attention_resolutions: [4, 2]
        num_res_blocks: 2
        channel_mult: [1, 2, 4]
        num_head_channels: 64
        use_spatial_transformer: True
        use_linear_in_transformer: True
        transformer_depth: [1, 2, 10] # note: the first is unused (due to attn_res starting at 2) 32, 16, 8 --> 64, 32, 16
        context_dim: 2048
        spatial_transformer_attn_type: softmax-xformers
        legacy: False

    conditioner_config:
      target: sgm.modules.GeneralConditioner
      params:
        emb_models:
          # crossattn cond
          - is_trainable: False
            input_key: txt
            target: sgm.modules.encoders.modules.FrozenCLIPEmbedder
            params:
              layer: hidden
              layer_idx: 11
          # crossattn and vector cond
          - is_trainable: False
            input_key: txt
            target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder2
            params:
              arch: ViT-bigG-14
              version: laion2b_s39b_b160k
              freeze: True
              layer: penultimate
              always_return_pooled: True
              legacy: False
          # vector cond
          - is_trainable: False
            input_key: original_size_as_tuple
            target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
            params:
              outdim: 256 # multiplied by two
          # vector cond
          - is_trainable: False
            input_key: crop_coords_top_left
            target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
            params:
              outdim: 256 # multiplied by two
          # vector cond
          - is_trainable: False
            input_key: target_size_as_tuple
            target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
            params:
              outdim: 256 # multiplied by two

    first_stage_config:
      target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          attn_type: vanilla-xformers
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult: [1, 2, 4, 4]
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
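A few orientation notes on this config: in_channels: 9 marks the UNet as an inpainting variant, which by the usual Stable Diffusion convention receives 4 noisy latent channels, a 1-channel downscaled mask, and 4 VAE-encoded masked-image latent channels. The adm_in_channels: 2816 value is consistent with SDXL's pooled OpenCLIP embedding (1280) plus the three ConcatTimestepEmbedderND conditioners above, each contributing 2 coordinates × 256 ("multiplied by two") = 512, since 1280 + 3 × 512 = 2816. The snippet below is only a sketch of how such a 9-channel input is conventionally assembled; the helper name and the channel order are assumptions, not taken from this config or from the InvokeAI code.

import torch

def build_inpaint_unet_input(noisy_latents: torch.Tensor, mask: torch.Tensor, masked_image_latents: torch.Tensor) -> torch.Tensor:
    # Hypothetical helper: noisy_latents (B, 4, h, w), mask (B, 1, h, w),
    # masked_image_latents (B, 4, h, w); the mask is 1 where the region is repainted.
    unet_in = torch.cat([noisy_latents, mask, masked_image_latents], dim=1)
    assert unet_in.shape[1] == 9  # matches in_channels: 9 above
    return unet_in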
@@ -25,7 +25,7 @@
|
||||
"typegen": "node scripts/typegen.js",
|
||||
"preview": "vite preview",
|
||||
"lint:knip": "knip",
|
||||
"lint:dpdm": "dpdm --no-warning --no-tree --transform --exit-code circular:0 src/main.tsx",
|
||||
"lint:dpdm": "dpdm --no-warning --no-tree --transform --exit-code circular:1 src/main.tsx",
|
||||
"lint:eslint": "eslint --max-warnings=0 .",
|
||||
"lint:prettier": "prettier --check .",
|
||||
"lint:tsc": "tsc --noEmit",
|
||||
@@ -52,47 +52,48 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"@chakra-ui/react-use-size": "^2.1.0",
|
||||
"@dagrejs/dagre": "^1.1.1",
|
||||
"@dagrejs/graphlib": "^2.2.1",
|
||||
"@dagrejs/dagre": "^1.1.2",
|
||||
"@dagrejs/graphlib": "^2.2.2",
|
||||
"@dnd-kit/core": "^6.1.0",
|
||||
"@dnd-kit/sortable": "^8.0.0",
|
||||
"@dnd-kit/utilities": "^3.2.2",
|
||||
"@fontsource-variable/inter": "^5.0.17",
|
||||
"@invoke-ai/ui-library": "^0.0.21",
|
||||
"@fontsource-variable/inter": "^5.0.18",
|
||||
"@invoke-ai/ui-library": "^0.0.25",
|
||||
"@nanostores/react": "^0.7.2",
|
||||
"@reduxjs/toolkit": "2.2.2",
|
||||
"@reduxjs/toolkit": "2.2.3",
|
||||
"@roarr/browser-log-writer": "^1.3.0",
|
||||
"chakra-react-select": "^4.7.6",
|
||||
"compare-versions": "^6.1.0",
|
||||
"dateformat": "^5.0.3",
|
||||
"framer-motion": "^11.0.22",
|
||||
"i18next": "^23.10.1",
|
||||
"i18next-http-backend": "^2.5.0",
|
||||
"fracturedjsonjs": "^4.0.1",
|
||||
"framer-motion": "^11.1.8",
|
||||
"i18next": "^23.11.3",
|
||||
"i18next-http-backend": "^2.5.1",
|
||||
"idb-keyval": "^6.2.1",
|
||||
"jsondiffpatch": "^0.6.0",
|
||||
"konva": "^9.3.6",
|
||||
"lodash-es": "^4.17.21",
|
||||
"nanostores": "^0.10.0",
|
||||
"nanostores": "^0.10.3",
|
||||
"new-github-issue-url": "^1.0.0",
|
||||
"overlayscrollbars": "^2.6.1",
|
||||
"overlayscrollbars-react": "^0.5.5",
|
||||
"overlayscrollbars": "^2.7.3",
|
||||
"overlayscrollbars-react": "^0.5.6",
|
||||
"query-string": "^9.0.0",
|
||||
"react": "^18.2.0",
|
||||
"react": "^18.3.1",
|
||||
"react-colorful": "^5.6.1",
|
||||
"react-dom": "^18.2.0",
|
||||
"react-dom": "^18.3.1",
|
||||
"react-dropzone": "^14.2.3",
|
||||
"react-error-boundary": "^4.0.13",
|
||||
"react-hook-form": "^7.51.2",
|
||||
"react-hook-form": "^7.51.4",
|
||||
"react-hotkeys-hook": "4.5.0",
|
||||
"react-i18next": "^14.1.0",
|
||||
"react-icons": "^5.0.1",
|
||||
"react-i18next": "^14.1.1",
|
||||
"react-icons": "^5.2.0",
|
||||
"react-konva": "^18.2.10",
|
||||
"react-redux": "9.1.0",
|
||||
"react-resizable-panels": "^2.0.16",
|
||||
"react-redux": "9.1.2",
|
||||
"react-resizable-panels": "^2.0.19",
|
||||
"react-select": "5.8.0",
|
||||
"react-use": "^17.5.0",
|
||||
"react-virtuoso": "^4.7.5",
|
||||
"reactflow": "^11.10.4",
|
||||
"react-virtuoso": "^4.7.10",
|
||||
"reactflow": "^11.11.3",
|
||||
"redux-dynamic-middlewares": "^2.2.0",
|
||||
"redux-remember": "^5.1.0",
|
||||
"redux-undo": "^1.1.0",
|
||||
@@ -101,10 +102,11 @@
|
||||
"serialize-error": "^11.0.3",
|
||||
"socket.io-client": "^4.7.5",
|
||||
"use-debounce": "^10.0.0",
|
||||
"use-device-pixel-ratio": "^1.1.2",
|
||||
"use-image": "^1.1.1",
|
||||
"uuid": "^9.0.1",
|
||||
"zod": "^3.22.4",
|
||||
"zod-validation-error": "^3.0.3"
|
||||
"zod": "^3.23.6",
|
||||
"zod-validation-error": "^3.2.0"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@chakra-ui/react": "^2.8.2",
|
||||
@@ -115,19 +117,19 @@
|
||||
"devDependencies": {
|
||||
"@invoke-ai/eslint-config-react": "^0.0.14",
|
||||
"@invoke-ai/prettier-config-react": "^0.0.7",
|
||||
"@storybook/addon-essentials": "^8.0.4",
|
||||
"@storybook/addon-interactions": "^8.0.4",
|
||||
"@storybook/addon-links": "^8.0.4",
|
||||
"@storybook/addon-storysource": "^8.0.4",
|
||||
"@storybook/manager-api": "^8.0.4",
|
||||
"@storybook/react": "^8.0.4",
|
||||
"@storybook/react-vite": "^8.0.4",
|
||||
"@storybook/theming": "^8.0.4",
|
||||
"@storybook/addon-essentials": "^8.0.10",
|
||||
"@storybook/addon-interactions": "^8.0.10",
|
||||
"@storybook/addon-links": "^8.0.10",
|
||||
"@storybook/addon-storysource": "^8.0.10",
|
||||
"@storybook/manager-api": "^8.0.10",
|
||||
"@storybook/react": "^8.0.10",
|
||||
"@storybook/react-vite": "^8.0.10",
|
||||
"@storybook/theming": "^8.0.10",
|
||||
"@types/dateformat": "^5.0.2",
|
||||
"@types/lodash-es": "^4.17.12",
|
||||
"@types/node": "^20.11.30",
|
||||
"@types/react": "^18.2.73",
|
||||
"@types/react-dom": "^18.2.22",
|
||||
"@types/node": "^20.12.10",
|
||||
"@types/react": "^18.3.1",
|
||||
"@types/react-dom": "^18.3.0",
|
||||
"@types/uuid": "^9.0.8",
|
||||
"@vitejs/plugin-react-swc": "^3.6.0",
|
||||
"concurrently": "^8.2.2",
|
||||
@@ -135,20 +137,20 @@
|
||||
"eslint": "^8.57.0",
|
||||
"eslint-plugin-i18next": "^6.0.3",
|
||||
"eslint-plugin-path": "^1.3.0",
|
||||
"knip": "^5.6.1",
|
||||
"knip": "^5.12.3",
|
||||
"openapi-types": "^12.1.3",
|
||||
"openapi-typescript": "^6.7.5",
|
||||
"prettier": "^3.2.5",
|
||||
"rollup-plugin-visualizer": "^5.12.0",
|
||||
"storybook": "^8.0.4",
|
||||
"storybook": "^8.0.10",
|
||||
"ts-toolbelt": "^9.6.0",
|
||||
"tsafe": "^1.6.6",
|
||||
"typescript": "^5.4.3",
|
||||
"vite": "^5.2.6",
|
||||
"vite-plugin-css-injected-by-js": "^3.5.0",
|
||||
"vite-plugin-dts": "^3.8.0",
|
||||
"typescript": "^5.4.5",
|
||||
"vite": "^5.2.11",
|
||||
"vite-plugin-css-injected-by-js": "^3.5.1",
|
||||
"vite-plugin-dts": "^3.9.1",
|
||||
"vite-plugin-eslint": "^1.8.1",
|
||||
"vite-tsconfig-paths": "^4.3.2",
|
||||
"vitest": "^1.4.0"
|
||||
"vitest": "^1.6.0"
|
||||
}
|
||||
}

invokeai/frontend/web/pnpm-lock.yaml (generated, 5328 lines): diff suppressed because it is too large
@@ -76,7 +76,9 @@
|
||||
"aboutHeading": "Nutzen Sie Ihre kreative Energie",
|
||||
"toResolve": "Lösen",
|
||||
"add": "Hinzufügen",
|
||||
"loglevel": "Protokoll Stufe"
|
||||
"loglevel": "Protokoll Stufe",
|
||||
"selected": "Ausgewählt",
|
||||
"beta": "Beta"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Bildgröße",
|
||||
@@ -86,7 +88,7 @@
|
||||
"noImagesInGallery": "Keine Bilder in der Galerie",
|
||||
"loading": "Lade",
|
||||
"deleteImage_one": "Lösche Bild",
|
||||
"deleteImage_other": "",
|
||||
"deleteImage_other": "Lösche {{count}} Bilder",
|
||||
"copy": "Kopieren",
|
||||
"download": "Runterladen",
|
||||
"setCurrentImage": "Setze aktuelle Bild",
|
||||
@@ -397,7 +399,14 @@
|
||||
"cancel": "Stornieren",
|
||||
"defaultSettingsSaved": "Standardeinstellungen gespeichert",
|
||||
"addModels": "Model hinzufügen",
|
||||
"deleteModelImage": "Lösche Model Bild"
|
||||
"deleteModelImage": "Lösche Model Bild",
|
||||
"hfTokenInvalidErrorMessage": "Falscher oder fehlender HuggingFace Schlüssel.",
|
||||
"huggingFaceRepoID": "HuggingFace Repo ID",
|
||||
"hfToken": "HuggingFace Schlüssel",
|
||||
"hfTokenInvalid": "Falscher oder fehlender HF Schlüssel",
|
||||
"huggingFacePlaceholder": "besitzer/model-name",
|
||||
"hfTokenSaved": "HF Schlüssel gespeichert",
|
||||
"hfTokenUnableToVerify": "Konnte den HF Schlüssel nicht validieren"
|
||||
},
|
||||
"parameters": {
|
||||
"images": "Bilder",
|
||||
@@ -686,7 +695,11 @@
|
||||
"hands": "Hände",
|
||||
"dwOpenpose": "DW Openpose",
|
||||
"dwOpenposeDescription": "Posenschätzung mit DW Openpose",
|
||||
"selectCLIPVisionModel": "Wähle ein CLIP Vision Model aus"
|
||||
"selectCLIPVisionModel": "Wähle ein CLIP Vision Model aus",
|
||||
"ipAdapterMethod": "Methode",
|
||||
"composition": "Nur Komposition",
|
||||
"full": "Voll",
|
||||
"style": "Nur Style"
|
||||
},
|
||||
"queue": {
|
||||
"status": "Status",
|
||||
@@ -717,7 +730,6 @@
|
||||
"resume": "Wieder aufnehmen",
|
||||
"item": "Auftrag",
|
||||
"notReady": "Warteschlange noch nicht bereit",
|
||||
"queueCountPrediction": "{{promptsCount}} Prompts × {{iterations}} Iterationen -> {{count}} Generationen",
|
||||
"clearQueueAlertDialog": "\"Die Warteschlange leeren\" stoppt den aktuellen Prozess und leert die Warteschlange komplett.",
|
||||
"completedIn": "Fertig in",
|
||||
"cancelBatchSucceeded": "Stapel abgebrochen",
|
||||
|
||||
@@ -88,11 +88,13 @@
|
||||
"negativePrompt": "Negative Prompt",
|
||||
"discordLabel": "Discord",
|
||||
"dontAskMeAgain": "Don't ask me again",
|
||||
"editor": "Editor",
|
||||
"error": "Error",
|
||||
"file": "File",
|
||||
"folder": "Folder",
|
||||
"format": "format",
|
||||
"githubLabel": "Github",
|
||||
"goTo": "Go to",
|
||||
"hotkeysLabel": "Hotkeys",
|
||||
"imageFailedToLoad": "Unable to Load Image",
|
||||
"img2img": "Image To Image",
|
||||
@@ -140,7 +142,11 @@
|
||||
"blue": "Blue",
|
||||
"alpha": "Alpha",
|
||||
"selected": "Selected",
|
||||
"viewer": "Viewer"
|
||||
"tab": "Tab",
|
||||
"viewing": "Viewing",
|
||||
"viewingDesc": "Review images in a large gallery view",
|
||||
"editing": "Editing",
|
||||
"editingDesc": "Edit on the Control Layers canvas"
|
||||
},
|
||||
"controlnet": {
|
||||
"controlAdapter_one": "Control Adapter",
|
||||
@@ -156,6 +162,7 @@
|
||||
"balanced": "Balanced",
|
||||
"base": "Base",
|
||||
"beginEndStepPercent": "Begin / End Step Percentage",
|
||||
"beginEndStepPercentShort": "Begin/End %",
|
||||
"bgth": "bg_th",
|
||||
"canny": "Canny",
|
||||
"cannyDescription": "Canny edge detection",
|
||||
@@ -224,10 +231,11 @@
|
||||
"composition": "Composition Only",
|
||||
"safe": "Safe",
|
||||
"saveControlImage": "Save Control Image",
|
||||
"scribble": "scribble",
|
||||
"scribble": "Scribble",
|
||||
"selectModel": "Select a model",
|
||||
"selectCLIPVisionModel": "Select a CLIP Vision model",
|
||||
"setControlImageDimensions": "Set Control Image Dimensions To W/H",
|
||||
"setControlImageDimensions": "Copy size to W/H (optimize for model)",
|
||||
"setControlImageDimensionsForce": "Copy size to W/H (ignore model)",
|
||||
"showAdvanced": "Show Advanced",
|
||||
"small": "Small",
|
||||
"toggleControlNet": "Toggle this ControlNet",
|
||||
@@ -253,7 +261,6 @@
|
||||
"queue": "Queue",
|
||||
"queueFront": "Add to Front of Queue",
|
||||
"queueBack": "Add to Queue",
|
||||
"queueCountPrediction": "{{promptsCount}} prompts \u00d7 {{iterations}} iterations -> {{count}} generations",
|
||||
"queueEmpty": "Queue Empty",
|
||||
"enqueueing": "Queueing Batch",
|
||||
"resume": "Resume",
|
||||
@@ -306,7 +313,13 @@
|
||||
"batchFailedToQueue": "Failed to Queue Batch",
|
||||
"graphQueued": "Graph queued",
|
||||
"graphFailedToQueue": "Failed to queue graph",
|
||||
"openQueue": "Open Queue"
|
||||
"openQueue": "Open Queue",
|
||||
"prompts_one": "Prompt",
|
||||
"prompts_other": "Prompts",
|
||||
"iterations_one": "Iteration",
|
||||
"iterations_other": "Iterations",
|
||||
"generations_one": "Generation",
|
||||
"generations_other": "Generations"
|
||||
},
|
||||
"invocationCache": {
|
||||
"invocationCache": "Invocation Cache",
|
||||
@@ -582,6 +595,10 @@
|
||||
"upscale": {
|
||||
"desc": "Upscale the current image",
|
||||
"title": "Upscale"
|
||||
},
|
||||
"toggleViewer": {
|
||||
"desc": "Switches between the Image Viewer and workspace for the current tab.",
|
||||
"title": "Toggle Image Viewer"
|
||||
}
|
||||
},
|
||||
"metadata": {
|
||||
@@ -915,23 +932,37 @@
|
||||
"missingInputForField": "{{nodeLabel}} -> {{fieldLabel}} missing input",
|
||||
"missingNodeTemplate": "Missing node template",
|
||||
"noControlImageForControlAdapter": "Control Adapter #{{number}} has no control image",
|
||||
"imageNotProcessedForControlAdapter": "Control Adapter #{{number}}'s image is not processed",
|
||||
"noInitialImageSelected": "No initial image selected",
|
||||
"noModelForControlAdapter": "Control Adapter #{{number}} has no model selected.",
|
||||
"incompatibleBaseModelForControlAdapter": "Control Adapter #{{number}} model is incompatible with main model.",
|
||||
"noModelSelected": "No model selected",
|
||||
"noPrompts": "No prompts generated",
|
||||
"noNodesInGraph": "No nodes in graph",
|
||||
"systemDisconnected": "System disconnected"
|
||||
"systemDisconnected": "System disconnected",
|
||||
"layer": {
|
||||
"initialImageNoImageSelected": "no initial image selected",
|
||||
"controlAdapterNoModelSelected": "no Control Adapter model selected",
|
||||
"controlAdapterIncompatibleBaseModel": "incompatible Control Adapter base model",
|
||||
"controlAdapterNoImageSelected": "no Control Adapter image selected",
|
||||
"controlAdapterImageNotProcessed": "Control Adapter image not processed",
|
||||
"t2iAdapterIncompatibleDimensions": "T2I Adapter requires image dimension to be multiples of 64",
|
||||
"ipAdapterNoModelSelected": "no IP adapter selected",
|
||||
"ipAdapterIncompatibleBaseModel": "incompatible IP Adapter base model",
|
||||
"ipAdapterNoImageSelected": "no IP Adapter image selected",
|
||||
"rgNoPromptsOrIPAdapters": "no text prompts or IP Adapters",
|
||||
"rgNoRegion": "no region selected"
|
||||
}
|
||||
},
|
||||
"maskBlur": "Mask Blur",
|
||||
"negativePromptPlaceholder": "Negative Prompt",
|
||||
"globalNegativePromptPlaceholder": "Global Negative Prompt",
|
||||
"noiseThreshold": "Noise Threshold",
|
||||
"patchmatchDownScaleSize": "Downscale",
|
||||
"perlinNoise": "Perlin Noise",
|
||||
"positivePromptPlaceholder": "Positive Prompt",
|
||||
"globalPositivePromptPlaceholder": "Global Positive Prompt",
|
||||
"iterations": "Iterations",
|
||||
"iterationsWithCount_one": "{{count}} Iteration",
|
||||
"iterationsWithCount_other": "{{count}} Iterations",
|
||||
"scale": "Scale",
|
||||
"scaleBeforeProcessing": "Scale Before Processing",
|
||||
"scaledHeight": "Scaled H",
|
||||
@@ -1511,7 +1542,7 @@
|
||||
"app": {
|
||||
"storeNotInitialized": "Store is not initialized"
|
||||
},
|
||||
"regionalPrompts": {
|
||||
"controlLayers": {
|
||||
"deleteAll": "Delete All",
|
||||
"addLayer": "Add Layer",
|
||||
"moveToFront": "Move to Front",
|
||||
@@ -1519,8 +1550,7 @@
|
||||
"moveForward": "Move Forward",
|
||||
"moveBackward": "Move Backward",
|
||||
"brushSize": "Brush Size",
|
||||
"regionalControl": "Regional Control (ALPHA)",
|
||||
"enableRegionalPrompts": "Enable $t(regionalPrompts.regionalPrompts)",
|
||||
"controlLayers": "Control Layers",
|
||||
"globalMaskOpacity": "Global Mask Opacity",
|
||||
"autoNegative": "Auto Negative",
|
||||
"toggleVisibility": "Toggle Layer Visibility",
|
||||
@@ -1531,6 +1561,35 @@
|
||||
"maskPreviewColor": "Mask Preview Color",
|
||||
"addPositivePrompt": "Add $t(common.positivePrompt)",
|
||||
"addNegativePrompt": "Add $t(common.negativePrompt)",
|
||||
"addIPAdapter": "Add $t(common.ipAdapter)"
|
||||
"addIPAdapter": "Add $t(common.ipAdapter)",
|
||||
"regionalGuidance": "Regional Guidance",
|
||||
"regionalGuidanceLayer": "$t(controlLayers.regionalGuidance) $t(unifiedCanvas.layer)",
|
||||
"opacity": "Opacity",
|
||||
"globalControlAdapter": "Global $t(controlnet.controlAdapter_one)",
|
||||
"globalControlAdapterLayer": "Global $t(controlnet.controlAdapter_one) $t(unifiedCanvas.layer)",
|
||||
"globalIPAdapter": "Global $t(common.ipAdapter)",
|
||||
"globalIPAdapterLayer": "Global $t(common.ipAdapter) $t(unifiedCanvas.layer)",
|
||||
"globalInitialImage": "Global Initial Image",
|
||||
"globalInitialImageLayer": "$t(controlLayers.globalInitialImage) $t(unifiedCanvas.layer)",
|
||||
"opacityFilter": "Opacity Filter",
|
||||
"clearProcessor": "Clear Processor",
|
||||
"resetProcessor": "Reset Processor to Defaults",
|
||||
"noLayersAdded": "No Layers Added",
|
||||
"layers_one": "Layer",
|
||||
"layers_other": "Layers"
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
"generation": "Generation",
|
||||
"generationTab": "$t(ui.tabs.generation) $t(common.tab)",
|
||||
"canvas": "Canvas",
|
||||
"canvasTab": "$t(ui.tabs.canvas) $t(common.tab)",
|
||||
"workflows": "Workflows",
|
||||
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
|
||||
"models": "Models",
|
||||
"modelsTab": "$t(ui.tabs.models) $t(common.tab)",
|
||||
"queue": "Queue",
|
||||
"queueTab": "$t(ui.tabs.queue) $t(common.tab)"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -25,7 +25,24 @@
|
||||
"areYouSure": "¿Estas seguro?",
|
||||
"batch": "Administrador de lotes",
|
||||
"modelManager": "Administrador de modelos",
|
||||
"communityLabel": "Comunidad"
|
||||
"communityLabel": "Comunidad",
|
||||
"direction": "Dirección",
|
||||
"ai": "Ia",
|
||||
"add": "Añadir",
|
||||
"auto": "Automático",
|
||||
"copyError": "Error $t(gallery.copy)",
|
||||
"details": "Detalles",
|
||||
"or": "o",
|
||||
"checkpoint": "Punto de control",
|
||||
"controlNet": "ControlNet",
|
||||
"aboutHeading": "Sea dueño de su poder creativo",
|
||||
"advanced": "Avanzado",
|
||||
"data": "Fecha",
|
||||
"delete": "Borrar",
|
||||
"copy": "Copiar",
|
||||
"beta": "Beta",
|
||||
"on": "En",
|
||||
"aboutDesc": "¿Utilizas Invoke para trabajar? Mira aquí:"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Tamaño de la imagen",
|
||||
@@ -443,7 +460,13 @@
|
||||
"previousImage": "Imagen anterior",
|
||||
"nextImage": "Siguiente imagen",
|
||||
"showOptionsPanel": "Mostrar el panel lateral",
|
||||
"menu": "Menú"
|
||||
"menu": "Menú",
|
||||
"showGalleryPanel": "Mostrar panel de galería",
|
||||
"loadMore": "Cargar más",
|
||||
"about": "Acerca de",
|
||||
"createIssue": "Crear un problema",
|
||||
"resetUI": "Interfaz de usuario $t(accessibility.reset)",
|
||||
"mode": "Modo"
|
||||
},
|
||||
"nodes": {
|
||||
"zoomInNodes": "Acercar",
|
||||
@@ -456,5 +479,68 @@
|
||||
"reloadNodeTemplates": "Recargar las plantillas de nodos",
|
||||
"loadWorkflow": "Cargar el flujo de trabajo",
|
||||
"downloadWorkflow": "Descargar el flujo de trabajo en un archivo JSON"
|
||||
},
|
||||
"boards": {
|
||||
"autoAddBoard": "Agregar panel automáticamente",
|
||||
"changeBoard": "Cambiar el panel",
|
||||
"clearSearch": "Borrar la búsqueda",
|
||||
"deleteBoard": "Borrar el panel",
|
||||
"selectBoard": "Seleccionar un panel",
|
||||
"uncategorized": "Sin categoría",
|
||||
"cancel": "Cancelar",
|
||||
"addBoard": "Agregar un panel",
|
||||
"movingImagesToBoard_one": "Moviendo {{count}} imagen al panel:",
|
||||
"movingImagesToBoard_many": "Moviendo {{count}} imágenes al panel:",
|
||||
"movingImagesToBoard_other": "Moviendo {{count}} imágenes al panel:",
|
||||
"bottomMessage": "Al eliminar este panel y las imágenes que contiene, se restablecerán las funciones que los estén utilizando actualmente.",
|
||||
"deleteBoardAndImages": "Borrar el panel y las imágenes",
|
||||
"loading": "Cargando...",
|
||||
"deletedBoardsCannotbeRestored": "Los paneles eliminados no se pueden restaurar",
|
||||
"move": "Mover",
|
||||
"menuItemAutoAdd": "Agregar automáticamente a este panel",
|
||||
"searchBoard": "Buscando paneles…",
|
||||
"topMessage": "Este panel contiene imágenes utilizadas en las siguientes funciones:",
|
||||
"downloadBoard": "Descargar panel",
|
||||
"deleteBoardOnly": "Borrar solo el panel",
|
||||
"myBoard": "Mi panel",
|
||||
"noMatching": "No hay paneles que coincidan"
|
||||
},
|
||||
"accordions": {
|
||||
"compositing": {
|
||||
"title": "Composición",
|
||||
"infillTab": "Relleno"
|
||||
},
|
||||
"generation": {
|
||||
"title": "Generación"
|
||||
},
|
||||
"image": {
|
||||
"title": "Imagen"
|
||||
},
|
||||
"control": {
|
||||
"title": "Control"
|
||||
},
|
||||
"advanced": {
|
||||
"options": "$t(accordions.advanced.title) opciones",
|
||||
"title": "Avanzado"
|
||||
}
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
"generationTab": "$t(ui.tabs.generation) $t(common.tab)",
|
||||
"canvas": "Lienzo",
|
||||
"generation": "Generación",
|
||||
"queue": "Cola",
|
||||
"queueTab": "$t(ui.tabs.queue) $t(common.tab)",
|
||||
"workflows": "Flujos de trabajo",
|
||||
"models": "Modelos",
|
||||
"modelsTab": "$t(ui.tabs.models) $t(common.tab)",
|
||||
"canvasTab": "$t(ui.tabs.canvas) $t(common.tab)",
|
||||
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)"
|
||||
}
|
||||
},
|
||||
"controlLayers": {
|
||||
"layers_one": "Capa",
|
||||
"layers_many": "Capas",
|
||||
"layers_other": "Capas"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"reportBugLabel": "Segnala un errore",
|
||||
"settingsLabel": "Impostazioni",
|
||||
"img2img": "Immagine a Immagine",
|
||||
"unifiedCanvas": "Tela unificata",
|
||||
"unifiedCanvas": "Tela",
|
||||
"nodes": "Flussi di lavoro",
|
||||
"upload": "Caricamento",
|
||||
"load": "Carica",
|
||||
@@ -74,7 +74,18 @@
|
||||
"file": "File",
|
||||
"toResolve": "Da risolvere",
|
||||
"add": "Aggiungi",
|
||||
"loglevel": "Livello di log"
|
||||
"loglevel": "Livello di log",
|
||||
"beta": "Beta",
|
||||
"positivePrompt": "Prompt positivo",
|
||||
"negativePrompt": "Prompt negativo",
|
||||
"selected": "Selezionato",
|
||||
"goTo": "Vai a",
|
||||
"editor": "Editor",
|
||||
"tab": "Scheda",
|
||||
"viewing": "Visualizza",
|
||||
"viewingDesc": "Rivedi le immagini in un'ampia vista della galleria",
|
||||
"editing": "Modifica",
|
||||
"editingDesc": "Modifica nell'area Livelli di controllo"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Dimensione dell'immagine",
|
||||
@@ -180,8 +191,8 @@
|
||||
"desc": "Mostra le informazioni sui metadati dell'immagine corrente"
|
||||
},
|
||||
"sendToImageToImage": {
|
||||
"title": "Invia a Immagine a Immagine",
|
||||
"desc": "Invia l'immagine corrente a da Immagine a Immagine"
|
||||
"title": "Invia a Generazione da immagine",
|
||||
"desc": "Invia l'immagine corrente a Generazione da immagine"
|
||||
},
|
||||
"deleteImage": {
|
||||
"title": "Elimina immagine",
|
||||
@@ -334,6 +345,10 @@
|
||||
"remixImage": {
|
||||
"desc": "Utilizza tutti i parametri tranne il seme dell'immagine corrente",
|
||||
"title": "Remixa l'immagine"
|
||||
},
|
||||
"toggleViewer": {
|
||||
"title": "Attiva/disattiva il visualizzatore di immagini",
|
||||
"desc": "Passa dal Visualizzatore immagini all'area di lavoro per la scheda corrente."
|
||||
}
|
||||
},
|
||||
"modelManager": {
|
||||
@@ -471,8 +486,8 @@
|
||||
"scaledHeight": "Altezza ridimensionata",
|
||||
"infillMethod": "Metodo di riempimento",
|
||||
"tileSize": "Dimensione piastrella",
|
||||
"sendToImg2Img": "Invia a Immagine a Immagine",
|
||||
"sendToUnifiedCanvas": "Invia a Tela Unificata",
|
||||
"sendToImg2Img": "Invia a Generazione da immagine",
|
||||
"sendToUnifiedCanvas": "Invia alla Tela",
|
||||
"downloadImage": "Scarica l'immagine",
|
||||
"usePrompt": "Usa Prompt",
|
||||
"useSeed": "Usa Seme",
|
||||
@@ -508,13 +523,11 @@
|
||||
"incompatibleBaseModelForControlAdapter": "Il modello dell'adattatore di controllo #{{number}} non è compatibile con il modello principale.",
|
||||
"missingNodeTemplate": "Modello di nodo mancante",
|
||||
"missingInputForField": "{{nodeLabel}} -> {{fieldLabel}} ingresso mancante",
|
||||
"missingFieldTemplate": "Modello di campo mancante"
|
||||
"missingFieldTemplate": "Modello di campo mancante",
|
||||
"imageNotProcessedForControlAdapter": "L'immagine dell'adattatore di controllo #{{number}} non è stata elaborata"
|
||||
},
|
||||
"useCpuNoise": "Usa la CPU per generare rumore",
|
||||
"iterations": "Iterazioni",
|
||||
"iterationsWithCount_one": "{{count}} Iterazione",
|
||||
"iterationsWithCount_many": "{{count}} Iterazioni",
|
||||
"iterationsWithCount_other": "{{count}} Iterazioni",
|
||||
"isAllowedToUpscale": {
|
||||
"useX2Model": "L'immagine è troppo grande per l'ampliamento con il modello x4, utilizza il modello x2",
|
||||
"tooLarge": "L'immagine è troppo grande per l'ampliamento, seleziona un'immagine più piccola"
|
||||
@@ -534,7 +547,10 @@
|
||||
"infillMosaicMinColor": "Colore minimo",
|
||||
"infillMosaicMaxColor": "Colore massimo",
|
||||
"infillMosaicTileHeight": "Altezza piastrella",
|
||||
"infillColorValue": "Colore di riempimento"
|
||||
"infillColorValue": "Colore di riempimento",
|
||||
"globalSettings": "Impostazioni globali",
|
||||
"globalPositivePromptPlaceholder": "Prompt positivo globale",
|
||||
"globalNegativePromptPlaceholder": "Prompt negativo globale"
|
||||
},
|
||||
"settings": {
|
||||
"models": "Modelli",
|
||||
@@ -559,7 +575,7 @@
|
||||
"intermediatesCleared_one": "Cancellata {{count}} immagine intermedia",
|
||||
"intermediatesCleared_many": "Cancellate {{count}} immagini intermedie",
|
||||
"intermediatesCleared_other": "Cancellate {{count}} immagini intermedie",
|
||||
"clearIntermediatesDesc1": "La cancellazione delle immagini intermedie ripristinerà lo stato di Tela Unificata e ControlNet.",
|
||||
"clearIntermediatesDesc1": "La cancellazione delle immagini intermedie ripristinerà lo stato della Tela e degli Adattatori di Controllo.",
|
||||
"intermediatesClearedFailed": "Problema con la cancellazione delle immagini intermedie",
|
||||
"clearIntermediatesWithCount_one": "Cancella {{count}} immagine intermedia",
|
||||
"clearIntermediatesWithCount_many": "Cancella {{count}} immagini intermedie",
|
||||
@@ -575,8 +591,8 @@
|
||||
"imageCopied": "Immagine copiata",
|
||||
"imageNotLoadedDesc": "Impossibile trovare l'immagine",
|
||||
"canvasMerged": "Tela unita",
|
||||
"sentToImageToImage": "Inviato a Immagine a Immagine",
|
||||
"sentToUnifiedCanvas": "Inviato a Tela Unificata",
|
||||
"sentToImageToImage": "Inviato a Generazione da immagine",
|
||||
"sentToUnifiedCanvas": "Inviato alla Tela",
|
||||
"parametersNotSet": "Parametri non impostati",
|
||||
"metadataLoadFailed": "Impossibile caricare i metadati",
|
||||
"serverError": "Errore del Server",
|
||||
@@ -795,7 +811,7 @@
|
||||
"float": "In virgola mobile",
|
||||
"currentImageDescription": "Visualizza l'immagine corrente nell'editor dei nodi",
|
||||
"fieldTypesMustMatch": "I tipi di campo devono corrispondere",
|
||||
"edge": "Bordo",
|
||||
"edge": "Collegamento",
|
||||
"currentImage": "Immagine corrente",
|
||||
"integer": "Numero Intero",
|
||||
"inputMayOnlyHaveOneConnection": "L'ingresso può avere solo una connessione",
|
||||
@@ -845,7 +861,9 @@
|
||||
"resetToDefaultValue": "Ripristina il valore predefinito",
|
||||
"noFieldsViewMode": "Questo flusso di lavoro non ha campi selezionati da visualizzare. Visualizza il flusso di lavoro completo per configurare i valori.",
|
||||
"edit": "Modifica",
|
||||
"graph": "Grafico"
|
||||
"graph": "Grafico",
|
||||
"showEdgeLabelsHelp": "Mostra etichette sui collegamenti, che indicano i nodi collegati",
|
||||
"showEdgeLabels": "Mostra le etichette del collegamento"
|
||||
},
|
||||
"boards": {
|
||||
"autoAddBoard": "Aggiungi automaticamente bacheca",
|
||||
@@ -922,7 +940,7 @@
|
||||
"colorMapTileSize": "Dimensione piastrella",
|
||||
"mediapipeFaceDescription": "Rilevamento dei volti tramite Mediapipe",
|
||||
"hedDescription": "Rilevamento dei bordi nidificati olisticamente",
|
||||
"setControlImageDimensions": "Imposta le dimensioni dell'immagine di controllo su L/A",
|
||||
"setControlImageDimensions": "Copia le dimensioni in L/A (ottimizza per il modello)",
|
||||
"maxFaces": "Numero massimo di volti",
|
||||
"addT2IAdapter": "Aggiungi $t(common.t2iAdapter)",
|
||||
"addControlNet": "Aggiungi $t(common.controlNet)",
|
||||
@@ -951,12 +969,17 @@
|
||||
"mediapipeFace": "Mediapipe Volto",
|
||||
"ip_adapter": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.ipAdapter))",
|
||||
"t2i_adapter": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.t2iAdapter))",
|
||||
"selectCLIPVisionModel": "Seleziona un modello CLIP Vision"
|
||||
"selectCLIPVisionModel": "Seleziona un modello CLIP Vision",
|
||||
"ipAdapterMethod": "Metodo",
|
||||
"full": "Completo",
|
||||
"composition": "Solo la composizione",
|
||||
"style": "Solo lo stile",
|
||||
"beginEndStepPercentShort": "Inizio/Fine %",
|
||||
"setControlImageDimensionsForce": "Copia le dimensioni in L/A (ignora il modello)"
|
||||
},
|
||||
"queue": {
|
||||
"queueFront": "Aggiungi all'inizio della coda",
|
||||
"queueBack": "Aggiungi alla coda",
|
||||
"queueCountPrediction": "{{promptsCount}} prompt × {{iterations}} iterazioni -> {{count}} generazioni",
|
||||
"queue": "Coda",
|
||||
"status": "Stato",
|
||||
"pruneSucceeded": "Rimossi {{item_count}} elementi completati dalla coda",
|
||||
@@ -993,7 +1016,7 @@
|
||||
"cancelBatchSucceeded": "Lotto annullato",
|
||||
"clearTooltip": "Annulla e cancella tutti gli elementi",
|
||||
"current": "Attuale",
|
||||
"pauseTooltip": "Sospende l'elaborazione",
|
||||
"pauseTooltip": "Sospendi l'elaborazione",
|
||||
"failed": "Falliti",
|
||||
"cancelItem": "Annulla l'elemento",
|
||||
"next": "Prossimo",
|
||||
@@ -1394,6 +1417,12 @@
|
||||
"paragraphs": [
|
||||
"La dimensione del bordo del passaggio di coerenza."
|
||||
]
|
||||
},
|
||||
"ipAdapterMethod": {
|
||||
"heading": "Metodo",
|
||||
"paragraphs": [
|
||||
"Metodo con cui applicare l'adattatore IP corrente."
|
||||
]
|
||||
}
|
||||
},
|
||||
"sdxl": {
|
||||
@@ -1522,5 +1551,56 @@
|
||||
"compatibleEmbeddings": "Incorporamenti compatibili",
|
||||
"addPromptTrigger": "Aggiungi Trigger nel prompt",
|
||||
"noMatchingTriggers": "Nessun Trigger corrispondente"
|
||||
},
|
||||
"controlLayers": {
|
||||
"opacityFilter": "Filtro opacità",
|
||||
"deleteAll": "Cancella tutto",
|
||||
"addLayer": "Aggiungi Livello",
|
||||
"moveToFront": "Sposta in primo piano",
|
||||
"moveToBack": "Sposta in fondo",
|
||||
"moveForward": "Sposta avanti",
|
||||
"moveBackward": "Sposta indietro",
|
||||
"brushSize": "Dimensioni del pennello",
|
||||
"globalMaskOpacity": "Opacità globale della maschera",
|
||||
"autoNegative": "Auto Negativo",
|
||||
"toggleVisibility": "Attiva/disattiva la visibilità dei livelli",
|
||||
"deletePrompt": "Cancella il prompt",
|
||||
"debugLayers": "Debug dei Livelli",
|
||||
"rectangle": "Rettangolo",
|
||||
"maskPreviewColor": "Colore anteprima maschera",
|
||||
"addPositivePrompt": "Aggiungi $t(common.positivePrompt)",
|
||||
"addNegativePrompt": "Aggiungi $t(common.negativePrompt)",
|
||||
"addIPAdapter": "Aggiungi $t(common.ipAdapter)",
|
||||
"regionalGuidance": "Guida regionale",
|
||||
"regionalGuidanceLayer": "$t(unifiedCanvas.layer) $t(controlLayers.regionalGuidance)",
|
||||
"opacity": "Opacità",
|
||||
"globalControlAdapter": "$t(controlnet.controlAdapter_one) Globale",
|
||||
"globalControlAdapterLayer": "$t(controlnet.controlAdapter_one) - $t(unifiedCanvas.layer) Globale",
|
||||
"globalIPAdapter": "$t(common.ipAdapter) Globale",
|
||||
"globalIPAdapterLayer": "$t(common.ipAdapter) - $t(unifiedCanvas.layer) Globale",
|
||||
"globalInitialImage": "Immagine iniziale",
|
||||
"globalInitialImageLayer": "$t(controlLayers.globalInitialImage) - $t(unifiedCanvas.layer) Globale",
|
||||
"clearProcessor": "Cancella processore",
|
||||
"resetProcessor": "Ripristina il processore alle impostazioni predefinite",
|
||||
"noLayersAdded": "Nessun livello aggiunto",
|
||||
"resetRegion": "Reimposta la regione",
|
||||
"controlLayers": "Livelli di controllo",
|
||||
"layers_one": "Livello",
|
||||
"layers_many": "Livelli",
|
||||
"layers_other": "Livelli"
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
"generation": "Generazione",
|
||||
"generationTab": "$t(ui.tabs.generation) $t(common.tab)",
|
||||
"canvas": "Tela",
|
||||
"canvasTab": "$t(ui.tabs.canvas) $t(common.tab)",
|
||||
"workflows": "Flussi di lavoro",
|
||||
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
|
||||
"models": "Modelli",
|
||||
"modelsTab": "$t(ui.tabs.models) $t(common.tab)",
|
||||
"queue": "Coda",
|
||||
"queueTab": "$t(ui.tabs.queue) $t(common.tab)"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -570,7 +570,6 @@
|
||||
"pauseSucceeded": "処理が一時停止されました",
|
||||
"queueFront": "キューの先頭へ追加",
|
||||
"queueBack": "キューに追加",
|
||||
"queueCountPrediction": "{{promptsCount}} プロンプト × {{iterations}} イテレーション -> {{count}} 枚生成",
|
||||
"pause": "一時停止",
|
||||
"queue": "キュー",
|
||||
"pauseTooltip": "処理を一時停止",
|
||||
|
||||
@@ -505,7 +505,6 @@
|
||||
"completed": "완성된",
|
||||
"queueBack": "Queue에 추가",
|
||||
"cancelFailed": "항목 취소 중 발생한 문제",
|
||||
"queueCountPrediction": "Queue에 {{predicted}} 추가",
|
||||
"batchQueued": "Batch Queued",
|
||||
"pauseFailed": "프로세서 중지 중 발생한 문제",
|
||||
"clearFailed": "Queue 제거 중 발생한 문제",
|
||||
|
||||
@@ -383,8 +383,6 @@
|
||||
"useCpuNoise": "Gebruik CPU-ruis",
|
||||
"imageActions": "Afbeeldingshandeling",
|
||||
"iterations": "Iteraties",
|
||||
"iterationsWithCount_one": "{{count}} iteratie",
|
||||
"iterationsWithCount_other": "{{count}} iteraties",
|
||||
"coherenceMode": "Modus"
|
||||
},
|
||||
"settings": {
|
||||
@@ -940,7 +938,6 @@
|
||||
"completed": "Voltooid",
|
||||
"queueBack": "Voeg toe aan wachtrij",
|
||||
"cancelFailed": "Fout bij annuleren onderdeel",
|
||||
"queueCountPrediction": "Voeg {{predicted}} toe aan wachtrij",
|
||||
"batchQueued": "Reeks in wachtrij geplaatst",
|
||||
"pauseFailed": "Fout bij onderbreken verwerker",
|
||||
"clearFailed": "Fout bij wissen van wachtrij",
|
||||
|
||||
@@ -76,7 +76,18 @@
|
||||
"localSystem": "Локальная система",
|
||||
"aboutDesc": "Используя Invoke для работы? Проверьте это:",
|
||||
"add": "Добавить",
|
||||
"loglevel": "Уровень логов"
|
||||
"loglevel": "Уровень логов",
|
||||
"beta": "Бета",
|
||||
"selected": "Выбрано",
|
||||
"positivePrompt": "Позитивный запрос",
|
||||
"negativePrompt": "Негативный запрос",
|
||||
"editor": "Редактор",
|
||||
"goTo": "Перейти к",
|
||||
"tab": "Вкладка",
|
||||
"viewing": "Просмотр",
|
||||
"editing": "Редактирование",
|
||||
"viewingDesc": "Просмотр изображений в режиме большой галереи",
|
||||
"editingDesc": "Редактировать на холсте слоёв управления"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Размер изображений",
|
||||
@@ -87,8 +98,8 @@
|
||||
"deleteImagePermanent": "Удаленные изображения невозможно восстановить.",
|
||||
"deleteImageBin": "Удаленные изображения будут отправлены в корзину вашей операционной системы.",
|
||||
"deleteImage_one": "Удалить изображение",
|
||||
"deleteImage_few": "",
|
||||
"deleteImage_many": "",
|
||||
"deleteImage_few": "Удалить {{count}} изображения",
|
||||
"deleteImage_many": "Удалить {{count}} изображений",
|
||||
"assets": "Ресурсы",
|
||||
"autoAssignBoardOnClick": "Авто-назначение доски по клику",
|
||||
"deleteSelection": "Удалить выделенное",
|
||||
@@ -336,6 +347,10 @@
|
||||
"remixImage": {
|
||||
"desc": "Используйте все параметры, кроме сида из текущего изображения",
|
||||
"title": "Ремикс изображения"
|
||||
},
|
||||
"toggleViewer": {
|
||||
"title": "Переключить просмотр изображений",
|
||||
"desc": "Переключение между средством просмотра изображений и рабочей областью для текущей вкладки."
|
||||
}
|
||||
},
|
||||
"modelManager": {
|
||||
@@ -512,7 +527,8 @@
|
||||
"missingNodeTemplate": "Отсутствует шаблон узла",
|
||||
"missingFieldTemplate": "Отсутствует шаблон поля",
|
||||
"addingImagesTo": "Добавление изображений в",
|
||||
"invoke": "Создать"
|
||||
"invoke": "Создать",
|
||||
"imageNotProcessedForControlAdapter": "Изображение адаптера контроля №{{number}} не обрабатывается"
|
||||
},
|
||||
"isAllowedToUpscale": {
|
||||
"useX2Model": "Изображение слишком велико для увеличения с помощью модели x4. Используйте модель x2",
|
||||
@@ -523,9 +539,6 @@
|
||||
"useCpuNoise": "Использовать шум CPU",
|
||||
"imageActions": "Действия с изображениями",
|
||||
"iterations": "Кол-во",
|
||||
"iterationsWithCount_one": "{{count}} Интеграция",
|
||||
"iterationsWithCount_few": "{{count}} Итерации",
|
||||
"iterationsWithCount_many": "{{count}} Итераций",
|
||||
"useSize": "Использовать размер",
|
||||
"coherenceMode": "Режим",
|
||||
"aspect": "Соотношение",
|
||||
@@ -541,7 +554,10 @@
|
||||
"infillMosaicTileHeight": "Высота плиток",
|
||||
"infillMosaicMinColor": "Мин цвет",
|
||||
"infillMosaicMaxColor": "Макс цвет",
|
||||
"infillColorValue": "Цвет заливки"
|
||||
"infillColorValue": "Цвет заливки",
|
||||
"globalSettings": "Глобальные настройки",
|
||||
"globalNegativePromptPlaceholder": "Глобальный негативный запрос",
|
||||
"globalPositivePromptPlaceholder": "Глобальный запрос"
|
||||
},
|
||||
"settings": {
|
||||
"models": "Модели",
|
||||
@@ -706,7 +722,9 @@
|
||||
"coherenceModeBoxBlur": "коробчатое размытие",
|
||||
"discardCurrent": "Отбросить текущее",
|
||||
"invertBrushSizeScrollDirection": "Инвертировать прокрутку для размера кисти",
|
||||
"initialFitImageSize": "Подогнать размер изображения при перебросе"
|
||||
"initialFitImageSize": "Подогнать размер изображения при перебросе",
|
||||
"hideBoundingBox": "Скрыть ограничительную рамку",
|
||||
"showBoundingBox": "Показать ограничительную рамку"
|
||||
},
|
||||
"accessibility": {
|
||||
"uploadImage": "Загрузить изображение",
|
||||
@@ -849,7 +867,10 @@
|
||||
"editMode": "Открыть в редакторе узлов",
|
||||
"resetToDefaultValue": "Сбросить к стандартному значкнию",
|
||||
"edit": "Редактировать",
|
||||
"noFieldsViewMode": "В этом рабочем процессе нет выбранных полей для отображения. Просмотрите полный рабочий процесс для настройки значений."
|
||||
"noFieldsViewMode": "В этом рабочем процессе нет выбранных полей для отображения. Просмотрите полный рабочий процесс для настройки значений.",
|
||||
"graph": "График",
|
||||
"showEdgeLabels": "Показать метки на ребрах",
|
||||
"showEdgeLabelsHelp": "Показать метки на ребрах, указывающие на соединенные узлы"
|
||||
},
|
||||
"controlnet": {
|
||||
"amult": "a_mult",
|
||||
@@ -917,8 +938,8 @@
|
||||
"lineartAnime": "Контурный рисунок в стиле аниме",
|
||||
"mediapipeFaceDescription": "Обнаружение лиц с помощью Mediapipe",
|
||||
"hedDescription": "Целостное обнаружение границ",
|
||||
"setControlImageDimensions": "Установите размеры контрольного изображения на Ш/В",
|
||||
"scribble": "каракули",
|
||||
"setControlImageDimensions": "Скопируйте размер в Ш/В (оптимизируйте для модели)",
|
||||
"scribble": "Штрихи",
|
||||
"maxFaces": "Макс Лица",
|
||||
"mlsdDescription": "Минималистичный детектор отрезков линии",
|
||||
"resizeSimple": "Изменить размер (простой)",
|
||||
@@ -933,7 +954,18 @@
|
||||
"small": "Маленький",
|
||||
"body": "Тело",
|
||||
"hands": "Руки",
|
||||
"selectCLIPVisionModel": "Выбрать модель CLIP Vision"
|
||||
"selectCLIPVisionModel": "Выбрать модель CLIP Vision",
|
||||
"ipAdapterMethod": "Метод",
|
||||
"full": "Всё",
|
||||
"mlsd": "M-LSD",
|
||||
"h": "H",
|
||||
"style": "Только стиль",
|
||||
"dwOpenpose": "DW Openpose",
|
||||
"pidi": "PIDI",
|
||||
"composition": "Только композиция",
|
||||
"hed": "HED",
|
||||
"beginEndStepPercentShort": "Начало/конец %",
|
||||
"setControlImageDimensionsForce": "Скопируйте размер в Ш/В (игнорируйте модель)"
|
||||
},
|
||||
"boards": {
|
||||
"autoAddBoard": "Авто добавление Доски",
|
||||
@@ -1312,6 +1344,12 @@
|
||||
"paragraphs": [
|
||||
"Плавно укладывайте изображение вдоль вертикальной оси."
|
||||
]
|
||||
},
|
||||
"ipAdapterMethod": {
|
||||
"heading": "Метод",
|
||||
"paragraphs": [
|
||||
"Метод, с помощью которого применяется текущий IP-адаптер."
|
||||
]
|
||||
}
|
||||
},
|
||||
"metadata": {
|
||||
@@ -1359,7 +1397,6 @@
|
||||
"completed": "Выполнено",
|
||||
"queueBack": "Добавить в очередь",
|
||||
"cancelFailed": "Проблема с отменой элемента",
|
||||
"queueCountPrediction": "{{promptsCount}} запросов × {{iterations}} изображений -> {{count}} генераций",
|
||||
"batchQueued": "Пакетная очередь",
|
||||
"pauseFailed": "Проблема с приостановкой рендеринга",
|
||||
"clearFailed": "Проблема с очисткой очереди",
|
||||
@@ -1475,7 +1512,11 @@
|
||||
"projectWorkflows": "Рабочие процессы проекта",
|
||||
"defaultWorkflows": "Стандартные рабочие процессы",
|
||||
"name": "Имя",
|
||||
"noRecentWorkflows": "Нет последних рабочих процессов"
|
||||
"noRecentWorkflows": "Нет последних рабочих процессов",
|
||||
"loadWorkflow": "Рабочий процесс $t(common.load)",
|
||||
"convertGraph": "Конвертировать график",
|
||||
"loadFromGraph": "Загрузка рабочего процесса из графика",
|
||||
"autoLayout": "Автоматическое расположение"
|
||||
},
|
||||
"hrf": {
|
||||
"enableHrf": "Включить исправление высокого разрешения",
|
||||
@@ -1528,5 +1569,56 @@
|
||||
"addPromptTrigger": "Добавить триггер запроса",
|
||||
"compatibleEmbeddings": "Совместимые встраивания",
|
||||
"noMatchingTriggers": "Нет соответствующих триггеров"
|
||||
},
|
||||
"controlLayers": {
|
||||
"moveToBack": "На задний план",
|
||||
"moveForward": "Переместить вперёд",
|
||||
"moveBackward": "Переместить назад",
|
||||
"brushSize": "Размер кисти",
|
||||
"controlLayers": "Слои управления",
|
||||
"globalMaskOpacity": "Глобальная непрозрачность маски",
|
||||
"autoNegative": "Авто негатив",
|
||||
"deletePrompt": "Удалить запрос",
|
||||
"resetRegion": "Сбросить регион",
|
||||
"debugLayers": "Слои отладки",
|
||||
"rectangle": "Прямоугольник",
|
||||
"maskPreviewColor": "Цвет предпросмотра маски",
|
||||
"addNegativePrompt": "Добавить $t(common.negativePrompt)",
|
||||
"regionalGuidance": "Региональная точность",
|
||||
"opacity": "Непрозрачность",
|
||||
"globalControlAdapter": "Глобальный $t(controlnet.controlAdapter_one)",
|
||||
"globalControlAdapterLayer": "Глобальный $t(controlnet.controlAdapter_one) $t(unifiedCanvas.layer)",
|
||||
"globalIPAdapter": "Глобальный $t(common.ipAdapter)",
|
||||
"globalIPAdapterLayer": "Глобальный $t(common.ipAdapter) $t(unifiedCanvas.layer)",
|
||||
"opacityFilter": "Фильтр непрозрачности",
|
||||
"deleteAll": "Удалить всё",
|
||||
"addLayer": "Добавить слой",
|
||||
"moveToFront": "На передний план",
|
||||
"toggleVisibility": "Переключить видимость слоя",
|
||||
"addPositivePrompt": "Добавить $t(common.positivePrompt)",
|
||||
"addIPAdapter": "Добавить $t(common.ipAdapter)",
|
||||
"regionalGuidanceLayer": "$t(controlLayers.regionalGuidance) $t(unifiedCanvas.layer)",
|
||||
"resetProcessor": "Сброс процессора по умолчанию",
|
||||
"clearProcessor": "Чистый процессор",
|
||||
"globalInitialImage": "Глобальное исходное изображение",
|
||||
"globalInitialImageLayer": "$t(controlLayers.globalInitialImage) $t(unifiedCanvas.layer)",
|
||||
"noLayersAdded": "Без слоев",
|
||||
"layers_one": "Слой",
|
||||
"layers_few": "Слоя",
|
||||
"layers_many": "Слоев"
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
"generation": "Генерация",
|
||||
"canvas": "Холст",
|
||||
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
|
||||
"models": "Модели",
|
||||
"generationTab": "$t(ui.tabs.generation) $t(common.tab)",
|
||||
"workflows": "Рабочие процессы",
|
||||
"canvasTab": "$t(ui.tabs.canvas) $t(common.tab)",
|
||||
"queueTab": "$t(ui.tabs.queue) $t(common.tab)",
|
||||
"modelsTab": "$t(ui.tabs.models) $t(common.tab)",
|
||||
"queue": "Очередь"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -66,7 +66,7 @@
|
||||
"saveAs": "保存为",
|
||||
"ai": "ai",
|
||||
"or": "或",
|
||||
"aboutDesc": "使用 Invoke 工作?查看:",
|
||||
"aboutDesc": "使用 Invoke 工作?来看看:",
|
||||
"add": "添加",
|
||||
"loglevel": "日志级别",
|
||||
"copy": "复制",
|
||||
@@ -445,7 +445,6 @@
|
||||
"useX2Model": "图像太大,无法使用 x4 模型,使用 x2 模型作为替代",
|
||||
"tooLarge": "图像太大无法进行放大,请选择更小的图像"
|
||||
},
|
||||
"iterationsWithCount_other": "{{count}} 次迭代生成",
|
||||
"cfgRescaleMultiplier": "CFG 重缩放倍数",
|
||||
"useSize": "使用尺寸",
|
||||
"setToOptimalSize": "优化模型大小",
|
||||
@@ -853,7 +852,6 @@
|
||||
"pruneSucceeded": "从队列修剪 {{item_count}} 个已完成的项目",
|
||||
"notReady": "无法排队",
|
||||
"batchFailedToQueue": "批次加入队列失败",
|
||||
"queueCountPrediction": "{{promptsCount}} 提示词 × {{iterations}} 迭代次数 -> {{count}} 次生成",
|
||||
"batchQueued": "加入队列的批次",
|
||||
"front": "前",
|
||||
"pruneTooltip": "修剪 {{item_count}} 个已完成的项目",
|
||||
|
||||
@@ -20,15 +20,14 @@ export type LoggerNamespace =
| 'models'
| 'config'
| 'canvas'
| 'txt2img'
| 'img2img'
| 'generation'
| 'nodes'
| 'system'
| 'socketio'
| 'session'
| 'queue'
| 'dnd'
| 'regionalPrompts';
| 'controlLayers';

export const logger = (namespace: LoggerNamespace) => $logger.get().child({ namespace });

@@ -16,6 +16,7 @@ import { addCanvasMaskSavedToGalleryListener } from 'app/store/middleware/listen
|
||||
import { addCanvasMaskToControlNetListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasMaskToControlNet';
|
||||
import { addCanvasMergedListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasMerged';
|
||||
import { addCanvasSavedToGalleryListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasSavedToGallery';
|
||||
import { addControlAdapterPreprocessor } from 'app/store/middleware/listenerMiddleware/listeners/controlAdapterPreprocessor';
|
||||
import { addControlNetAutoProcessListener } from 'app/store/middleware/listenerMiddleware/listeners/controlNetAutoProcess';
|
||||
import { addControlNetImageProcessedListener } from 'app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed';
|
||||
import { addEnqueueRequestedCanvasListener } from 'app/store/middleware/listenerMiddleware/listeners/enqueueRequestedCanvas';
|
||||
@@ -31,7 +32,6 @@ import { addImagesStarredListener } from 'app/store/middleware/listenerMiddlewar
|
||||
import { addImagesUnstarredListener } from 'app/store/middleware/listenerMiddleware/listeners/imagesUnstarred';
|
||||
import { addImageToDeleteSelectedListener } from 'app/store/middleware/listenerMiddleware/listeners/imageToDeleteSelected';
|
||||
import { addImageUploadedFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/imageUploaded';
|
||||
import { addInitialImageSelectedListener } from 'app/store/middleware/listenerMiddleware/listeners/initialImageSelected';
|
||||
import { addModelSelectedListener } from 'app/store/middleware/listenerMiddleware/listeners/modelSelected';
|
||||
import { addModelsLoadedListener } from 'app/store/middleware/listenerMiddleware/listeners/modelsLoaded';
|
||||
import { addDynamicPromptsListener } from 'app/store/middleware/listenerMiddleware/listeners/promptChanged';
|
||||
@@ -72,9 +72,6 @@ const startAppListening = listenerMiddleware.startListening as AppStartListening
|
||||
// Image uploaded
|
||||
addImageUploadedFulfilledListener(startAppListening);
|
||||
|
||||
// Image selected
|
||||
addInitialImageSelectedListener(startAppListening);
|
||||
|
||||
// Image deleted
|
||||
addRequestedSingleImageDeletionListener(startAppListening);
|
||||
addDeleteBoardAndImagesFulfilledListener(startAppListening);
|
||||
@@ -157,3 +154,4 @@ addUpscaleRequestedListener(startAppListening);
|
||||
addDynamicPromptsListener(startAppListening);
|
||||
|
||||
addSetDefaultSettingsListener(startAppListening);
|
||||
addControlAdapterPreprocessor(startAppListening);
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { resetCanvas } from 'features/canvas/store/canvasSlice';
|
||||
import { controlAdaptersReset } from 'features/controlAdapters/store/controlAdaptersSlice';
|
||||
import { allLayersDeleted } from 'features/controlLayers/store/controlLayersSlice';
|
||||
import { getImageUsage } from 'features/deleteImageModal/store/selectors';
|
||||
import { nodeEditorReset } from 'features/nodes/store/nodesSlice';
|
||||
import { clearInitialImage } from 'features/parameters/store/generationSlice';
|
||||
import { imagesApi } from 'services/api/endpoints/images';
|
||||
|
||||
export const addDeleteBoardAndImagesFulfilledListener = (startAppListening: AppStartListening) => {
|
||||
@@ -14,19 +14,14 @@ export const addDeleteBoardAndImagesFulfilledListener = (startAppListening: AppS
|
||||
|
||||
// Remove all deleted images from the UI
|
||||
|
||||
let wasInitialImageReset = false;
|
||||
let wasCanvasReset = false;
|
||||
let wasNodeEditorReset = false;
|
||||
let wereControlAdaptersReset = false;
|
||||
let wereControlLayersReset = false;
|
||||
|
||||
const { generation, canvas, nodes, controlAdapters } = getState();
|
||||
const { canvas, nodes, controlAdapters, controlLayers } = getState();
|
||||
deleted_images.forEach((image_name) => {
|
||||
const imageUsage = getImageUsage(generation, canvas, nodes, controlAdapters, image_name);
|
||||
|
||||
if (imageUsage.isInitialImage && !wasInitialImageReset) {
|
||||
dispatch(clearInitialImage());
|
||||
wasInitialImageReset = true;
|
||||
}
|
||||
const imageUsage = getImageUsage(canvas, nodes, controlAdapters, controlLayers.present, image_name);
|
||||
|
||||
if (imageUsage.isCanvasImage && !wasCanvasReset) {
|
||||
dispatch(resetCanvas());
|
||||
@@ -42,6 +37,11 @@ export const addDeleteBoardAndImagesFulfilledListener = (startAppListening: AppS
|
||||
dispatch(controlAdaptersReset());
|
||||
wereControlAdaptersReset = true;
|
||||
}
|
||||
|
||||
if (imageUsage.isControlLayerImage && !wereControlLayersReset) {
|
||||
dispatch(allLayersDeleted());
|
||||
wereControlLayersReset = true;
|
||||
}
|
||||
});
|
||||
},
|
||||
});
|
||||
|
||||
@@ -0,0 +1,175 @@
|
||||
import { isAnyOf } from '@reduxjs/toolkit';
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import type { AppDispatch } from 'app/store/store';
|
||||
import { parseify } from 'common/util/serialize';
|
||||
import {
|
||||
caLayerImageChanged,
|
||||
caLayerModelChanged,
|
||||
caLayerProcessedImageChanged,
|
||||
caLayerProcessorConfigChanged,
|
||||
caLayerProcessorPendingBatchIdChanged,
|
||||
caLayerRecalled,
|
||||
isControlAdapterLayer,
|
||||
} from 'features/controlLayers/store/controlLayersSlice';
|
||||
import { CA_PROCESSOR_DATA } from 'features/controlLayers/util/controlAdapters';
|
||||
import { isImageOutput } from 'features/nodes/types/common';
|
||||
import { addToast } from 'features/system/store/systemSlice';
|
||||
import { t } from 'i18next';
|
||||
import { getImageDTO } from 'services/api/endpoints/images';
|
||||
import { queueApi } from 'services/api/endpoints/queue';
|
||||
import type { BatchConfig } from 'services/api/types';
|
||||
import { socketInvocationComplete } from 'services/events/actions';
|
||||
import { assert } from 'tsafe';
|
||||
|
||||
const matcher = isAnyOf(caLayerImageChanged, caLayerProcessorConfigChanged, caLayerModelChanged, caLayerRecalled);
|
||||
|
||||
const DEBOUNCE_MS = 300;
|
||||
const log = logger('session');
|
||||
|
||||
/**
|
||||
* Simple helper to cancel a batch and reset the pending batch ID
|
||||
*/
|
||||
const cancelProcessorBatch = async (dispatch: AppDispatch, layerId: string, batchId: string) => {
|
||||
const req = dispatch(queueApi.endpoints.cancelByBatchIds.initiate({ batch_ids: [batchId] }));
|
||||
log.trace({ batchId }, 'Cancelling existing preprocessor batch');
|
||||
try {
|
||||
await req.unwrap();
|
||||
} catch {
|
||||
// no-op
|
||||
} finally {
|
||||
req.reset();
|
||||
// Always reset the pending batch ID - the cancel req could fail if the batch doesn't exist
|
||||
dispatch(caLayerProcessorPendingBatchIdChanged({ layerId, batchId: null }));
|
||||
}
|
||||
};
|
||||
|
||||
export const addControlAdapterPreprocessor = (startAppListening: AppStartListening) => {
|
||||
startAppListening({
|
||||
matcher,
|
||||
effect: async (action, { dispatch, getState, cancelActiveListeners, delay, take, signal }) => {
|
||||
const layerId = caLayerRecalled.match(action) ? action.payload.id : action.payload.layerId;
|
||||
|
||||
// Cancel any in-progress instances of this listener
|
||||
cancelActiveListeners();
|
||||
log.trace('Control Layer CA auto-process triggered');
|
||||
|
||||
// Delay before starting actual work
|
||||
await delay(DEBOUNCE_MS);
|
||||
|
||||
// Double-check that we are still eligible for processing
|
||||
const state = getState();
|
||||
const layer = state.controlLayers.present.layers.filter(isControlAdapterLayer).find((l) => l.id === layerId);
|
||||
|
||||
// If we have no image or there is no processor config, bail
|
||||
if (!layer) {
|
||||
return;
|
||||
}
|
||||
|
||||
const image = layer.controlAdapter.image;
|
||||
const config = layer.controlAdapter.processorConfig;
|
||||
|
||||
if (!image || !config) {
|
||||
// The user has reset the image or config, so we should clear the processed image
|
||||
dispatch(caLayerProcessedImageChanged({ layerId, imageDTO: null }));
|
||||
}
|
||||
|
||||
// At this point, the user has stopped fiddling with the processor settings and there is a processor selected.
|
||||
|
||||
// If there is a pending processor batch, cancel it.
|
||||
if (layer.controlAdapter.processorPendingBatchId) {
|
||||
cancelProcessorBatch(dispatch, layerId, layer.controlAdapter.processorPendingBatchId);
|
||||
}
|
||||
|
||||
// @ts-expect-error: TS isn't able to narrow the typing of buildNode and `config` will error...
|
||||
const processorNode = CA_PROCESSOR_DATA[config.type].buildNode(image, config);
|
||||
const enqueueBatchArg: BatchConfig = {
|
||||
prepend: true,
|
||||
batch: {
|
||||
graph: {
|
||||
nodes: {
|
||||
[processorNode.id]: {
|
||||
...processorNode,
|
||||
// Control images are always intermediate - do not save to gallery
|
||||
is_intermediate: true,
|
||||
},
|
||||
},
|
||||
edges: [],
|
||||
},
|
||||
runs: 1,
|
||||
},
|
||||
};
|
||||
|
||||
// Kick off the processor batch
|
||||
const req = dispatch(
|
||||
queueApi.endpoints.enqueueBatch.initiate(enqueueBatchArg, {
|
||||
fixedCacheKey: 'enqueueBatch',
|
||||
})
|
||||
);
|
||||
|
||||
try {
|
||||
const enqueueResult = await req.unwrap();
|
||||
// TODO(psyche): Update the pydantic models, pretty sure we will _always_ have a batch_id here, but the model says it's optional
|
||||
assert(enqueueResult.batch.batch_id, 'Batch ID not returned from queue');
|
||||
dispatch(caLayerProcessorPendingBatchIdChanged({ layerId, batchId: enqueueResult.batch.batch_id }));
|
||||
log.debug({ enqueueResult: parseify(enqueueResult) }, t('queue.graphQueued'));
|
||||
|
||||
// Wait for the processor node to complete
|
||||
const [invocationCompleteAction] = await take(
|
||||
(action): action is ReturnType<typeof socketInvocationComplete> =>
|
||||
socketInvocationComplete.match(action) &&
|
||||
action.payload.data.queue_batch_id === enqueueResult.batch.batch_id &&
|
||||
action.payload.data.source_node_id === processorNode.id
|
||||
);
|
||||
|
||||
// We still have to check the output type
|
||||
assert(
|
||||
isImageOutput(invocationCompleteAction.payload.data.result),
|
||||
`Processor did not return an image output, got: ${invocationCompleteAction.payload.data.result}`
|
||||
);
|
||||
const { image_name } = invocationCompleteAction.payload.data.result.image;
|
||||
|
||||
const imageDTO = await getImageDTO(image_name);
|
||||
assert(imageDTO, "Failed to fetch processor output's image DTO");
|
||||
|
||||
// Whew! We made it. Update the layer with the processed image
|
||||
log.debug({ layerId, imageDTO }, 'ControlNet image processed');
|
||||
dispatch(caLayerProcessedImageChanged({ layerId, imageDTO }));
|
||||
dispatch(caLayerProcessorPendingBatchIdChanged({ layerId, batchId: null }));
|
||||
} catch (error) {
|
||||
if (signal.aborted) {
|
||||
// The listener was canceled - we need to cancel the pending processor batch, if there is one (could have changed by now).
|
||||
const pendingBatchId = getState()
|
||||
.controlLayers.present.layers.filter(isControlAdapterLayer)
|
||||
.find((l) => l.id === layerId)?.controlAdapter.processorPendingBatchId;
|
||||
if (pendingBatchId) {
|
||||
cancelProcessorBatch(dispatch, layerId, pendingBatchId);
|
||||
}
|
||||
log.trace('Control Adapter preprocessor cancelled');
|
||||
} else {
|
||||
// Some other error condition...
|
||||
console.log(error);
|
||||
log.error({ enqueueBatchArg: parseify(enqueueBatchArg) }, t('queue.graphFailedToQueue'));
|
||||
|
||||
if (error instanceof Object) {
|
||||
if ('data' in error && 'status' in error) {
|
||||
if (error.status === 403) {
|
||||
dispatch(caLayerImageChanged({ layerId, imageDTO: null }));
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dispatch(
|
||||
addToast({
|
||||
title: t('queue.graphFailedToQueue'),
|
||||
status: 'error',
|
||||
})
|
||||
);
|
||||
}
|
||||
} finally {
|
||||
req.reset();
|
||||
}
|
||||
},
|
||||
});
|
||||
};
|
||||
@@ -30,7 +30,7 @@ import type { ImageDTO } from 'services/api/types';
|
||||
export const addEnqueueRequestedCanvasListener = (startAppListening: AppStartListening) => {
|
||||
startAppListening({
|
||||
predicate: (action): action is ReturnType<typeof enqueueRequested> =>
|
||||
enqueueRequested.match(action) && action.payload.tabName === 'unifiedCanvas',
|
||||
enqueueRequested.match(action) && action.payload.tabName === 'canvas',
|
||||
effect: async (action, { getState, dispatch }) => {
|
||||
const log = logger('queue');
|
||||
const { prepend } = action.payload;
|
||||
|
||||
@@ -1,35 +1,27 @@
|
||||
import { enqueueRequested } from 'app/store/actions';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { isImageViewerOpenChanged } from 'features/gallery/store/gallerySlice';
|
||||
import { buildGenerationTabGraph } from 'features/nodes/util/graph/buildGenerationTabGraph';
|
||||
import { buildGenerationTabSDXLGraph } from 'features/nodes/util/graph/buildGenerationTabSDXLGraph';
|
||||
import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
|
||||
import { buildLinearImageToImageGraph } from 'features/nodes/util/graph/buildLinearImageToImageGraph';
|
||||
import { buildLinearSDXLImageToImageGraph } from 'features/nodes/util/graph/buildLinearSDXLImageToImageGraph';
|
||||
import { buildLinearSDXLTextToImageGraph } from 'features/nodes/util/graph/buildLinearSDXLTextToImageGraph';
|
||||
import { buildLinearTextToImageGraph } from 'features/nodes/util/graph/buildLinearTextToImageGraph';
|
||||
import { queueApi } from 'services/api/endpoints/queue';
|
||||
|
||||
export const addEnqueueRequestedLinear = (startAppListening: AppStartListening) => {
|
||||
startAppListening({
|
||||
predicate: (action): action is ReturnType<typeof enqueueRequested> =>
|
||||
enqueueRequested.match(action) && (action.payload.tabName === 'txt2img' || action.payload.tabName === 'img2img'),
|
||||
enqueueRequested.match(action) && action.payload.tabName === 'generation',
|
||||
effect: async (action, { getState, dispatch }) => {
|
||||
const state = getState();
|
||||
const { shouldShowProgressInViewer } = state.ui;
|
||||
const model = state.generation.model;
|
||||
const { prepend } = action.payload;
|
||||
|
||||
let graph;
|
||||
|
||||
if (model && model.base === 'sdxl') {
|
||||
if (action.payload.tabName === 'txt2img') {
|
||||
graph = await buildLinearSDXLTextToImageGraph(state);
|
||||
} else {
|
||||
graph = await buildLinearSDXLImageToImageGraph(state);
|
||||
}
|
||||
graph = await buildGenerationTabSDXLGraph(state);
|
||||
} else {
|
||||
if (action.payload.tabName === 'txt2img') {
|
||||
graph = await buildLinearTextToImageGraph(state);
|
||||
} else {
|
||||
graph = await buildLinearImageToImageGraph(state);
|
||||
}
|
||||
graph = await buildGenerationTabGraph(state);
|
||||
}
|
||||
|
||||
const batchConfig = prepareLinearUIBatch(state, graph, prepend);
|
||||
@@ -39,7 +31,14 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)
|
||||
fixedCacheKey: 'enqueueBatch',
|
||||
})
|
||||
);
|
||||
req.reset();
|
||||
try {
|
||||
await req.unwrap();
|
||||
if (shouldShowProgressInViewer) {
|
||||
dispatch(isImageViewerOpenChanged(true));
|
||||
}
|
||||
} finally {
|
||||
req.reset();
|
||||
}
|
||||
},
|
||||
});
|
||||
};
|
||||
|
||||
@@ -8,7 +8,7 @@ import type { BatchConfig } from 'services/api/types';
|
||||
export const addEnqueueRequestedNodes = (startAppListening: AppStartListening) => {
|
||||
startAppListening({
|
||||
predicate: (action): action is ReturnType<typeof enqueueRequested> =>
|
||||
enqueueRequested.match(action) && action.payload.tabName === 'nodes',
|
||||
enqueueRequested.match(action) && action.payload.tabName === 'workflows',
|
||||
effect: async (action, { getState, dispatch }) => {
|
||||
const state = getState();
|
||||
const { nodes, edges } = state.nodes;
|
||||
@@ -39,7 +39,11 @@ export const addEnqueueRequestedNodes = (startAppListening: AppStartListening) =
|
||||
fixedCacheKey: 'enqueueBatch',
|
||||
})
|
||||
);
|
||||
req.reset();
|
||||
try {
|
||||
await req.unwrap();
|
||||
} finally {
|
||||
req.reset();
|
||||
}
|
||||
},
|
||||
});
|
||||
};
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import type { AppDispatch, RootState } from 'app/store/store';
|
||||
import { resetCanvas } from 'features/canvas/store/canvasSlice';
|
||||
import {
|
||||
controlAdapterImageChanged,
|
||||
@@ -7,6 +8,13 @@ import {
|
||||
selectControlAdapterAll,
|
||||
} from 'features/controlAdapters/store/controlAdaptersSlice';
|
||||
import { isControlNetOrT2IAdapter } from 'features/controlAdapters/store/types';
|
||||
import {
|
||||
isControlAdapterLayer,
|
||||
isInitialImageLayer,
|
||||
isIPAdapterLayer,
|
||||
isRegionalGuidanceLayer,
|
||||
layerDeleted,
|
||||
} from 'features/controlLayers/store/controlLayersSlice';
|
||||
import { imageDeletionConfirmed } from 'features/deleteImageModal/store/actions';
|
||||
import { isModalOpenChanged } from 'features/deleteImageModal/store/slice';
|
||||
import { selectListImagesQueryArgs } from 'features/gallery/store/gallerySelectors';
|
||||
@@ -14,12 +22,82 @@ import { imageSelected } from 'features/gallery/store/gallerySlice';
|
||||
import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice';
|
||||
import { isImageFieldInputInstance } from 'features/nodes/types/field';
|
||||
import { isInvocationNode } from 'features/nodes/types/invocation';
|
||||
import { clearInitialImage } from 'features/parameters/store/generationSlice';
|
||||
import { clamp, forEach } from 'lodash-es';
|
||||
import { api } from 'services/api';
|
||||
import { imagesApi } from 'services/api/endpoints/images';
|
||||
import type { ImageDTO } from 'services/api/types';
|
||||
import { imagesSelectors } from 'services/api/util';
|
||||
|
||||
const deleteNodesImages = (state: RootState, dispatch: AppDispatch, imageDTO: ImageDTO) => {
|
||||
state.nodes.nodes.forEach((node) => {
|
||||
if (!isInvocationNode(node)) {
|
||||
return;
|
||||
}
|
||||
|
||||
forEach(node.data.inputs, (input) => {
|
||||
if (isImageFieldInputInstance(input) && input.value?.image_name === imageDTO.image_name) {
|
||||
dispatch(
|
||||
fieldImageValueChanged({
|
||||
nodeId: node.data.id,
|
||||
fieldName: input.name,
|
||||
value: undefined,
|
||||
})
|
||||
);
|
||||
}
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
const deleteControlAdapterImages = (state: RootState, dispatch: AppDispatch, imageDTO: ImageDTO) => {
|
||||
forEach(selectControlAdapterAll(state.controlAdapters), (ca) => {
|
||||
if (
|
||||
ca.controlImage === imageDTO.image_name ||
|
||||
(isControlNetOrT2IAdapter(ca) && ca.processedControlImage === imageDTO.image_name)
|
||||
) {
|
||||
dispatch(
|
||||
controlAdapterImageChanged({
|
||||
id: ca.id,
|
||||
controlImage: null,
|
||||
})
|
||||
);
|
||||
dispatch(
|
||||
controlAdapterProcessedImageChanged({
|
||||
id: ca.id,
|
||||
processedControlImage: null,
|
||||
})
|
||||
);
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
const deleteControlLayerImages = (state: RootState, dispatch: AppDispatch, imageDTO: ImageDTO) => {
|
||||
state.controlLayers.present.layers.forEach((l) => {
|
||||
if (isRegionalGuidanceLayer(l)) {
|
||||
if (l.ipAdapters.some((ipa) => ipa.image?.name === imageDTO.image_name)) {
|
||||
dispatch(layerDeleted(l.id));
|
||||
}
|
||||
}
|
||||
if (isControlAdapterLayer(l)) {
|
||||
if (
|
||||
l.controlAdapter.image?.name === imageDTO.image_name ||
|
||||
l.controlAdapter.processedImage?.name === imageDTO.image_name
|
||||
) {
|
||||
dispatch(layerDeleted(l.id));
|
||||
}
|
||||
}
|
||||
if (isIPAdapterLayer(l)) {
|
||||
if (l.ipAdapter.image?.name === imageDTO.image_name) {
|
||||
dispatch(layerDeleted(l.id));
|
||||
}
|
||||
}
|
||||
if (isInitialImageLayer(l)) {
|
||||
if (l.image?.name === imageDTO.image_name) {
|
||||
dispatch(layerDeleted(l.id));
|
||||
}
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
export const addRequestedSingleImageDeletionListener = (startAppListening: AppStartListening) => {
|
||||
startAppListening({
|
||||
actionCreator: imageDeletionConfirmed,
|
||||
@@ -73,50 +151,9 @@ export const addRequestedSingleImageDeletionListener = (startAppListening: AppSt
|
||||
}
|
||||
|
||||
imageDTOs.forEach((imageDTO) => {
|
||||
// reset init image if we deleted it
|
||||
if (getState().generation.initialImage?.imageName === imageDTO.image_name) {
|
||||
dispatch(clearInitialImage());
|
||||
}
|
||||
|
||||
// reset control adapters that use the deleted images
|
||||
forEach(selectControlAdapterAll(getState().controlAdapters), (ca) => {
|
||||
if (
|
||||
ca.controlImage === imageDTO.image_name ||
|
||||
(isControlNetOrT2IAdapter(ca) && ca.processedControlImage === imageDTO.image_name)
|
||||
) {
|
||||
dispatch(
|
||||
controlAdapterImageChanged({
|
||||
id: ca.id,
|
||||
controlImage: null,
|
||||
})
|
||||
);
|
||||
dispatch(
|
||||
controlAdapterProcessedImageChanged({
|
||||
id: ca.id,
|
||||
processedControlImage: null,
|
||||
})
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
// reset nodes that use the deleted images
|
||||
getState().nodes.nodes.forEach((node) => {
|
||||
if (!isInvocationNode(node)) {
|
||||
return;
|
||||
}
|
||||
|
||||
forEach(node.data.inputs, (input) => {
|
||||
if (isImageFieldInputInstance(input) && input.value?.image_name === imageDTO.image_name) {
|
||||
dispatch(
|
||||
fieldImageValueChanged({
|
||||
nodeId: node.data.id,
|
||||
fieldName: input.name,
|
||||
value: undefined,
|
||||
})
|
||||
);
|
||||
}
|
||||
});
|
||||
});
|
||||
deleteControlAdapterImages(state, dispatch, imageDTO);
|
||||
deleteNodesImages(state, dispatch, imageDTO);
|
||||
deleteControlLayerImages(state, dispatch, imageDTO);
|
||||
});
|
||||
|
||||
// Delete from server
|
||||
@@ -168,50 +205,9 @@ export const addRequestedSingleImageDeletionListener = (startAppListening: AppSt
|
||||
}
|
||||
|
||||
imageDTOs.forEach((imageDTO) => {
|
||||
// reset init image if we deleted it
|
||||
if (getState().generation.initialImage?.imageName === imageDTO.image_name) {
|
||||
dispatch(clearInitialImage());
|
||||
}
|
||||
|
||||
// reset control adapters that use the deleted images
|
||||
forEach(selectControlAdapterAll(getState().controlAdapters), (ca) => {
|
||||
if (
|
||||
ca.controlImage === imageDTO.image_name ||
|
||||
(isControlNetOrT2IAdapter(ca) && ca.processedControlImage === imageDTO.image_name)
|
||||
) {
|
||||
dispatch(
|
||||
controlAdapterImageChanged({
|
||||
id: ca.id,
|
||||
controlImage: null,
|
||||
})
|
||||
);
|
||||
dispatch(
|
||||
controlAdapterProcessedImageChanged({
|
||||
id: ca.id,
|
||||
processedControlImage: null,
|
||||
})
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
// reset nodes that use the deleted images
|
||||
getState().nodes.nodes.forEach((node) => {
|
||||
if (!isInvocationNode(node)) {
|
||||
return;
|
||||
}
|
||||
|
||||
forEach(node.data.inputs, (input) => {
|
||||
if (isImageFieldInputInstance(input) && input.value?.image_name === imageDTO.image_name) {
|
||||
dispatch(
|
||||
fieldImageValueChanged({
|
||||
nodeId: node.data.id,
|
||||
fieldName: input.name,
|
||||
value: undefined,
|
||||
})
|
||||
);
|
||||
}
|
||||
});
|
||||
});
|
||||
deleteControlAdapterImages(state, dispatch, imageDTO);
|
||||
deleteNodesImages(state, dispatch, imageDTO);
|
||||
deleteControlLayerImages(state, dispatch, imageDTO);
|
||||
});
|
||||
} catch {
|
||||
// no-op
|
||||
|
||||
@@ -7,10 +7,16 @@ import {
|
||||
controlAdapterImageChanged,
|
||||
controlAdapterIsEnabledChanged,
|
||||
} from 'features/controlAdapters/store/controlAdaptersSlice';
|
||||
import {
|
||||
caLayerImageChanged,
|
||||
iiLayerImageChanged,
|
||||
ipaLayerImageChanged,
|
||||
rgLayerIPAdapterImageChanged,
|
||||
} from 'features/controlLayers/store/controlLayersSlice';
|
||||
import type { TypesafeDraggableData, TypesafeDroppableData } from 'features/dnd/types';
|
||||
import { imageSelected } from 'features/gallery/store/gallerySlice';
|
||||
import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice';
|
||||
import { initialImageChanged, selectOptimalDimension } from 'features/parameters/store/generationSlice';
|
||||
import { selectOptimalDimension } from 'features/parameters/store/generationSlice';
|
||||
import { imagesApi } from 'services/api/endpoints/images';
|
||||
|
||||
export const dndDropped = createAction<{
|
||||
@@ -47,18 +53,6 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) =>
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* Image dropped on initial image
|
||||
*/
|
||||
if (
|
||||
overData.actionType === 'SET_INITIAL_IMAGE' &&
|
||||
activeData.payloadType === 'IMAGE_DTO' &&
|
||||
activeData.payload.imageDTO
|
||||
) {
|
||||
dispatch(initialImageChanged(activeData.payload.imageDTO));
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* Image dropped on ControlNet
|
||||
*/
|
||||
@@ -83,6 +77,79 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) =>
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* Image dropped on Control Adapter Layer
|
||||
*/
|
||||
if (
|
||||
overData.actionType === 'SET_CA_LAYER_IMAGE' &&
|
||||
activeData.payloadType === 'IMAGE_DTO' &&
|
||||
activeData.payload.imageDTO
|
||||
) {
|
||||
const { layerId } = overData.context;
|
||||
dispatch(
|
||||
caLayerImageChanged({
|
||||
layerId,
|
||||
imageDTO: activeData.payload.imageDTO,
|
||||
})
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* Image dropped on IP Adapter Layer
|
||||
*/
|
||||
if (
|
||||
overData.actionType === 'SET_IPA_LAYER_IMAGE' &&
|
||||
activeData.payloadType === 'IMAGE_DTO' &&
|
||||
activeData.payload.imageDTO
|
||||
) {
|
||||
const { layerId } = overData.context;
|
||||
dispatch(
|
||||
ipaLayerImageChanged({
|
||||
layerId,
|
||||
imageDTO: activeData.payload.imageDTO,
|
||||
})
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* Image dropped on RG Layer IP Adapter
|
||||
*/
|
||||
if (
|
||||
overData.actionType === 'SET_RG_LAYER_IP_ADAPTER_IMAGE' &&
|
||||
activeData.payloadType === 'IMAGE_DTO' &&
|
||||
activeData.payload.imageDTO
|
||||
) {
|
||||
const { layerId, ipAdapterId } = overData.context;
|
||||
dispatch(
|
||||
rgLayerIPAdapterImageChanged({
|
||||
layerId,
|
||||
ipAdapterId,
|
||||
imageDTO: activeData.payload.imageDTO,
|
||||
})
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* Image dropped on II Layer Image
|
||||
*/
|
||||
if (
|
||||
overData.actionType === 'SET_II_LAYER_IMAGE' &&
|
||||
activeData.payloadType === 'IMAGE_DTO' &&
|
||||
activeData.payload.imageDTO
|
||||
) {
|
||||
const { layerId } = overData.context;
|
||||
dispatch(
|
||||
iiLayerImageChanged({
|
||||
layerId,
|
||||
imageDTO: activeData.payload.imageDTO,
|
||||
})
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* Image dropped on Canvas
|
||||
*/
|
||||
|
||||
@@ -14,7 +14,6 @@ export const addImageToDeleteSelectedListener = (startAppListening: AppStartList
|
||||
|
||||
const isImageInUse =
|
||||
imagesUsage.some((i) => i.isCanvasImage) ||
|
||||
imagesUsage.some((i) => i.isInitialImage) ||
|
||||
imagesUsage.some((i) => i.isControlImage) ||
|
||||
imagesUsage.some((i) => i.isNodesImage);
|
||||
|
||||
|
||||
@@ -6,8 +6,14 @@ import {
|
||||
controlAdapterImageChanged,
|
||||
controlAdapterIsEnabledChanged,
|
||||
} from 'features/controlAdapters/store/controlAdaptersSlice';
|
||||
import {
|
||||
caLayerImageChanged,
|
||||
iiLayerImageChanged,
|
||||
ipaLayerImageChanged,
|
||||
rgLayerIPAdapterImageChanged,
|
||||
} from 'features/controlLayers/store/controlLayersSlice';
|
||||
import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice';
|
||||
import { initialImageChanged, selectOptimalDimension } from 'features/parameters/store/generationSlice';
|
||||
import { selectOptimalDimension } from 'features/parameters/store/generationSlice';
|
||||
import { addToast } from 'features/system/store/systemSlice';
|
||||
import { t } from 'i18next';
|
||||
import { omit } from 'lodash-es';
|
||||
@@ -108,15 +114,48 @@ export const addImageUploadedFulfilledListener = (startAppListening: AppStartLis
|
||||
return;
|
||||
}
|
||||
|
||||
if (postUploadAction?.type === 'SET_INITIAL_IMAGE') {
|
||||
dispatch(initialImageChanged(imageDTO));
|
||||
if (postUploadAction?.type === 'SET_CA_LAYER_IMAGE') {
|
||||
const { layerId } = postUploadAction;
|
||||
dispatch(caLayerImageChanged({ layerId, imageDTO }));
|
||||
dispatch(
|
||||
addToast({
|
||||
...DEFAULT_UPLOADED_TOAST,
|
||||
description: t('toast.setInitialImage'),
|
||||
description: t('toast.setControlImage'),
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
if (postUploadAction?.type === 'SET_IPA_LAYER_IMAGE') {
|
||||
const { layerId } = postUploadAction;
|
||||
dispatch(ipaLayerImageChanged({ layerId, imageDTO }));
|
||||
dispatch(
|
||||
addToast({
|
||||
...DEFAULT_UPLOADED_TOAST,
|
||||
description: t('toast.setControlImage'),
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
if (postUploadAction?.type === 'SET_RG_LAYER_IP_ADAPTER_IMAGE') {
|
||||
const { layerId, ipAdapterId } = postUploadAction;
|
||||
dispatch(rgLayerIPAdapterImageChanged({ layerId, ipAdapterId, imageDTO }));
|
||||
dispatch(
|
||||
addToast({
|
||||
...DEFAULT_UPLOADED_TOAST,
|
||||
description: t('toast.setControlImage'),
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
if (postUploadAction?.type === 'SET_II_LAYER_IMAGE') {
|
||||
const { layerId } = postUploadAction;
|
||||
dispatch(iiLayerImageChanged({ layerId, imageDTO }));
|
||||
dispatch(
|
||||
addToast({
|
||||
...DEFAULT_UPLOADED_TOAST,
|
||||
description: t('toast.setControlImage'),
|
||||
})
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
if (postUploadAction?.type === 'SET_NODES_IMAGE') {
|
||||
|
||||
@@ -1,21 +0,0 @@
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { initialImageSelected } from 'features/parameters/store/actions';
|
||||
import { initialImageChanged } from 'features/parameters/store/generationSlice';
|
||||
import { addToast } from 'features/system/store/systemSlice';
|
||||
import { makeToast } from 'features/system/util/makeToast';
|
||||
import { t } from 'i18next';
|
||||
|
||||
export const addInitialImageSelectedListener = (startAppListening: AppStartListening) => {
|
||||
startAppListening({
|
||||
actionCreator: initialImageSelected,
|
||||
effect: (action, { dispatch }) => {
|
||||
if (!action.payload) {
|
||||
dispatch(addToast(makeToast({ title: t('toast.imageNotLoadedDesc'), status: 'error' })));
|
||||
return;
|
||||
}
|
||||
|
||||
dispatch(initialImageChanged(action.payload));
|
||||
dispatch(addToast(makeToast(t('toast.sentToImageToImage'))));
|
||||
},
|
||||
});
|
||||
};
|
||||
@@ -6,9 +6,10 @@ import {
|
||||
controlAdapterModelCleared,
|
||||
selectControlAdapterAll,
|
||||
} from 'features/controlAdapters/store/controlAdaptersSlice';
|
||||
import { heightChanged, widthChanged } from 'features/controlLayers/store/controlLayersSlice';
|
||||
import { loraRemoved } from 'features/lora/store/loraSlice';
|
||||
import { calculateNewSize } from 'features/parameters/components/ImageSize/calculateNewSize';
|
||||
import { heightChanged, modelChanged, vaeSelected, widthChanged } from 'features/parameters/store/generationSlice';
|
||||
import { modelChanged, vaeSelected } from 'features/parameters/store/generationSlice';
|
||||
import { zParameterModel, zParameterVAEModel } from 'features/parameters/types/parameterSchemas';
|
||||
import { getIsSizeOptimal, getOptimalDimension } from 'features/parameters/util/optimalDimension';
|
||||
import { refinerModelChanged } from 'features/sdxl/store/sdxlSlice';
|
||||
@@ -69,16 +70,22 @@ const handleMainModels: ModelHandler = (models, state, dispatch, log) => {
|
||||
dispatch(modelChanged(defaultModelInList, currentModel));
|
||||
|
||||
const optimalDimension = getOptimalDimension(defaultModelInList);
|
||||
if (getIsSizeOptimal(state.generation.width, state.generation.height, optimalDimension)) {
|
||||
if (
|
||||
getIsSizeOptimal(
|
||||
state.controlLayers.present.size.width,
|
||||
state.controlLayers.present.size.height,
|
||||
optimalDimension
|
||||
)
|
||||
) {
|
||||
return;
|
||||
}
|
||||
const { width, height } = calculateNewSize(
|
||||
state.generation.aspectRatio.value,
|
||||
state.controlLayers.present.size.aspectRatio.value,
|
||||
optimalDimension * optimalDimension
|
||||
);
|
||||
|
||||
dispatch(widthChanged(width));
|
||||
dispatch(heightChanged(height));
|
||||
dispatch(widthChanged({ width }));
|
||||
dispatch(heightChanged({ height }));
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import { isAnyOf } from '@reduxjs/toolkit';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { positivePromptChanged } from 'features/controlLayers/store/controlLayersSlice';
|
||||
import {
|
||||
combinatorialToggled,
|
||||
isErrorChanged,
|
||||
@@ -10,11 +11,16 @@ import {
|
||||
promptsChanged,
|
||||
} from 'features/dynamicPrompts/store/dynamicPromptsSlice';
|
||||
import { getShouldProcessPrompt } from 'features/dynamicPrompts/util/getShouldProcessPrompt';
|
||||
import { setPositivePrompt } from 'features/parameters/store/generationSlice';
|
||||
import { utilitiesApi } from 'services/api/endpoints/utilities';
|
||||
import { socketConnected } from 'services/events/actions';
|
||||
|
||||
const matcher = isAnyOf(setPositivePrompt, combinatorialToggled, maxPromptsChanged, maxPromptsReset, socketConnected);
|
||||
const matcher = isAnyOf(
|
||||
positivePromptChanged,
|
||||
combinatorialToggled,
|
||||
maxPromptsChanged,
|
||||
maxPromptsReset,
|
||||
socketConnected
|
||||
);
|
||||
|
||||
export const addDynamicPromptsListener = (startAppListening: AppStartListening) => {
|
||||
startAppListening({
|
||||
@@ -22,7 +28,7 @@ export const addDynamicPromptsListener = (startAppListening: AppStartListening)
|
||||
effect: async (action, { dispatch, getState, cancelActiveListeners, delay }) => {
|
||||
cancelActiveListeners();
|
||||
const state = getState();
|
||||
const { positivePrompt } = state.generation;
|
||||
const { positivePrompt } = state.controlLayers.present;
|
||||
const { maxPrompts } = state.dynamicPrompts;
|
||||
|
||||
if (state.config.disabledFeatures.includes('dynamicPrompting')) {
|
||||
@@ -32,7 +38,7 @@ export const addDynamicPromptsListener = (startAppListening: AppStartListening)
|
||||
const cachedPrompts = utilitiesApi.endpoints.dynamicPrompts.select({
|
||||
prompt: positivePrompt,
|
||||
max_prompts: maxPrompts,
|
||||
})(getState()).data;
|
||||
})(state).data;
|
||||
|
||||
if (cachedPrompts) {
|
||||
dispatch(promptsChanged(cachedPrompts.prompts));
|
||||
@@ -40,8 +46,8 @@ export const addDynamicPromptsListener = (startAppListening: AppStartListening)
|
||||
return;
|
||||
}
|
||||
|
||||
if (!getShouldProcessPrompt(state.generation.positivePrompt)) {
|
||||
dispatch(promptsChanged([state.generation.positivePrompt]));
|
||||
if (!getShouldProcessPrompt(positivePrompt)) {
|
||||
dispatch(promptsChanged([positivePrompt]));
|
||||
dispatch(parsingErrorChanged(undefined));
|
||||
dispatch(isErrorChanged(false));
|
||||
return;
|
||||
|
||||
@@ -1,14 +1,13 @@
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { heightChanged, widthChanged } from 'features/controlLayers/store/controlLayersSlice';
|
||||
import { setDefaultSettings } from 'features/parameters/store/actions';
|
||||
import {
|
||||
heightRecalled,
|
||||
setCfgRescaleMultiplier,
|
||||
setCfgScale,
|
||||
setScheduler,
|
||||
setSteps,
|
||||
vaePrecisionChanged,
|
||||
vaeSelected,
|
||||
widthRecalled,
|
||||
} from 'features/parameters/store/generationSlice';
|
||||
import {
|
||||
isParameterCFGRescaleMultiplier,
|
||||
@@ -97,16 +96,16 @@ export const addSetDefaultSettingsListener = (startAppListening: AppStartListeni
|
||||
dispatch(setScheduler(scheduler));
|
||||
}
|
||||
}
|
||||
|
||||
const setSizeOptions = { updateAspectRatio: true, clamp: true };
|
||||
if (width) {
|
||||
if (isParameterWidth(width)) {
|
||||
dispatch(widthRecalled(width));
|
||||
dispatch(widthChanged({ width, ...setSizeOptions }));
|
||||
}
|
||||
}
|
||||
|
||||
if (height) {
|
||||
if (isParameterHeight(height)) {
|
||||
dispatch(heightRecalled(height));
|
||||
dispatch(heightChanged({ height, ...setSizeOptions }));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -2,7 +2,12 @@ import { logger } from 'app/logging/logger';
|
||||
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
|
||||
import { parseify } from 'common/util/serialize';
|
||||
import { addImageToStagingArea } from 'features/canvas/store/canvasSlice';
|
||||
import { boardIdSelected, galleryViewChanged, imageSelected } from 'features/gallery/store/gallerySlice';
|
||||
import {
|
||||
boardIdSelected,
|
||||
galleryViewChanged,
|
||||
imageSelected,
|
||||
isImageViewerOpenChanged,
|
||||
} from 'features/gallery/store/gallerySlice';
|
||||
import { IMAGE_CATEGORIES } from 'features/gallery/store/types';
|
||||
import { isImageOutput } from 'features/nodes/types/common';
|
||||
import { CANVAS_OUTPUT } from 'features/nodes/util/graph/constants';
|
||||
@@ -101,6 +106,7 @@ export const addInvocationCompleteEventListener = (startAppListening: AppStartLi
|
||||
}
|
||||
|
||||
dispatch(imageSelected(imageDTO));
|
||||
dispatch(isImageViewerOpenChanged(true));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,6 +10,11 @@ import {
|
||||
controlAdaptersPersistConfig,
|
||||
controlAdaptersSlice,
|
||||
} from 'features/controlAdapters/store/controlAdaptersSlice';
|
||||
import {
|
||||
controlLayersPersistConfig,
|
||||
controlLayersSlice,
|
||||
controlLayersUndoableConfig,
|
||||
} from 'features/controlLayers/store/controlLayersSlice';
|
||||
import { deleteImageModalSlice } from 'features/deleteImageModal/store/slice';
|
||||
import { dynamicPromptsPersistConfig, dynamicPromptsSlice } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
|
||||
import { galleryPersistConfig, gallerySlice } from 'features/gallery/store/gallerySlice';
|
||||
@@ -21,11 +26,6 @@ import { workflowPersistConfig, workflowSlice } from 'features/nodes/store/workf
|
||||
import { generationPersistConfig, generationSlice } from 'features/parameters/store/generationSlice';
|
||||
import { postprocessingPersistConfig, postprocessingSlice } from 'features/parameters/store/postprocessingSlice';
|
||||
import { queueSlice } from 'features/queue/store/queueSlice';
|
||||
import {
|
||||
regionalPromptsPersistConfig,
|
||||
regionalPromptsSlice,
|
||||
regionalPromptsUndoableConfig,
|
||||
} from 'features/regionalPrompts/store/regionalPromptsSlice';
|
||||
import { sdxlPersistConfig, sdxlSlice } from 'features/sdxl/store/sdxlSlice';
|
||||
import { configSlice } from 'features/system/store/configSlice';
|
||||
import { systemPersistConfig, systemSlice } from 'features/system/store/systemSlice';
|
||||
@@ -65,7 +65,7 @@ const allReducers = {
|
||||
[queueSlice.name]: queueSlice.reducer,
|
||||
[workflowSlice.name]: workflowSlice.reducer,
|
||||
[hrfSlice.name]: hrfSlice.reducer,
|
||||
[regionalPromptsSlice.name]: undoable(regionalPromptsSlice.reducer, regionalPromptsUndoableConfig),
|
||||
[controlLayersSlice.name]: undoable(controlLayersSlice.reducer, controlLayersUndoableConfig),
|
||||
[api.reducerPath]: api.reducer,
|
||||
};
|
||||
|
||||
@@ -110,7 +110,7 @@ const persistConfigs: { [key in keyof typeof allReducers]?: PersistConfig } = {
|
||||
[loraPersistConfig.name]: loraPersistConfig,
|
||||
[modelManagerV2PersistConfig.name]: modelManagerV2PersistConfig,
|
||||
[hrfPersistConfig.name]: hrfPersistConfig,
|
||||
[regionalPromptsPersistConfig.name]: regionalPromptsPersistConfig,
|
||||
[controlLayersPersistConfig.name]: controlLayersPersistConfig,
|
||||
};
|
||||
|
||||
const unserialize: UnserializeFunction = (data, key) => {
|
||||
|
||||
@@ -70,6 +70,7 @@ const IAIDndImage = (props: IAIDndImageProps) => {
|
||||
onMouseOver,
|
||||
onMouseOut,
|
||||
dataTestId,
|
||||
...rest
|
||||
} = props;
|
||||
|
||||
const [isHovered, setIsHovered] = useState(false);
|
||||
@@ -138,6 +139,7 @@ const IAIDndImage = (props: IAIDndImageProps) => {
|
||||
minH={minSize ? minSize : undefined}
|
||||
userSelect="none"
|
||||
cursor={isDragDisabled || !imageDTO ? 'default' : 'pointer'}
|
||||
{...rest}
|
||||
>
|
||||
{imageDTO && (
|
||||
<Flex
|
||||
|
||||
@@ -17,14 +17,10 @@ const accept: Accept = {
|
||||
const selectPostUploadAction = createMemoizedSelector(activeTabNameSelector, (activeTabName) => {
|
||||
let postUploadAction: PostUploadAction = { type: 'TOAST' };
|
||||
|
||||
if (activeTabName === 'unifiedCanvas') {
|
||||
if (activeTabName === 'canvas') {
|
||||
postUploadAction = { type: 'SET_CANVAS_INITIAL_IMAGE' };
|
||||
}
|
||||
|
||||
if (activeTabName === 'img2img') {
|
||||
postUploadAction = { type: 'SET_INITIAL_IMAGE' };
|
||||
}
|
||||
|
||||
return postUploadAction;
|
||||
});
|
||||
|
||||
|
||||
@@ -67,7 +67,7 @@ export const useGlobalHotkeys = () => {
|
||||
useHotkeys(
|
||||
'1',
|
||||
() => {
|
||||
dispatch(setActiveTab('txt2img'));
|
||||
dispatch(setActiveTab('generation'));
|
||||
},
|
||||
[dispatch]
|
||||
);
|
||||
@@ -75,7 +75,7 @@ export const useGlobalHotkeys = () => {
|
||||
useHotkeys(
|
||||
'2',
|
||||
() => {
|
||||
dispatch(setActiveTab('img2img'));
|
||||
dispatch(setActiveTab('canvas'));
|
||||
},
|
||||
[dispatch]
|
||||
);
|
||||
@@ -83,31 +83,23 @@ export const useGlobalHotkeys = () => {
|
||||
useHotkeys(
|
||||
'3',
|
||||
() => {
|
||||
dispatch(setActiveTab('unifiedCanvas'));
|
||||
dispatch(setActiveTab('workflows'));
|
||||
},
|
||||
[dispatch]
|
||||
);
|
||||
|
||||
useHotkeys(
|
||||
'4',
|
||||
() => {
|
||||
dispatch(setActiveTab('nodes'));
|
||||
},
|
||||
[dispatch]
|
||||
);
|
||||
|
||||
useHotkeys(
|
||||
'5',
|
||||
() => {
|
||||
if (isModelManagerEnabled) {
|
||||
dispatch(setActiveTab('modelManager'));
|
||||
dispatch(setActiveTab('models'));
|
||||
}
|
||||
},
|
||||
[dispatch, isModelManagerEnabled]
|
||||
);
|
||||
|
||||
useHotkeys(
|
||||
isModelManagerEnabled ? '6' : '5',
|
||||
isModelManagerEnabled ? '5' : '4',
|
||||
() => {
|
||||
dispatch(setActiveTab('queue'));
|
||||
},
|
||||
|
||||
@@ -13,6 +13,7 @@ type UseGroupedModelComboboxArg<T extends AnyModelConfig> = {
|
||||
onChange: (value: T | null) => void;
|
||||
getIsDisabled?: (model: T) => boolean;
|
||||
isLoading?: boolean;
|
||||
groupByType?: boolean;
|
||||
};
|
||||
|
||||
type UseGroupedModelComboboxReturn = {
|
||||
@@ -23,17 +24,21 @@ type UseGroupedModelComboboxReturn = {
|
||||
noOptionsMessage: () => string;
|
||||
};
|
||||
|
||||
const groupByBaseFunc = <T extends AnyModelConfig>(model: T) => model.base.toUpperCase();
|
||||
const groupByBaseAndTypeFunc = <T extends AnyModelConfig>(model: T) =>
|
||||
`${model.base.toUpperCase()} / ${model.type.replaceAll('_', ' ').toUpperCase()}`;
|
||||
|
||||
export const useGroupedModelCombobox = <T extends AnyModelConfig>(
|
||||
arg: UseGroupedModelComboboxArg<T>
|
||||
): UseGroupedModelComboboxReturn => {
|
||||
const { t } = useTranslation();
|
||||
const base_model = useAppSelector((s) => s.generation.model?.base ?? 'sdxl');
|
||||
const { modelConfigs, selectedModel, getIsDisabled, onChange, isLoading } = arg;
|
||||
const { modelConfigs, selectedModel, getIsDisabled, onChange, isLoading, groupByType = false } = arg;
|
||||
const options = useMemo<GroupBase<ComboboxOption>[]>(() => {
|
||||
if (!modelConfigs) {
|
||||
return [];
|
||||
}
|
||||
const groupedModels = groupBy(modelConfigs, 'base');
|
||||
const groupedModels = groupBy(modelConfigs, groupByType ? groupByBaseAndTypeFunc : groupByBaseFunc);
|
||||
const _options = reduce(
|
||||
groupedModels,
|
||||
(acc, val, label) => {
|
||||
@@ -49,9 +54,9 @@ export const useGroupedModelCombobox = <T extends AnyModelConfig>(
|
||||
},
|
||||
[] as GroupBase<ComboboxOption>[]
|
||||
);
|
||||
_options.sort((a) => (a.label === base_model ? -1 : 1));
|
||||
_options.sort((a) => (a.label?.split('/')[0]?.toLowerCase().includes(base_model) ? -1 : 1));
|
||||
return _options;
|
||||
}, [getIsDisabled, modelConfigs, base_model]);
|
||||
}, [modelConfigs, groupByType, getIsDisabled, base_model]);
|
||||
|
||||
const value = useMemo(
|
||||
() =>
|
||||
|
||||
@@ -5,6 +5,8 @@ import {
|
||||
selectControlAdaptersSlice,
|
||||
} from 'features/controlAdapters/store/controlAdaptersSlice';
|
||||
import { isControlNetOrT2IAdapter } from 'features/controlAdapters/store/types';
|
||||
import { selectControlLayersSlice } from 'features/controlLayers/store/controlLayersSlice';
|
||||
import type { Layer } from 'features/controlLayers/store/types';
|
||||
import { selectDynamicPromptsSlice } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
|
||||
import { getShouldProcessPrompt } from 'features/dynamicPrompts/util/getShouldProcessPrompt';
|
||||
import { selectNodesSlice } from 'features/nodes/store/nodesSlice';
|
||||
@@ -13,9 +15,16 @@ import { selectGenerationSlice } from 'features/parameters/store/generationSlice
|
||||
import { selectSystemSlice } from 'features/system/store/systemSlice';
|
||||
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
|
||||
import i18n from 'i18next';
|
||||
import { forEach } from 'lodash-es';
|
||||
import { forEach, upperFirst } from 'lodash-es';
|
||||
import { getConnectedEdges } from 'reactflow';
|
||||
|
||||
const LAYER_TYPE_TO_TKEY: Record<Layer['type'], string> = {
|
||||
initial_image_layer: 'controlLayers.globalInitialImage',
|
||||
control_adapter_layer: 'controlLayers.globalControlAdapter',
|
||||
ip_adapter_layer: 'controlLayers.globalIPAdapter',
|
||||
regional_guidance_layer: 'controlLayers.regionalGuidance',
|
||||
};
|
||||
|
||||
const selector = createMemoizedSelector(
|
||||
[
|
||||
selectControlAdaptersSlice,
|
||||
@@ -23,28 +32,27 @@ const selector = createMemoizedSelector(
|
||||
selectSystemSlice,
|
||||
selectNodesSlice,
|
||||
selectDynamicPromptsSlice,
|
||||
selectControlLayersSlice,
|
||||
activeTabNameSelector,
|
||||
],
|
||||
(controlAdapters, generation, system, nodes, dynamicPrompts, activeTabName) => {
|
||||
const { initialImage, model, positivePrompt } = generation;
|
||||
(controlAdapters, generation, system, nodes, dynamicPrompts, controlLayers, activeTabName) => {
|
||||
const { model } = generation;
|
||||
const { size } = controlLayers.present;
|
||||
const { positivePrompt } = controlLayers.present;
|
||||
|
||||
const { isConnected } = system;
|
||||
|
||||
const reasons: string[] = [];
|
||||
const reasons: { prefix?: string; content: string }[] = [];
|
||||
|
||||
// Cannot generate if not connected
|
||||
if (!isConnected) {
|
||||
reasons.push(i18n.t('parameters.invoke.systemDisconnected'));
|
||||
reasons.push({ content: i18n.t('parameters.invoke.systemDisconnected') });
|
||||
}
|
||||
|
||||
if (activeTabName === 'img2img' && !initialImage) {
|
||||
reasons.push(i18n.t('parameters.invoke.noInitialImageSelected'));
|
||||
}
|
||||
|
||||
if (activeTabName === 'nodes') {
|
||||
if (activeTabName === 'workflows') {
|
||||
if (nodes.shouldValidateGraph) {
|
||||
if (!nodes.nodes.length) {
|
||||
reasons.push(i18n.t('parameters.invoke.noNodesInGraph'));
|
||||
reasons.push({ content: i18n.t('parameters.invoke.noNodesInGraph') });
|
||||
}
|
||||
|
||||
nodes.nodes.forEach((node) => {
|
||||
@@ -56,7 +64,7 @@ const selector = createMemoizedSelector(
|
||||
|
||||
if (!nodeTemplate) {
|
||||
// Node type not found
|
||||
reasons.push(i18n.t('parameters.invoke.missingNodeTemplate'));
|
||||
reasons.push({ content: i18n.t('parameters.invoke.missingNodeTemplate') });
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -69,17 +77,17 @@ const selector = createMemoizedSelector(
|
||||
);
|
||||
|
||||
if (!fieldTemplate) {
|
||||
reasons.push(i18n.t('parameters.invoke.missingFieldTemplate'));
|
||||
reasons.push({ content: i18n.t('parameters.invoke.missingFieldTemplate') });
|
||||
return;
|
||||
}
|
||||
|
||||
if (fieldTemplate.required && field.value === undefined && !hasConnection) {
|
||||
reasons.push(
|
||||
i18n.t('parameters.invoke.missingInputForField', {
|
||||
reasons.push({
|
||||
content: i18n.t('parameters.invoke.missingInputForField', {
|
||||
nodeLabel: node.data.label || nodeTemplate.title,
|
||||
fieldLabel: field.label || fieldTemplate.title,
|
||||
})
|
||||
);
|
||||
}),
|
||||
});
|
||||
return;
|
||||
}
|
||||
});
|
||||
@@ -87,44 +95,122 @@ const selector = createMemoizedSelector(
|
||||
}
|
||||
} else {
|
||||
if (dynamicPrompts.prompts.length === 0 && getShouldProcessPrompt(positivePrompt)) {
|
||||
reasons.push(i18n.t('parameters.invoke.noPrompts'));
|
||||
reasons.push({ content: i18n.t('parameters.invoke.noPrompts') });
|
||||
}
|
||||
|
||||
if (!model) {
|
||||
reasons.push(i18n.t('parameters.invoke.noModelSelected'));
|
||||
reasons.push({ content: i18n.t('parameters.invoke.noModelSelected') });
|
||||
}
|
||||
|
||||
selectControlAdapterAll(controlAdapters).forEach((ca, i) => {
|
||||
if (!ca.isEnabled) {
|
||||
return;
|
||||
}
|
||||
if (activeTabName === 'generation') {
|
||||
// Handling for generation tab
|
||||
controlLayers.present.layers
|
||||
.filter((l) => l.isEnabled)
|
||||
.forEach((l, i) => {
|
||||
const layerLiteral = i18n.t('controlLayers.layers_one');
|
||||
const layerNumber = i + 1;
|
||||
const layerType = i18n.t(LAYER_TYPE_TO_TKEY[l.type]);
|
||||
const prefix = `${layerLiteral} #${layerNumber} (${layerType})`;
|
||||
const problems: string[] = [];
|
||||
if (l.type === 'control_adapter_layer') {
|
||||
// Must have model
|
||||
if (!l.controlAdapter.model) {
|
||||
problems.push(i18n.t('parameters.invoke.layer.controlAdapterNoModelSelected'));
|
||||
}
|
||||
// Model base must match
|
||||
if (l.controlAdapter.model?.base !== model?.base) {
|
||||
problems.push(i18n.t('parameters.invoke.layer.controlAdapterIncompatibleBaseModel'));
|
||||
}
|
||||
// Must have a control image OR, if it has a processor, it must have a processed image
|
||||
if (!l.controlAdapter.image) {
|
||||
problems.push(i18n.t('parameters.invoke.layer.controlAdapterNoImageSelected'));
|
||||
} else if (l.controlAdapter.processorConfig && !l.controlAdapter.processedImage) {
|
||||
problems.push(i18n.t('parameters.invoke.layer.controlAdapterImageNotProcessed'));
|
||||
}
|
||||
// T2I Adapters require images have dimensions that are multiples of 64
|
||||
if (l.controlAdapter.type === 't2i_adapter' && (size.width % 64 !== 0 || size.height % 64 !== 0)) {
|
||||
problems.push(i18n.t('parameters.invoke.layer.t2iAdapterIncompatibleDimensions'));
|
||||
}
|
||||
}
|
||||
|
||||
if (!ca.model) {
|
||||
reasons.push(
|
||||
i18n.t('parameters.invoke.noModelForControlAdapter', {
|
||||
number: i + 1,
|
||||
})
|
||||
);
|
||||
} else if (ca.model.base !== model?.base) {
|
||||
// This should never happen, just a sanity check
|
||||
reasons.push(
|
||||
i18n.t('parameters.invoke.incompatibleBaseModelForControlAdapter', {
|
||||
number: i + 1,
|
||||
})
|
||||
);
|
||||
}
|
||||
if (l.type === 'ip_adapter_layer') {
|
||||
// Must have model
|
||||
if (!l.ipAdapter.model) {
|
||||
problems.push(i18n.t('parameters.invoke.layer.ipAdapterNoModelSelected'));
|
||||
}
|
||||
// Model base must match
|
||||
if (l.ipAdapter.model?.base !== model?.base) {
|
||||
problems.push(i18n.t('parameters.invoke.layer.ipAdapterIncompatibleBaseModel'));
|
||||
}
|
||||
// Must have an image
|
||||
if (!l.ipAdapter.image) {
|
||||
problems.push(i18n.t('parameters.invoke.layer.ipAdapterNoImageSelected'));
|
||||
}
|
||||
}
|
||||
|
||||
if (
|
||||
!ca.controlImage ||
|
||||
(isControlNetOrT2IAdapter(ca) && !ca.processedControlImage && ca.processorType !== 'none')
|
||||
) {
|
||||
reasons.push(
|
||||
i18n.t('parameters.invoke.noControlImageForControlAdapter', {
|
||||
number: i + 1,
|
||||
})
|
||||
);
|
||||
}
|
||||
});
|
||||
if (l.type === 'initial_image_layer') {
|
||||
// Must have an image
|
||||
if (!l.image) {
|
||||
problems.push(i18n.t('parameters.invoke.layer.initialImageNoImageSelected'));
|
||||
}
|
||||
}
|
||||
|
||||
if (l.type === 'regional_guidance_layer') {
|
||||
// Must have a region
|
||||
if (l.maskObjects.length === 0) {
|
||||
problems.push(i18n.t('parameters.invoke.layer.rgNoRegion'));
|
||||
}
|
||||
// Must have at least 1 prompt or IP Adapter
|
||||
if (l.positivePrompt === null && l.negativePrompt === null && l.ipAdapters.length === 0) {
|
||||
problems.push(i18n.t('parameters.invoke.layer.rgNoPromptsOrIPAdapters'));
|
||||
}
|
||||
l.ipAdapters.forEach((ipAdapter) => {
|
||||
// Must have model
|
||||
if (!ipAdapter.model) {
|
||||
problems.push(i18n.t('parameters.invoke.layer.ipAdapterNoModelSelected'));
|
||||
}
|
||||
// Model base must match
|
||||
if (ipAdapter.model?.base !== model?.base) {
|
||||
problems.push(i18n.t('parameters.invoke.layer.ipAdapterIncompatibleBaseModel'));
|
||||
}
|
||||
// Must have an image
|
||||
if (!ipAdapter.image) {
|
||||
problems.push(i18n.t('parameters.invoke.layer.ipAdapterNoImageSelected'));
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
if (problems.length) {
|
||||
const content = upperFirst(problems.join(', '));
|
||||
reasons.push({ prefix, content });
|
||||
}
|
||||
});
|
||||
} else {
|
||||
// Handling for all other tabs
|
||||
selectControlAdapterAll(controlAdapters)
|
||||
.filter((ca) => ca.isEnabled)
|
||||
.forEach((ca, i) => {
|
||||
if (!ca.isEnabled) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (!ca.model) {
|
||||
reasons.push({ content: i18n.t('parameters.invoke.noModelForControlAdapter', { number: i + 1 }) });
|
||||
} else if (ca.model.base !== model?.base) {
|
||||
// This should never happen, just a sanity check
|
||||
reasons.push({
|
||||
content: i18n.t('parameters.invoke.incompatibleBaseModelForControlAdapter', { number: i + 1 }),
|
||||
});
|
||||
}
|
||||
|
||||
if (
|
||||
!ca.controlImage ||
|
||||
(isControlNetOrT2IAdapter(ca) && !ca.processedControlImage && ca.processorType !== 'none')
|
||||
) {
|
||||
reasons.push({ content: i18n.t('parameters.invoke.noControlImageForControlAdapter', { number: i + 1 }) });
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return { isReady: !reasons.length, reasons };
|
||||
@@ -132,6 +218,6 @@ const selector = createMemoizedSelector(
|
||||
);
|
||||
|
||||
export const useIsReadyToEnqueue = () => {
|
||||
const { isReady, reasons } = useAppSelector(selector);
|
||||
return { isReady, reasons };
|
||||
const value = useAppSelector(selector);
|
||||
return value;
|
||||
};
|
||||
|
||||
3
invokeai/frontend/web/src/common/util/stopPropagation.ts
Normal file
3
invokeai/frontend/web/src/common/util/stopPropagation.ts
Normal file
@@ -0,0 +1,3 @@
|
||||
export const stopPropagation = (e: React.MouseEvent) => {
|
||||
e.stopPropagation();
|
||||
};
|
||||
@@ -21,7 +21,6 @@ import {
|
||||
setShouldShowBoundingBox,
|
||||
} from 'features/canvas/store/canvasSlice';
|
||||
import type { CanvasLayer } from 'features/canvas/store/canvasTypes';
|
||||
import { LAYER_NAMES_DICT } from 'features/canvas/store/canvasTypes';
|
||||
import { memo, useCallback, useMemo } from 'react';
|
||||
import { useHotkeys } from 'react-hotkeys-hook';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
@@ -216,13 +215,20 @@ const IAICanvasToolbar = () => {
|
||||
[dispatch, isMaskEnabled]
|
||||
);
|
||||
|
||||
const value = useMemo(() => LAYER_NAMES_DICT.filter((o) => o.value === layer)[0], [layer]);
|
||||
const layerOptions = useMemo<{ label: string; value: CanvasLayer }[]>(
|
||||
() => [
|
||||
{ label: t('unifiedCanvas.base'), value: 'base' },
|
||||
{ label: t('unifiedCanvas.mask'), value: 'mask' },
|
||||
],
|
||||
[t]
|
||||
);
|
||||
const layerValue = useMemo(() => layerOptions.filter((o) => o.value === layer)[0] ?? null, [layer, layerOptions]);
|
||||
|
||||
return (
|
||||
<Flex alignItems="center" gap={2} flexWrap="wrap">
|
||||
<Tooltip label={`${t('unifiedCanvas.layer')} (Q)`}>
|
||||
<FormControl isDisabled={isStaging} w="5rem">
|
||||
<Combobox value={value} options={LAYER_NAMES_DICT} onChange={handleChangeLayer} />
|
||||
<Combobox value={layerValue} options={layerOptions} onChange={handleChangeLayer} />
|
||||
</FormControl>
|
||||
</Tooltip>
|
||||
|
||||
|
||||
@@ -75,7 +75,7 @@ const useInpaintingCanvasHotkeys = () => {
|
||||
|
||||
const onKeyDown = useCallback(
|
||||
(e: KeyboardEvent) => {
|
||||
if (e.repeat || e.key !== ' ' || isInteractiveTarget(e.target) || activeTabName !== 'unifiedCanvas') {
|
||||
if (e.repeat || e.key !== ' ' || isInteractiveTarget(e.target) || activeTabName !== 'canvas') {
|
||||
return;
|
||||
}
|
||||
if ($toolStash.get() || $tool.get() === 'move') {
|
||||
@@ -90,7 +90,7 @@ const useInpaintingCanvasHotkeys = () => {
|
||||
);
|
||||
const onKeyUp = useCallback(
|
||||
(e: KeyboardEvent) => {
|
||||
if (e.repeat || e.key !== ' ' || isInteractiveTarget(e.target) || activeTabName !== 'unifiedCanvas') {
|
||||
if (e.repeat || e.key !== ' ' || isInteractiveTarget(e.target) || activeTabName !== 'canvas') {
|
||||
return;
|
||||
}
|
||||
if (!$toolStash.get() || $tool.get() !== 'move') {
|
||||
|
||||
@@ -8,6 +8,7 @@ import calculateScale from 'features/canvas/util/calculateScale';
|
||||
import { STAGE_PADDING_PERCENTAGE } from 'features/canvas/util/constants';
|
||||
import floorCoordinates from 'features/canvas/util/floorCoordinates';
|
||||
import getScaledBoundingBoxDimensions from 'features/canvas/util/getScaledBoundingBoxDimensions';
|
||||
import { calculateNewSize } from 'features/parameters/components/ImageSize/calculateNewSize';
|
||||
import { initialAspectRatioState } from 'features/parameters/components/ImageSize/constants';
|
||||
import type { AspectRatioState } from 'features/parameters/components/ImageSize/types';
|
||||
import { modelChanged } from 'features/parameters/store/generationSlice';
|
||||
@@ -588,8 +589,9 @@ export const canvasSlice = createSlice({
|
||||
},
|
||||
extraReducers: (builder) => {
|
||||
builder.addCase(modelChanged, (state, action) => {
|
||||
if (action.meta.previousModel?.base === action.payload?.base) {
|
||||
// The base model hasn't changed, we don't need to optimize the size
|
||||
const newModel = action.payload;
|
||||
if (!newModel || action.meta.previousModel?.base === newModel.base) {
|
||||
// Model was cleared or the base didn't change
|
||||
return;
|
||||
}
|
||||
const optimalDimension = getOptimalDimension(action.payload);
|
||||
@@ -597,14 +599,8 @@ export const canvasSlice = createSlice({
|
||||
if (getIsSizeOptimal(width, height, optimalDimension)) {
|
||||
return;
|
||||
}
|
||||
setBoundingBoxDimensionsReducer(
|
||||
state,
|
||||
{
|
||||
width,
|
||||
height,
|
||||
},
|
||||
optimalDimension
|
||||
);
|
||||
const newSize = calculateNewSize(state.aspectRatio.value, optimalDimension * optimalDimension);
|
||||
setBoundingBoxDimensionsReducer(state, newSize, optimalDimension);
|
||||
});
|
||||
|
||||
builder.addCase(socketQueueItemStatusChanged, (state, action) => {
|
||||
|
||||
@@ -5,11 +5,6 @@ import { z } from 'zod';
|
||||
|
||||
export type CanvasLayer = 'base' | 'mask';
|
||||
|
||||
export const LAYER_NAMES_DICT: { label: string; value: CanvasLayer }[] = [
|
||||
{ label: 'Base', value: 'base' },
|
||||
{ label: 'Mask', value: 'mask' },
|
||||
];
|
||||
|
||||
const zBoundingBoxScaleMethod = z.enum(['none', 'auto', 'manual']);
|
||||
export type BoundingBoxScaleMethod = z.infer<typeof zBoundingBoxScaleMethod>;
|
||||
export const isBoundingBoxScaleMethod = (v: unknown): v is BoundingBoxScaleMethod =>
|
||||
|
||||
@@ -76,7 +76,7 @@ const ControlAdapterConfig = (props: { id: string; number: number }) => {
|
||||
<Box minW={0} w="full" transitionProperty="common" transitionDuration="0.1s">
|
||||
<ParamControlAdapterModel id={id} />
|
||||
</Box>
|
||||
{activeTabName === 'unifiedCanvas' && <ControlNetCanvasImageImports id={id} />}
|
||||
{activeTabName === 'canvas' && <ControlNetCanvasImageImports id={id} />}
|
||||
<IconButton
|
||||
size="sm"
|
||||
tooltip={t('controlnet.duplicate')}
|
||||
|
||||
@@ -13,9 +13,10 @@ import {
|
||||
controlAdapterImageChanged,
|
||||
selectControlAdaptersSlice,
|
||||
} from 'features/controlAdapters/store/controlAdaptersSlice';
|
||||
import { heightChanged, widthChanged } from 'features/controlLayers/store/controlLayersSlice';
|
||||
import type { TypesafeDraggableData, TypesafeDroppableData } from 'features/dnd/types';
|
||||
import { calculateNewSize } from 'features/parameters/components/ImageSize/calculateNewSize';
|
||||
import { heightChanged, selectOptimalDimension, widthChanged } from 'features/parameters/store/generationSlice';
|
||||
import { selectOptimalDimension } from 'features/parameters/store/generationSlice';
|
||||
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
|
||||
import { memo, useCallback, useEffect, useMemo, useState } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
@@ -92,15 +93,16 @@ const ControlAdapterImagePreview = ({ isSmall, id }: Props) => {
|
||||
return;
|
||||
}
|
||||
|
||||
if (activeTabName === 'unifiedCanvas') {
|
||||
if (activeTabName === 'canvas') {
|
||||
dispatch(setBoundingBoxDimensions({ width: controlImage.width, height: controlImage.height }, optimalDimension));
|
||||
} else {
|
||||
const options = { updateAspectRatio: true, clamp: true };
|
||||
const { width, height } = calculateNewSize(
|
||||
controlImage.width / controlImage.height,
|
||||
optimalDimension * optimalDimension
|
||||
);
|
||||
dispatch(widthChanged(width));
|
||||
dispatch(heightChanged(height));
|
||||
dispatch(widthChanged({ width, ...options }));
|
||||
dispatch(heightChanged({ height, ...options }));
|
||||
}
|
||||
}, [controlImage, activeTabName, dispatch, optimalDimension]);
|
||||
|
||||
|
||||
@@ -52,7 +52,7 @@ const ParamControlAdapterIPMethod = ({ id }: Props) => {
|
||||
|
||||
return (
|
||||
<FormControl>
|
||||
<InformationalPopover feature="ipAdapterMethod">
|
||||
<InformationalPopover feature="controlNetResizeMode">
|
||||
<FormLabel>{t('controlnet.ipAdapterMethod')}</FormLabel>
|
||||
</InformationalPopover>
|
||||
<Combobox value={value} options={options} isDisabled={!isEnabled} onChange={handleIPMethodChanged} />
|
||||
|
||||
@@ -1,12 +1,11 @@
|
||||
import type { PayloadAction, Update } from '@reduxjs/toolkit';
|
||||
import { createEntityAdapter, createSlice, isAnyOf } from '@reduxjs/toolkit';
|
||||
import { createEntityAdapter, createSlice } from '@reduxjs/toolkit';
|
||||
import { getSelectorsOptions } from 'app/store/createMemoizedSelector';
|
||||
import type { PersistConfig, RootState } from 'app/store/store';
|
||||
import { deepClone } from 'common/util/deepClone';
|
||||
import { buildControlAdapter } from 'features/controlAdapters/util/buildControlAdapter';
|
||||
import { buildControlAdapterProcessor } from 'features/controlAdapters/util/buildControlAdapterProcessor';
|
||||
import { zModelIdentifierField } from 'features/nodes/types/common';
|
||||
import { maskLayerIPAdapterAdded } from 'features/regionalPrompts/store/regionalPromptsSlice';
|
||||
import { merge, uniq } from 'lodash-es';
|
||||
import type { ControlNetModelConfig, IPAdapterModelConfig, T2IAdapterModelConfig } from 'services/api/types';
|
||||
import { socketInvocationError } from 'services/events/actions';
|
||||
@@ -383,10 +382,6 @@ export const controlAdaptersSlice = createSlice({
|
||||
builder.addCase(socketInvocationError, (state) => {
|
||||
state.pendingControlImages = [];
|
||||
});
|
||||
|
||||
builder.addCase(maskLayerIPAdapterAdded, (state, action) => {
|
||||
caAdapter.addOne(state, buildControlAdapter(action.meta.uuid, 'ip_adapter'));
|
||||
});
|
||||
},
|
||||
});
|
||||
|
||||
@@ -417,8 +412,6 @@ export const {
|
||||
t2iAdaptersReset,
|
||||
} = controlAdaptersSlice.actions;
|
||||
|
||||
export const isAnyControlAdapterAdded = isAnyOf(controlAdapterAdded, controlAdapterRecalled);
|
||||
|
||||
export const selectControlAdaptersSlice = (state: RootState) => state.controlAdapters;
|
||||
|
||||
/* eslint-disable-next-line @typescript-eslint/no-explicit-any */
|
||||
|
||||
@@ -0,0 +1,47 @@
|
||||
import { Button, Menu, MenuButton, MenuItem, MenuList } from '@invoke-ai/ui-library';
|
||||
import { useAppDispatch } from 'app/store/storeHooks';
|
||||
import { useAddCALayer, useAddIILayer, useAddIPALayer } from 'features/controlLayers/hooks/addLayerHooks';
|
||||
import { rgLayerAdded } from 'features/controlLayers/store/controlLayersSlice';
|
||||
import { memo, useCallback } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { PiPlusBold } from 'react-icons/pi';
|
||||
|
||||
export const AddLayerButton = memo(() => {
|
||||
const { t } = useTranslation();
|
||||
const dispatch = useAppDispatch();
|
||||
const [addCALayer, isAddCALayerDisabled] = useAddCALayer();
|
||||
const [addIPALayer, isAddIPALayerDisabled] = useAddIPALayer();
|
||||
const [addIILayer, isAddIILayerDisabled] = useAddIILayer();
|
||||
const addRGLayer = useCallback(() => {
|
||||
dispatch(rgLayerAdded());
|
||||
}, [dispatch]);
|
||||
|
||||
return (
|
||||
<Menu>
|
||||
<MenuButton
|
||||
as={Button}
|
||||
leftIcon={<PiPlusBold />}
|
||||
variant="ghost"
|
||||
data-testid="control-layers-add-layer-menu-button"
|
||||
>
|
||||
{t('controlLayers.addLayer')}
|
||||
</MenuButton>
|
||||
<MenuList>
|
||||
<MenuItem icon={<PiPlusBold />} onClick={addRGLayer}>
|
||||
{t('controlLayers.regionalGuidanceLayer')}
|
||||
</MenuItem>
|
||||
<MenuItem icon={<PiPlusBold />} onClick={addCALayer} isDisabled={isAddCALayerDisabled}>
|
||||
{t('controlLayers.globalControlAdapterLayer')}
|
||||
</MenuItem>
|
||||
<MenuItem icon={<PiPlusBold />} onClick={addIPALayer} isDisabled={isAddIPALayerDisabled}>
|
||||
{t('controlLayers.globalIPAdapterLayer')}
|
||||
</MenuItem>
|
||||
<MenuItem icon={<PiPlusBold />} onClick={addIILayer} isDisabled={isAddIILayerDisabled}>
|
||||
{t('controlLayers.globalInitialImageLayer')}
|
||||
</MenuItem>
|
||||
</MenuList>
|
||||
</Menu>
|
||||
);
|
||||
});
|
||||
|
||||
AddLayerButton.displayName = 'AddLayerButton';
|
||||
@@ -1,13 +1,13 @@
 import { Button, Flex } from '@invoke-ai/ui-library';
 import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
 import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
+import { useAddIPAdapterToIPALayer } from 'features/controlLayers/hooks/addLayerHooks';
 import {
-  isVectorMaskLayer,
-  maskLayerIPAdapterAdded,
-  maskLayerNegativePromptChanged,
-  maskLayerPositivePromptChanged,
-  selectRegionalPromptsSlice,
-} from 'features/regionalPrompts/store/regionalPromptsSlice';
+  isRegionalGuidanceLayer,
+  rgLayerNegativePromptChanged,
+  rgLayerPositivePromptChanged,
+  selectControlLayersSlice,
+} from 'features/controlLayers/store/controlLayersSlice';
 import { useCallback, useMemo } from 'react';
 import { useTranslation } from 'react-i18next';
 import { PiPlusBold } from 'react-icons/pi';
@@ -19,11 +19,12 @@ type AddPromptButtonProps = {
 export const AddPromptButtons = ({ layerId }: AddPromptButtonProps) => {
   const { t } = useTranslation();
   const dispatch = useAppDispatch();
+  const [addIPAdapter, isAddIPAdapterDisabled] = useAddIPAdapterToIPALayer(layerId);
   const selectValidActions = useMemo(
     () =>
-      createMemoizedSelector(selectRegionalPromptsSlice, (regionalPrompts) => {
-        const layer = regionalPrompts.present.layers.find((l) => l.id === layerId);
-        assert(isVectorMaskLayer(layer), `Layer ${layerId} not found or not an RP layer`);
+      createMemoizedSelector(selectControlLayersSlice, (controlLayers) => {
+        const layer = controlLayers.present.layers.find((l) => l.id === layerId);
+        assert(isRegionalGuidanceLayer(layer), `Layer ${layerId} not found or not an RP layer`);
         return {
           canAddPositivePrompt: layer.positivePrompt === null,
           canAddNegativePrompt: layer.negativePrompt === null,
@@ -33,13 +34,10 @@ export const AddPromptButtons = ({ layerId }: AddPromptButtonProps) => {
   );
   const validActions = useAppSelector(selectValidActions);
   const addPositivePrompt = useCallback(() => {
-    dispatch(maskLayerPositivePromptChanged({ layerId, prompt: '' }));
+    dispatch(rgLayerPositivePromptChanged({ layerId, prompt: '' }));
   }, [dispatch, layerId]);
   const addNegativePrompt = useCallback(() => {
-    dispatch(maskLayerNegativePromptChanged({ layerId, prompt: '' }));
-  }, [dispatch, layerId]);
-  const addIPAdapter = useCallback(() => {
-    dispatch(maskLayerIPAdapterAdded(layerId));
+    dispatch(rgLayerNegativePromptChanged({ layerId, prompt: '' }));
   }, [dispatch, layerId]);
 
   return (
@@ -62,7 +60,13 @@ export const AddPromptButtons = ({ layerId }: AddPromptButtonProps) => {
       >
         {t('common.negativePrompt')}
       </Button>
-      <Button size="sm" variant="ghost" leftIcon={<PiPlusBold />} onClick={addIPAdapter}>
+      <Button
+        size="sm"
+        variant="ghost"
+        leftIcon={<PiPlusBold />}
+        onClick={addIPAdapter}
+        isDisabled={isAddIPAdapterDisabled}
+      >
         {t('common.ipAdapter')}
       </Button>
     </Flex>
@@ -10,7 +10,7 @@ import {
   PopoverTrigger,
 } from '@invoke-ai/ui-library';
 import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
-import { brushSizeChanged, initialRegionalPromptsState } from 'features/regionalPrompts/store/regionalPromptsSlice';
+import { brushSizeChanged, initialControlLayersState } from 'features/controlLayers/store/controlLayersSlice';
 import { memo, useCallback } from 'react';
 import { useTranslation } from 'react-i18next';
 
@@ -20,22 +20,22 @@ const formatPx = (v: number | string) => `${v} px`;
 export const BrushSize = memo(() => {
   const dispatch = useAppDispatch();
   const { t } = useTranslation();
-  const brushSize = useAppSelector((s) => s.regionalPrompts.present.brushSize);
+  const brushSize = useAppSelector((s) => s.controlLayers.present.brushSize);
   const onChange = useCallback(
     (v: number) => {
-      dispatch(brushSizeChanged(v));
+      dispatch(brushSizeChanged(Math.round(v)));
     },
     [dispatch]
   );
   return (
     <FormControl w="min-content">
-      <FormLabel m={0}>{t('regionalPrompts.brushSize')}</FormLabel>
+      <FormLabel m={0}>{t('controlLayers.brushSize')}</FormLabel>
       <Popover isLazy>
         <PopoverTrigger>
           <CompositeNumberInput
             min={1}
             max={600}
-            defaultValue={initialRegionalPromptsState.brushSize}
+            defaultValue={initialControlLayersState.brushSize}
             value={brushSize}
             onChange={onChange}
             w={24}
@@ -48,7 +48,7 @@ export const BrushSize = memo(() => {
           <CompositeSlider
             min={1}
             max={300}
-            defaultValue={initialRegionalPromptsState.brushSize}
+            defaultValue={initialControlLayersState.brushSize}
             value={brushSize}
             onChange={onChange}
             marks={marks}
@@ -0,0 +1,45 @@
import { Flex, Spacer, useDisclosure } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { CALayerControlAdapterWrapper } from 'features/controlLayers/components/CALayer/CALayerControlAdapterWrapper';
import { LayerDeleteButton } from 'features/controlLayers/components/LayerCommon/LayerDeleteButton';
import { LayerMenu } from 'features/controlLayers/components/LayerCommon/LayerMenu';
import { LayerTitle } from 'features/controlLayers/components/LayerCommon/LayerTitle';
import { LayerVisibilityToggle } from 'features/controlLayers/components/LayerCommon/LayerVisibilityToggle';
import { LayerWrapper } from 'features/controlLayers/components/LayerCommon/LayerWrapper';
import { layerSelected, selectCALayerOrThrow } from 'features/controlLayers/store/controlLayersSlice';
import { memo, useCallback } from 'react';

import CALayerOpacity from './CALayerOpacity';

type Props = {
  layerId: string;
};

export const CALayer = memo(({ layerId }: Props) => {
  const dispatch = useAppDispatch();
  const isSelected = useAppSelector((s) => selectCALayerOrThrow(s.controlLayers.present, layerId).isSelected);
  const onClick = useCallback(() => {
    dispatch(layerSelected(layerId));
  }, [dispatch, layerId]);
  const { isOpen, onToggle } = useDisclosure({ defaultIsOpen: true });

  return (
    <LayerWrapper onClick={onClick} borderColor={isSelected ? 'base.400' : 'base.800'}>
      <Flex gap={3} alignItems="center" p={3} cursor="pointer" onDoubleClick={onToggle}>
        <LayerVisibilityToggle layerId={layerId} />
        <LayerTitle type="control_adapter_layer" />
        <Spacer />
        <CALayerOpacity layerId={layerId} />
        <LayerMenu layerId={layerId} />
        <LayerDeleteButton layerId={layerId} />
      </Flex>
      {isOpen && (
        <Flex flexDir="column" gap={3} px={3} pb={3}>
          <CALayerControlAdapterWrapper layerId={layerId} />
        </Flex>
      )}
    </LayerWrapper>
  );
});

CALayer.displayName = 'CALayer';
@@ -0,0 +1,121 @@
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { ControlAdapter } from 'features/controlLayers/components/ControlAndIPAdapter/ControlAdapter';
import {
  caLayerControlModeChanged,
  caLayerImageChanged,
  caLayerModelChanged,
  caLayerProcessorConfigChanged,
  caOrIPALayerBeginEndStepPctChanged,
  caOrIPALayerWeightChanged,
  selectCALayerOrThrow,
} from 'features/controlLayers/store/controlLayersSlice';
import type { ControlModeV2, ProcessorConfig } from 'features/controlLayers/util/controlAdapters';
import type { CALayerImageDropData } from 'features/dnd/types';
import { memo, useCallback, useMemo } from 'react';
import type {
  CALayerImagePostUploadAction,
  ControlNetModelConfig,
  ImageDTO,
  T2IAdapterModelConfig,
} from 'services/api/types';

type Props = {
  layerId: string;
};

export const CALayerControlAdapterWrapper = memo(({ layerId }: Props) => {
  const dispatch = useAppDispatch();
  const controlAdapter = useAppSelector((s) => selectCALayerOrThrow(s.controlLayers.present, layerId).controlAdapter);

  const onChangeBeginEndStepPct = useCallback(
    (beginEndStepPct: [number, number]) => {
      dispatch(
        caOrIPALayerBeginEndStepPctChanged({
          layerId,
          beginEndStepPct,
        })
      );
    },
    [dispatch, layerId]
  );

  const onChangeControlMode = useCallback(
    (controlMode: ControlModeV2) => {
      dispatch(
        caLayerControlModeChanged({
          layerId,
          controlMode,
        })
      );
    },
    [dispatch, layerId]
  );

  const onChangeWeight = useCallback(
    (weight: number) => {
      dispatch(caOrIPALayerWeightChanged({ layerId, weight }));
    },
    [dispatch, layerId]
  );

  const onChangeProcessorConfig = useCallback(
    (processorConfig: ProcessorConfig | null) => {
      dispatch(caLayerProcessorConfigChanged({ layerId, processorConfig }));
    },
    [dispatch, layerId]
  );

  const onChangeModel = useCallback(
    (modelConfig: ControlNetModelConfig | T2IAdapterModelConfig) => {
      dispatch(
        caLayerModelChanged({
          layerId,
          modelConfig,
        })
      );
    },
    [dispatch, layerId]
  );

  const onChangeImage = useCallback(
    (imageDTO: ImageDTO | null) => {
      dispatch(caLayerImageChanged({ layerId, imageDTO }));
    },
    [dispatch, layerId]
  );

  const droppableData = useMemo<CALayerImageDropData>(
    () => ({
      actionType: 'SET_CA_LAYER_IMAGE',
      context: {
        layerId,
      },
      id: layerId,
    }),
    [layerId]
  );

  const postUploadAction = useMemo<CALayerImagePostUploadAction>(
    () => ({
      layerId,
      type: 'SET_CA_LAYER_IMAGE',
    }),
    [layerId]
  );

  return (
    <ControlAdapter
      controlAdapter={controlAdapter}
      onChangeBeginEndStepPct={onChangeBeginEndStepPct}
      onChangeControlMode={onChangeControlMode}
      onChangeWeight={onChangeWeight}
      onChangeProcessorConfig={onChangeProcessorConfig}
      onChangeModel={onChangeModel}
      onChangeImage={onChangeImage}
      droppableData={droppableData}
      postUploadAction={postUploadAction}
    />
  );
});

CALayerControlAdapterWrapper.displayName = 'CALayerControlAdapterWrapper';
@@ -0,0 +1,98 @@
import {
  CompositeNumberInput,
  CompositeSlider,
  Flex,
  FormControl,
  FormLabel,
  IconButton,
  Popover,
  PopoverArrow,
  PopoverBody,
  PopoverContent,
  PopoverTrigger,
  Switch,
} from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { stopPropagation } from 'common/util/stopPropagation';
import { useLayerOpacity } from 'features/controlLayers/hooks/layerStateHooks';
import { caLayerIsFilterEnabledChanged, caLayerOpacityChanged } from 'features/controlLayers/store/controlLayersSlice';
import type { ChangeEvent } from 'react';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiDropHalfFill } from 'react-icons/pi';

type Props = {
  layerId: string;
};

const marks = [0, 25, 50, 75, 100];
const formatPct = (v: number | string) => `${v} %`;

const CALayerOpacity = ({ layerId }: Props) => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const { opacity, isFilterEnabled } = useLayerOpacity(layerId);
  const onChangeOpacity = useCallback(
    (v: number) => {
      dispatch(caLayerOpacityChanged({ layerId, opacity: v / 100 }));
    },
    [dispatch, layerId]
  );
  const onChangeFilter = useCallback(
    (e: ChangeEvent<HTMLInputElement>) => {
      dispatch(caLayerIsFilterEnabledChanged({ layerId, isFilterEnabled: e.target.checked }));
    },
    [dispatch, layerId]
  );
  return (
    <Popover isLazy>
      <PopoverTrigger>
        <IconButton
          aria-label={t('controlLayers.opacity')}
          size="sm"
          icon={<PiDropHalfFill size={16} />}
          variant="ghost"
          onDoubleClick={stopPropagation}
        />
      </PopoverTrigger>
      <PopoverContent onDoubleClick={stopPropagation}>
        <PopoverArrow />
        <PopoverBody>
          <Flex direction="column" gap={2}>
            <FormControl orientation="horizontal" w="full">
              <FormLabel m={0} flexGrow={1} cursor="pointer">
                {t('controlLayers.opacityFilter')}
              </FormLabel>
              <Switch isChecked={isFilterEnabled} onChange={onChangeFilter} />
            </FormControl>
            <FormControl orientation="horizontal">
              <FormLabel m={0}>{t('controlLayers.opacity')}</FormLabel>
              <CompositeSlider
                min={0}
                max={100}
                step={1}
                value={opacity}
                defaultValue={100}
                onChange={onChangeOpacity}
                marks={marks}
                w={48}
              />
              <CompositeNumberInput
                min={0}
                max={100}
                step={1}
                value={opacity}
                defaultValue={100}
                onChange={onChangeOpacity}
                w={24}
                format={formatPct}
              />
            </FormControl>
          </Flex>
        </PopoverBody>
      </PopoverContent>
    </Popover>
  );
};

export default memo(CALayerOpacity);
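
// Illustrative note, not part of the original diff: CALayerOpacity works in whole
// percent values (0-100) in the UI while the layer state stores a 0-1 fraction, so the
// handler divides by 100 before dispatching. A quick sketch of the conversion above:
//   onChangeOpacity(55) -> dispatch(caLayerOpacityChanged({ layerId, opacity: 0.55 }))
//   formatPct(55)       -> '55 %'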
@@ -0,0 +1,117 @@
import { Box, Divider, Flex, Icon, IconButton } from '@invoke-ai/ui-library';
import { ControlAdapterModelCombobox } from 'features/controlLayers/components/ControlAndIPAdapter/ControlAdapterModelCombobox';
import type {
  ControlModeV2,
  ControlNetConfigV2,
  ProcessorConfig,
  T2IAdapterConfigV2,
} from 'features/controlLayers/util/controlAdapters';
import type { TypesafeDroppableData } from 'features/dnd/types';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiCaretUpBold } from 'react-icons/pi';
import { useToggle } from 'react-use';
import type { ControlNetModelConfig, ImageDTO, PostUploadAction, T2IAdapterModelConfig } from 'services/api/types';

import { ControlAdapterBeginEndStepPct } from './ControlAdapterBeginEndStepPct';
import { ControlAdapterControlModeSelect } from './ControlAdapterControlModeSelect';
import { ControlAdapterImagePreview } from './ControlAdapterImagePreview';
import { ControlAdapterProcessorConfig } from './ControlAdapterProcessorConfig';
import { ControlAdapterProcessorTypeSelect } from './ControlAdapterProcessorTypeSelect';
import { ControlAdapterWeight } from './ControlAdapterWeight';

type Props = {
  controlAdapter: ControlNetConfigV2 | T2IAdapterConfigV2;
  onChangeBeginEndStepPct: (beginEndStepPct: [number, number]) => void;
  onChangeControlMode: (controlMode: ControlModeV2) => void;
  onChangeWeight: (weight: number) => void;
  onChangeProcessorConfig: (processorConfig: ProcessorConfig | null) => void;
  onChangeModel: (modelConfig: ControlNetModelConfig | T2IAdapterModelConfig) => void;
  onChangeImage: (imageDTO: ImageDTO | null) => void;
  droppableData: TypesafeDroppableData;
  postUploadAction: PostUploadAction;
};

export const ControlAdapter = memo(
  ({
    controlAdapter,
    onChangeBeginEndStepPct,
    onChangeControlMode,
    onChangeWeight,
    onChangeProcessorConfig,
    onChangeModel,
    onChangeImage,
    droppableData,
    postUploadAction,
  }: Props) => {
    const { t } = useTranslation();
    const [isExpanded, toggleIsExpanded] = useToggle(false);

    return (
      <Flex flexDir="column" gap={3} position="relative" w="full">
        <Flex gap={3} alignItems="center" w="full">
          <Box minW={0} w="full" transitionProperty="common" transitionDuration="0.1s">
            <ControlAdapterModelCombobox modelKey={controlAdapter.model?.key ?? null} onChange={onChangeModel} />
          </Box>

          <IconButton
            size="sm"
            tooltip={isExpanded ? t('controlnet.hideAdvanced') : t('controlnet.showAdvanced')}
            aria-label={isExpanded ? t('controlnet.hideAdvanced') : t('controlnet.showAdvanced')}
            onClick={toggleIsExpanded}
            variant="ghost"
            icon={
              <Icon
                boxSize={4}
                as={PiCaretUpBold}
                transform={isExpanded ? 'rotate(0deg)' : 'rotate(180deg)'}
                transitionProperty="common"
                transitionDuration="normal"
              />
            }
          />
        </Flex>
        <Flex gap={3} w="full">
          <Flex flexDir="column" gap={3} w="full" h="full">
            {controlAdapter.type === 'controlnet' && (
              <ControlAdapterControlModeSelect
                controlMode={controlAdapter.controlMode}
                onChange={onChangeControlMode}
              />
            )}
            <ControlAdapterWeight weight={controlAdapter.weight} onChange={onChangeWeight} />
            <ControlAdapterBeginEndStepPct
              beginEndStepPct={controlAdapter.beginEndStepPct}
              onChange={onChangeBeginEndStepPct}
            />
          </Flex>
          <Flex alignItems="center" justifyContent="center" h={36} w={36} aspectRatio="1/1">
            <ControlAdapterImagePreview
              controlAdapter={controlAdapter}
              onChangeImage={onChangeImage}
              droppableData={droppableData}
              postUploadAction={postUploadAction}
            />
          </Flex>
        </Flex>
        {isExpanded && (
          <>
            <Divider />
            <Flex flexDir="column" gap={3} w="full">
              <ControlAdapterProcessorTypeSelect
                config={controlAdapter.processorConfig}
                onChange={onChangeProcessorConfig}
              />
              <ControlAdapterProcessorConfig
                config={controlAdapter.processorConfig}
                onChange={onChangeProcessorConfig}
              />
            </Flex>
          </>
        )}
      </Flex>
    );
  }
);

ControlAdapter.displayName = 'ControlAdapter';
@@ -0,0 +1,43 @@
import { CompositeRangeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

type Props = {
  beginEndStepPct: [number, number];
  onChange: (beginEndStepPct: [number, number]) => void;
};

const formatPct = (v: number) => `${Math.round(v * 100)}%`;
const ariaLabel = ['Begin Step %', 'End Step %'];

export const ControlAdapterBeginEndStepPct = memo(({ beginEndStepPct, onChange }: Props) => {
  const { t } = useTranslation();
  const onReset = useCallback(() => {
    onChange([0, 1]);
  }, [onChange]);

  return (
    <FormControl orientation="horizontal">
      <InformationalPopover feature="controlNetBeginEnd">
        <FormLabel m={0}>{t('controlnet.beginEndStepPercentShort')}</FormLabel>
      </InformationalPopover>
      <CompositeRangeSlider
        aria-label={ariaLabel}
        value={beginEndStepPct}
        onChange={onChange}
        onReset={onReset}
        min={0}
        max={1}
        step={0.05}
        fineStep={0.01}
        minStepsBetweenThumbs={1}
        formatValue={formatPct}
        marks
        withThumbTooltip
      />
    </FormControl>
  );
});

ControlAdapterBeginEndStepPct.displayName = 'ControlAdapterBeginEndStepPct';
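
// Illustrative note, not part of the original diff: begin/end step percentages are kept
// as fractions in [0, 1] and only rendered as percentages, e.g.:
//   formatPct(0.35) -> '35%'           (Math.round(0.35 * 100))
//   onReset()       -> onChange([0, 1]) restores the full denoising range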
@@ -0,0 +1,60 @@
import type { ComboboxOnChange } from '@invoke-ai/ui-library';
import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import type { ControlModeV2 } from 'features/controlLayers/util/controlAdapters';
import { isControlModeV2 } from 'features/controlLayers/util/controlAdapters';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { assert } from 'tsafe';

type Props = {
  controlMode: ControlModeV2;
  onChange: (controlMode: ControlModeV2) => void;
};

export const ControlAdapterControlModeSelect = memo(({ controlMode, onChange }: Props) => {
  const { t } = useTranslation();
  const CONTROL_MODE_DATA = useMemo(
    () => [
      { label: t('controlnet.balanced'), value: 'balanced' },
      { label: t('controlnet.prompt'), value: 'more_prompt' },
      { label: t('controlnet.control'), value: 'more_control' },
      { label: t('controlnet.megaControl'), value: 'unbalanced' },
    ],
    [t]
  );

  const handleControlModeChange = useCallback<ComboboxOnChange>(
    (v) => {
      assert(isControlModeV2(v?.value));
      onChange(v.value);
    },
    [onChange]
  );

  const value = useMemo(
    () => CONTROL_MODE_DATA.filter((o) => o.value === controlMode)[0],
    [CONTROL_MODE_DATA, controlMode]
  );

  if (!controlMode) {
    return null;
  }

  return (
    <FormControl>
      <InformationalPopover feature="controlNetControlMode">
        <FormLabel m={0}>{t('controlnet.control')}</FormLabel>
      </InformationalPopover>
      <Combobox
        value={value}
        options={CONTROL_MODE_DATA}
        onChange={handleControlModeChange}
        isClearable={false}
        isSearchable={false}
      />
    </FormControl>
  );
});

ControlAdapterControlModeSelect.displayName = 'ControlAdapterControlModeSelect';
@@ -0,0 +1,217 @@
import type { SystemStyleObject } from '@invoke-ai/ui-library';
import { Box, Flex, Spinner, useShiftModifier } from '@invoke-ai/ui-library';
import { skipToken } from '@reduxjs/toolkit/query';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import IAIDndImage from 'common/components/IAIDndImage';
import IAIDndImageIcon from 'common/components/IAIDndImageIcon';
import { setBoundingBoxDimensions } from 'features/canvas/store/canvasSlice';
import { heightChanged, widthChanged } from 'features/controlLayers/store/controlLayersSlice';
import type { ControlNetConfigV2, T2IAdapterConfigV2 } from 'features/controlLayers/util/controlAdapters';
import type { ImageDraggableData, TypesafeDroppableData } from 'features/dnd/types';
import { calculateNewSize } from 'features/parameters/components/ImageSize/calculateNewSize';
import { selectOptimalDimension } from 'features/parameters/store/generationSlice';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import { memo, useCallback, useEffect, useMemo, useState } from 'react';
import { useTranslation } from 'react-i18next';
import { PiArrowCounterClockwiseBold, PiFloppyDiskBold, PiRulerBold } from 'react-icons/pi';
import {
  useAddImageToBoardMutation,
  useChangeImageIsIntermediateMutation,
  useGetImageDTOQuery,
  useRemoveImageFromBoardMutation,
} from 'services/api/endpoints/images';
import type { ImageDTO, PostUploadAction } from 'services/api/types';

type Props = {
  controlAdapter: ControlNetConfigV2 | T2IAdapterConfigV2;
  onChangeImage: (imageDTO: ImageDTO | null) => void;
  droppableData: TypesafeDroppableData;
  postUploadAction: PostUploadAction;
};

export const ControlAdapterImagePreview = memo(
  ({ controlAdapter, onChangeImage, droppableData, postUploadAction }: Props) => {
    const { t } = useTranslation();
    const dispatch = useAppDispatch();
    const autoAddBoardId = useAppSelector((s) => s.gallery.autoAddBoardId);
    const isConnected = useAppSelector((s) => s.system.isConnected);
    const activeTabName = useAppSelector(activeTabNameSelector);
    const optimalDimension = useAppSelector(selectOptimalDimension);
    const shift = useShiftModifier();

    const [isMouseOverImage, setIsMouseOverImage] = useState(false);

    const { currentData: controlImage, isError: isErrorControlImage } = useGetImageDTOQuery(
      controlAdapter.image?.name ?? skipToken
    );
    const { currentData: processedControlImage, isError: isErrorProcessedControlImage } = useGetImageDTOQuery(
      controlAdapter.processedImage?.name ?? skipToken
    );

    const [changeIsIntermediate] = useChangeImageIsIntermediateMutation();
    const [addToBoard] = useAddImageToBoardMutation();
    const [removeFromBoard] = useRemoveImageFromBoardMutation();
    const handleResetControlImage = useCallback(() => {
      onChangeImage(null);
    }, [onChangeImage]);

    const handleSaveControlImage = useCallback(async () => {
      if (!processedControlImage) {
        return;
      }

      await changeIsIntermediate({
        imageDTO: processedControlImage,
        is_intermediate: false,
      }).unwrap();

      if (autoAddBoardId !== 'none') {
        addToBoard({
          imageDTO: processedControlImage,
          board_id: autoAddBoardId,
        });
      } else {
        removeFromBoard({ imageDTO: processedControlImage });
      }
    }, [processedControlImage, changeIsIntermediate, autoAddBoardId, addToBoard, removeFromBoard]);

    const handleSetControlImageToDimensions = useCallback(() => {
      if (!controlImage) {
        return;
      }

      if (activeTabName === 'canvas') {
        dispatch(
          setBoundingBoxDimensions({ width: controlImage.width, height: controlImage.height }, optimalDimension)
        );
      } else {
        const options = { updateAspectRatio: true, clamp: true };

        if (shift) {
          const { width, height } = controlImage;
          dispatch(widthChanged({ width, ...options }));
          dispatch(heightChanged({ height, ...options }));
        } else {
          const { width, height } = calculateNewSize(
            controlImage.width / controlImage.height,
            optimalDimension * optimalDimension
          );
          dispatch(widthChanged({ width, ...options }));
          dispatch(heightChanged({ height, ...options }));
        }
      }
    }, [controlImage, activeTabName, dispatch, optimalDimension, shift]);

    const handleMouseEnter = useCallback(() => {
      setIsMouseOverImage(true);
    }, []);

    const handleMouseLeave = useCallback(() => {
      setIsMouseOverImage(false);
    }, []);

    const draggableData = useMemo<ImageDraggableData | undefined>(() => {
      if (controlImage) {
        return {
          id: controlAdapter.id,
          payloadType: 'IMAGE_DTO',
          payload: { imageDTO: controlImage },
        };
      }
    }, [controlImage, controlAdapter.id]);

    const shouldShowProcessedImage =
      controlImage &&
      processedControlImage &&
      !isMouseOverImage &&
      !controlAdapter.processorPendingBatchId &&
      controlAdapter.processorConfig !== null;

    useEffect(() => {
      if (isConnected && (isErrorControlImage || isErrorProcessedControlImage)) {
        handleResetControlImage();
      }
    }, [handleResetControlImage, isConnected, isErrorControlImage, isErrorProcessedControlImage]);

    return (
      <Flex
        onMouseEnter={handleMouseEnter}
        onMouseLeave={handleMouseLeave}
        position="relative"
        w="full"
        h={36}
        alignItems="center"
        justifyContent="center"
      >
        <IAIDndImage
          draggableData={draggableData}
          droppableData={droppableData}
          imageDTO={controlImage}
          isDropDisabled={shouldShowProcessedImage}
          postUploadAction={postUploadAction}
        />

        <Box
          position="absolute"
          top={0}
          insetInlineStart={0}
          w="full"
          h="full"
          opacity={shouldShowProcessedImage ? 1 : 0}
          transitionProperty="common"
          transitionDuration="normal"
          pointerEvents="none"
        >
          <IAIDndImage
            draggableData={draggableData}
            droppableData={droppableData}
            imageDTO={processedControlImage}
            isUploadDisabled={true}
          />
        </Box>

        <>
          <IAIDndImageIcon
            onClick={handleResetControlImage}
            icon={controlImage ? <PiArrowCounterClockwiseBold size={16} /> : undefined}
            tooltip={t('controlnet.resetControlImage')}
          />
          <IAIDndImageIcon
            onClick={handleSaveControlImage}
            icon={controlImage ? <PiFloppyDiskBold size={16} /> : undefined}
            tooltip={t('controlnet.saveControlImage')}
            styleOverrides={saveControlImageStyleOverrides}
          />
          <IAIDndImageIcon
            onClick={handleSetControlImageToDimensions}
            icon={controlImage ? <PiRulerBold size={16} /> : undefined}
            tooltip={shift ? t('controlnet.setControlImageDimensionsForce') : t('controlnet.setControlImageDimensions')}
            styleOverrides={setControlImageDimensionsStyleOverrides}
          />
        </>

        {controlAdapter.processorPendingBatchId !== null && (
          <Flex
            position="absolute"
            top={0}
            insetInlineStart={0}
            w="full"
            h="full"
            alignItems="center"
            justifyContent="center"
            opacity={0.8}
            borderRadius="base"
            bg="base.900"
          >
            <Spinner size="xl" color="base.400" />
          </Flex>
        )}
      </Flex>
    );
  }
);

ControlAdapterImagePreview.displayName = 'ControlAdapterImagePreview';

const saveControlImageStyleOverrides: SystemStyleObject = { mt: 6 };
const setControlImageDimensionsStyleOverrides: SystemStyleObject = { mt: 12 };
@@ -0,0 +1,63 @@
import { Combobox, FormControl, Tooltip } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { useGroupedModelCombobox } from 'common/hooks/useGroupedModelCombobox';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { useControlNetAndT2IAdapterModels } from 'services/api/hooks/modelsByType';
import type { AnyModelConfig, ControlNetModelConfig, T2IAdapterModelConfig } from 'services/api/types';

type Props = {
  modelKey: string | null;
  onChange: (modelConfig: ControlNetModelConfig | T2IAdapterModelConfig) => void;
};

export const ControlAdapterModelCombobox = memo(({ modelKey, onChange: onChangeModel }: Props) => {
  const { t } = useTranslation();
  const currentBaseModel = useAppSelector((s) => s.generation.model?.base);
  const [modelConfigs, { isLoading }] = useControlNetAndT2IAdapterModels();
  const selectedModel = useMemo(() => modelConfigs.find((m) => m.key === modelKey), [modelConfigs, modelKey]);

  const _onChange = useCallback(
    (modelConfig: ControlNetModelConfig | T2IAdapterModelConfig | null) => {
      if (!modelConfig) {
        return;
      }
      onChangeModel(modelConfig);
    },
    [onChangeModel]
  );

  const getIsDisabled = useCallback(
    (model: AnyModelConfig): boolean => {
      const isCompatible = currentBaseModel === model.base;
      const hasMainModel = Boolean(currentBaseModel);
      return !hasMainModel || !isCompatible;
    },
    [currentBaseModel]
  );

  const { options, value, onChange, noOptionsMessage } = useGroupedModelCombobox({
    modelConfigs,
    onChange: _onChange,
    selectedModel,
    getIsDisabled,
    isLoading,
    groupByType: true,
  });

  return (
    <Tooltip label={selectedModel?.description}>
      <FormControl isInvalid={!value || currentBaseModel !== selectedModel?.base} w="full">
        <Combobox
          options={options}
          placeholder={t('controlnet.selectModel')}
          value={value}
          onChange={onChange}
          noOptionsMessage={noOptionsMessage}
        />
      </FormControl>
    </Tooltip>
  );
});

ControlAdapterModelCombobox.displayName = 'ControlAdapterModelCombobox';
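
// Illustrative note, not part of the original diff: getIsDisabled greys out models whose
// base does not match the currently selected main model, e.g. with an SDXL main model:
//   getIsDisabled({ base: 'sd-1', ... } as AnyModelConfig)  -> true   (incompatible base)
//   getIsDisabled({ base: 'sdxl', ... } as AnyModelConfig)  -> false  (selectable)
// With no main model selected at all, every option is disabled.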
@@ -0,0 +1,85 @@
import type { ProcessorConfig } from 'features/controlLayers/util/controlAdapters';
import { memo } from 'react';

import { CannyProcessor } from './processors/CannyProcessor';
import { ColorMapProcessor } from './processors/ColorMapProcessor';
import { ContentShuffleProcessor } from './processors/ContentShuffleProcessor';
import { DepthAnythingProcessor } from './processors/DepthAnythingProcessor';
import { DWOpenposeProcessor } from './processors/DWOpenposeProcessor';
import { HedProcessor } from './processors/HedProcessor';
import { LineartProcessor } from './processors/LineartProcessor';
import { MediapipeFaceProcessor } from './processors/MediapipeFaceProcessor';
import { MidasDepthProcessor } from './processors/MidasDepthProcessor';
import { MlsdImageProcessor } from './processors/MlsdImageProcessor';
import { PidiProcessor } from './processors/PidiProcessor';

type Props = {
  config: ProcessorConfig | null;
  onChange: (config: ProcessorConfig | null) => void;
};

export const ControlAdapterProcessorConfig = memo(({ config, onChange }: Props) => {
  if (!config) {
    return null;
  }

  if (config.type === 'canny_image_processor') {
    return <CannyProcessor onChange={onChange} config={config} />;
  }

  if (config.type === 'color_map_image_processor') {
    return <ColorMapProcessor onChange={onChange} config={config} />;
  }

  if (config.type === 'depth_anything_image_processor') {
    return <DepthAnythingProcessor onChange={onChange} config={config} />;
  }

  if (config.type === 'hed_image_processor') {
    return <HedProcessor onChange={onChange} config={config} />;
  }

  if (config.type === 'lineart_image_processor') {
    return <LineartProcessor onChange={onChange} config={config} />;
  }

  if (config.type === 'content_shuffle_image_processor') {
    return <ContentShuffleProcessor onChange={onChange} config={config} />;
  }

  if (config.type === 'lineart_anime_image_processor') {
    // No configurable options for this processor
    return null;
  }

  if (config.type === 'mediapipe_face_processor') {
    return <MediapipeFaceProcessor onChange={onChange} config={config} />;
  }

  if (config.type === 'midas_depth_image_processor') {
    return <MidasDepthProcessor onChange={onChange} config={config} />;
  }

  if (config.type === 'mlsd_image_processor') {
    return <MlsdImageProcessor onChange={onChange} config={config} />;
  }

  if (config.type === 'normalbae_image_processor') {
    // No configurable options for this processor
    return null;
  }

  if (config.type === 'dw_openpose_image_processor') {
    return <DWOpenposeProcessor onChange={onChange} config={config} />;
  }

  if (config.type === 'pidi_image_processor') {
    return <PidiProcessor onChange={onChange} config={config} />;
  }

  if (config.type === 'zoe_depth_image_processor') {
    return null;
  }
});

ControlAdapterProcessorConfig.displayName = 'ControlAdapterProcessorConfig';
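
// Illustrative note, not part of the original diff: ControlAdapterProcessorConfig narrows
// on config.type and renders the matching processor UI; processors with no configurable
// options (lineart_anime, normalbae, zoe_depth) deliberately render null. A hypothetical
// new processor type would be wired in with one more guard (FooProcessor and
// 'foo_image_processor' are made-up names for illustration only):
//   if (config.type === 'foo_image_processor') {
//     return <FooProcessor onChange={onChange} config={config} />;
//   }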
@@ -0,0 +1,70 @@
import type { ComboboxOnChange } from '@invoke-ai/ui-library';
import { Combobox, Flex, FormControl, FormLabel, IconButton } from '@invoke-ai/ui-library';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import type { ProcessorConfig } from 'features/controlLayers/util/controlAdapters';
import { CA_PROCESSOR_DATA, isProcessorTypeV2 } from 'features/controlLayers/util/controlAdapters';
import { configSelector } from 'features/system/store/configSelectors';
import { includes, map } from 'lodash-es';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiXBold } from 'react-icons/pi';
import { assert } from 'tsafe';

type Props = {
  config: ProcessorConfig | null;
  onChange: (config: ProcessorConfig | null) => void;
};

const selectDisabledProcessors = createMemoizedSelector(
  configSelector,
  (config) => config.sd.disabledControlNetProcessors
);

export const ControlAdapterProcessorTypeSelect = memo(({ config, onChange }: Props) => {
  const { t } = useTranslation();
  const disabledProcessors = useAppSelector(selectDisabledProcessors);
  const options = useMemo(() => {
    return map(CA_PROCESSOR_DATA, ({ labelTKey }, type) => ({ value: type, label: t(labelTKey) })).filter(
      (o) => !includes(disabledProcessors, o.value)
    );
  }, [disabledProcessors, t]);

  const _onChange = useCallback<ComboboxOnChange>(
    (v) => {
      if (!v) {
        onChange(null);
      } else {
        assert(isProcessorTypeV2(v.value));
        onChange(CA_PROCESSOR_DATA[v.value].buildDefaults());
      }
    },
    [onChange]
  );
  const clearProcessor = useCallback(() => {
    onChange(null);
  }, [onChange]);
  const value = useMemo(() => options.find((o) => o.value === config?.type) ?? null, [options, config?.type]);

  return (
    <Flex gap={2}>
      <FormControl>
        <InformationalPopover feature="controlNetProcessor">
          <FormLabel m={0}>{t('controlnet.processor')}</FormLabel>
        </InformationalPopover>
        <Combobox value={value} options={options} onChange={_onChange} isSearchable={false} isClearable={false} />
      </FormControl>
      <IconButton
        aria-label={t('controlLayers.clearProcessor')}
        onClick={clearProcessor}
        isDisabled={!config}
        icon={<PiXBold />}
        variant="ghost"
        size="sm"
      />
    </Flex>
  );
});

ControlAdapterProcessorTypeSelect.displayName = 'ControlAdapterProcessorTypeSelect';
@@ -0,0 +1,55 @@
import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';

type Props = {
  weight: number;
  onChange: (weight: number) => void;
};

const formatValue = (v: number) => v.toFixed(2);
const marks = [0, 1, 2];

export const ControlAdapterWeight = memo(({ weight, onChange }: Props) => {
  const { t } = useTranslation();
  const initial = useAppSelector((s) => s.config.sd.ca.weight.initial);
  const sliderMin = useAppSelector((s) => s.config.sd.ca.weight.sliderMin);
  const sliderMax = useAppSelector((s) => s.config.sd.ca.weight.sliderMax);
  const numberInputMin = useAppSelector((s) => s.config.sd.ca.weight.numberInputMin);
  const numberInputMax = useAppSelector((s) => s.config.sd.ca.weight.numberInputMax);
  const coarseStep = useAppSelector((s) => s.config.sd.ca.weight.coarseStep);
  const fineStep = useAppSelector((s) => s.config.sd.ca.weight.fineStep);

  return (
    <FormControl orientation="horizontal">
      <InformationalPopover feature="controlNetWeight">
        <FormLabel m={0}>{t('controlnet.weight')}</FormLabel>
      </InformationalPopover>
      <CompositeSlider
        value={weight}
        onChange={onChange}
        defaultValue={initial}
        min={sliderMin}
        max={sliderMax}
        step={coarseStep}
        fineStep={fineStep}
        marks={marks}
        formatValue={formatValue}
      />
      <CompositeNumberInput
        value={weight}
        onChange={onChange}
        min={numberInputMin}
        max={numberInputMax}
        step={coarseStep}
        fineStep={fineStep}
        maxW={20}
        defaultValue={initial}
      />
    </FormControl>
  );
});

ControlAdapterWeight.displayName = 'ControlAdapterWeight';
@@ -0,0 +1,72 @@
import { Box, Flex } from '@invoke-ai/ui-library';
import { ControlAdapterBeginEndStepPct } from 'features/controlLayers/components/ControlAndIPAdapter/ControlAdapterBeginEndStepPct';
import { ControlAdapterWeight } from 'features/controlLayers/components/ControlAndIPAdapter/ControlAdapterWeight';
import { IPAdapterImagePreview } from 'features/controlLayers/components/ControlAndIPAdapter/IPAdapterImagePreview';
import { IPAdapterMethod } from 'features/controlLayers/components/ControlAndIPAdapter/IPAdapterMethod';
import { IPAdapterModelSelect } from 'features/controlLayers/components/ControlAndIPAdapter/IPAdapterModelSelect';
import type { CLIPVisionModelV2, IPAdapterConfigV2, IPMethodV2 } from 'features/controlLayers/util/controlAdapters';
import type { TypesafeDroppableData } from 'features/dnd/types';
import { memo } from 'react';
import type { ImageDTO, IPAdapterModelConfig, PostUploadAction } from 'services/api/types';

type Props = {
  ipAdapter: IPAdapterConfigV2;
  onChangeBeginEndStepPct: (beginEndStepPct: [number, number]) => void;
  onChangeWeight: (weight: number) => void;
  onChangeIPMethod: (method: IPMethodV2) => void;
  onChangeModel: (modelConfig: IPAdapterModelConfig) => void;
  onChangeCLIPVisionModel: (clipVisionModel: CLIPVisionModelV2) => void;
  onChangeImage: (imageDTO: ImageDTO | null) => void;
  droppableData: TypesafeDroppableData;
  postUploadAction: PostUploadAction;
};

export const IPAdapter = memo(
  ({
    ipAdapter,
    onChangeBeginEndStepPct,
    onChangeWeight,
    onChangeIPMethod,
    onChangeModel,
    onChangeCLIPVisionModel,
    onChangeImage,
    droppableData,
    postUploadAction,
  }: Props) => {
    return (
      <Flex flexDir="column" gap={4} position="relative" w="full">
        <Flex gap={3} alignItems="center" w="full">
          <Box minW={0} w="full" transitionProperty="common" transitionDuration="0.1s">
            <IPAdapterModelSelect
              modelKey={ipAdapter.model?.key ?? null}
              onChangeModel={onChangeModel}
              clipVisionModel={ipAdapter.clipVisionModel}
              onChangeCLIPVisionModel={onChangeCLIPVisionModel}
            />
          </Box>
        </Flex>
        <Flex gap={4} w="full" alignItems="center">
          <Flex flexDir="column" gap={3} w="full">
            <IPAdapterMethod method={ipAdapter.method} onChange={onChangeIPMethod} />
            <ControlAdapterWeight weight={ipAdapter.weight} onChange={onChangeWeight} />
            <ControlAdapterBeginEndStepPct
              beginEndStepPct={ipAdapter.beginEndStepPct}
              onChange={onChangeBeginEndStepPct}
            />
          </Flex>
          <Flex alignItems="center" justifyContent="center" h={36} w={36} aspectRatio="1/1">
            <IPAdapterImagePreview
              image={ipAdapter.image}
              onChangeImage={onChangeImage}
              ipAdapterId={ipAdapter.id}
              droppableData={droppableData}
              postUploadAction={postUploadAction}
            />
          </Flex>
        </Flex>
      </Flex>
    );
  }
);

IPAdapter.displayName = 'IPAdapter';
@@ -0,0 +1,113 @@
import type { SystemStyleObject } from '@invoke-ai/ui-library';
import { Flex, useShiftModifier } from '@invoke-ai/ui-library';
import { skipToken } from '@reduxjs/toolkit/query';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import IAIDndImage from 'common/components/IAIDndImage';
import IAIDndImageIcon from 'common/components/IAIDndImageIcon';
import { setBoundingBoxDimensions } from 'features/canvas/store/canvasSlice';
import { heightChanged, widthChanged } from 'features/controlLayers/store/controlLayersSlice';
import type { ImageWithDims } from 'features/controlLayers/util/controlAdapters';
import type { ImageDraggableData, TypesafeDroppableData } from 'features/dnd/types';
import { calculateNewSize } from 'features/parameters/components/ImageSize/calculateNewSize';
import { selectOptimalDimension } from 'features/parameters/store/generationSlice';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import { memo, useCallback, useEffect, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiArrowCounterClockwiseBold, PiRulerBold } from 'react-icons/pi';
import { useGetImageDTOQuery } from 'services/api/endpoints/images';
import type { ImageDTO, PostUploadAction } from 'services/api/types';

type Props = {
  image: ImageWithDims | null;
  onChangeImage: (imageDTO: ImageDTO | null) => void;
  ipAdapterId: string; // required for the dnd/upload interactions
  droppableData: TypesafeDroppableData;
  postUploadAction: PostUploadAction;
};

export const IPAdapterImagePreview = memo(
  ({ image, onChangeImage, ipAdapterId, droppableData, postUploadAction }: Props) => {
    const { t } = useTranslation();
    const dispatch = useAppDispatch();
    const isConnected = useAppSelector((s) => s.system.isConnected);
    const activeTabName = useAppSelector(activeTabNameSelector);
    const optimalDimension = useAppSelector(selectOptimalDimension);
    const shift = useShiftModifier();

    const { currentData: controlImage, isError: isErrorControlImage } = useGetImageDTOQuery(image?.name ?? skipToken);
    const handleResetControlImage = useCallback(() => {
      onChangeImage(null);
    }, [onChangeImage]);

    const handleSetControlImageToDimensions = useCallback(() => {
      if (!controlImage) {
        return;
      }

      if (activeTabName === 'canvas') {
        dispatch(
          setBoundingBoxDimensions({ width: controlImage.width, height: controlImage.height }, optimalDimension)
        );
      } else {
        const options = { updateAspectRatio: true, clamp: true };
        if (shift) {
          const { width, height } = controlImage;
          dispatch(widthChanged({ width, ...options }));
          dispatch(heightChanged({ height, ...options }));
        } else {
          const { width, height } = calculateNewSize(
            controlImage.width / controlImage.height,
            optimalDimension * optimalDimension
          );
          dispatch(widthChanged({ width, ...options }));
          dispatch(heightChanged({ height, ...options }));
        }
      }
    }, [controlImage, activeTabName, dispatch, optimalDimension, shift]);

    const draggableData = useMemo<ImageDraggableData | undefined>(() => {
      if (controlImage) {
        return {
          id: ipAdapterId,
          payloadType: 'IMAGE_DTO',
          payload: { imageDTO: controlImage },
        };
      }
    }, [controlImage, ipAdapterId]);

    useEffect(() => {
      if (isConnected && isErrorControlImage) {
        handleResetControlImage();
      }
    }, [handleResetControlImage, isConnected, isErrorControlImage]);

    return (
      <Flex position="relative" w="full" h={36} alignItems="center" justifyContent="center">
        <IAIDndImage
          draggableData={draggableData}
          droppableData={droppableData}
          imageDTO={controlImage}
          postUploadAction={postUploadAction}
        />

        <>
          <IAIDndImageIcon
            onClick={handleResetControlImage}
            icon={controlImage ? <PiArrowCounterClockwiseBold size={16} /> : undefined}
            tooltip={t('controlnet.resetControlImage')}
          />
          <IAIDndImageIcon
            onClick={handleSetControlImageToDimensions}
            icon={controlImage ? <PiRulerBold size={16} /> : undefined}
            tooltip={shift ? t('controlnet.setControlImageDimensionsForce') : t('controlnet.setControlImageDimensions')}
            styleOverrides={setControlImageDimensionsStyleOverrides}
          />
        </>
      </Flex>
    );
  }
);

IPAdapterImagePreview.displayName = 'IPAdapterImagePreview';

const setControlImageDimensionsStyleOverrides: SystemStyleObject = { mt: 6 };
@@ -0,0 +1,44 @@
import type { ComboboxOnChange } from '@invoke-ai/ui-library';
import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import type { IPMethodV2 } from 'features/controlLayers/util/controlAdapters';
import { isIPMethodV2 } from 'features/controlLayers/util/controlAdapters';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { assert } from 'tsafe';

type Props = {
  method: IPMethodV2;
  onChange: (method: IPMethodV2) => void;
};

export const IPAdapterMethod = memo(({ method, onChange }: Props) => {
  const { t } = useTranslation();
  const options: { label: string; value: IPMethodV2 }[] = useMemo(
    () => [
      { label: t('controlnet.full'), value: 'full' },
      { label: `${t('controlnet.style')} (${t('common.beta')})`, value: 'style' },
      { label: `${t('controlnet.composition')} (${t('common.beta')})`, value: 'composition' },
    ],
    [t]
  );
  const _onChange = useCallback<ComboboxOnChange>(
    (v) => {
      assert(isIPMethodV2(v?.value));
      onChange(v.value);
    },
    [onChange]
  );
  const value = useMemo(() => options.find((o) => o.value === method), [options, method]);

  return (
    <FormControl>
      <InformationalPopover feature="ipAdapterMethod">
        <FormLabel>{t('controlnet.ipAdapterMethod')}</FormLabel>
      </InformationalPopover>
      <Combobox value={value} options={options} onChange={_onChange} />
    </FormControl>
  );
});

IPAdapterMethod.displayName = 'IPAdapterMethod';
@@ -0,0 +1,100 @@
import type { ComboboxOnChange } from '@invoke-ai/ui-library';
import { Combobox, Flex, FormControl, Tooltip } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { useGroupedModelCombobox } from 'common/hooks/useGroupedModelCombobox';
import type { CLIPVisionModelV2 } from 'features/controlLayers/util/controlAdapters';
import { isCLIPVisionModelV2 } from 'features/controlLayers/util/controlAdapters';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { useIPAdapterModels } from 'services/api/hooks/modelsByType';
import type { AnyModelConfig, IPAdapterModelConfig } from 'services/api/types';
import { assert } from 'tsafe';

const CLIP_VISION_OPTIONS = [
  { label: 'ViT-H', value: 'ViT-H' },
  { label: 'ViT-G', value: 'ViT-G' },
];

type Props = {
  modelKey: string | null;
  onChangeModel: (modelConfig: IPAdapterModelConfig) => void;
  clipVisionModel: CLIPVisionModelV2;
  onChangeCLIPVisionModel: (clipVisionModel: CLIPVisionModelV2) => void;
};

export const IPAdapterModelSelect = memo(
  ({ modelKey, onChangeModel, clipVisionModel, onChangeCLIPVisionModel }: Props) => {
    const { t } = useTranslation();
    const currentBaseModel = useAppSelector((s) => s.generation.model?.base);
    const [modelConfigs, { isLoading }] = useIPAdapterModels();
    const selectedModel = useMemo(() => modelConfigs.find((m) => m.key === modelKey), [modelConfigs, modelKey]);

    const _onChangeModel = useCallback(
      (modelConfig: IPAdapterModelConfig | null) => {
        if (!modelConfig) {
          return;
        }
        onChangeModel(modelConfig);
      },
      [onChangeModel]
    );

    const _onChangeCLIPVisionModel = useCallback<ComboboxOnChange>(
      (v) => {
        assert(isCLIPVisionModelV2(v?.value));
        onChangeCLIPVisionModel(v.value);
      },
      [onChangeCLIPVisionModel]
    );

    const getIsDisabled = useCallback(
      (model: AnyModelConfig): boolean => {
        const isCompatible = currentBaseModel === model.base;
        const hasMainModel = Boolean(currentBaseModel);
        return !hasMainModel || !isCompatible;
      },
      [currentBaseModel]
    );

    const { options, value, onChange, noOptionsMessage } = useGroupedModelCombobox({
      modelConfigs,
      onChange: _onChangeModel,
      selectedModel,
      getIsDisabled,
      isLoading,
    });

    const clipVisionModelValue = useMemo(
      () => CLIP_VISION_OPTIONS.find((o) => o.value === clipVisionModel),
      [clipVisionModel]
    );

    return (
      <Flex gap={4}>
        <Tooltip label={selectedModel?.description}>
          <FormControl isInvalid={!value || currentBaseModel !== selectedModel?.base} w="full">
            <Combobox
              options={options}
              placeholder={t('controlnet.selectModel')}
              value={value}
              onChange={onChange}
              noOptionsMessage={noOptionsMessage}
            />
          </FormControl>
        </Tooltip>
        {selectedModel?.format === 'checkpoint' && (
          <FormControl isInvalid={!value || currentBaseModel !== selectedModel?.base} width="max-content" minWidth={28}>
            <Combobox
              options={CLIP_VISION_OPTIONS}
              placeholder={t('controlnet.selectCLIPVisionModel')}
              value={clipVisionModelValue}
              onChange={_onChangeCLIPVisionModel}
            />
          </FormControl>
        )}
      </Flex>
    );
  }
);

IPAdapterModelSelect.displayName = 'IPAdapterModelSelect';
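
// Illustrative note, not part of the original diff: the second combobox is only rendered
// when the selected IP Adapter model is checkpoint-format, in which case the user picks
// the CLIP Vision encoder ('ViT-H' or 'ViT-G') explicitly:
//   selectedModel?.format === 'checkpoint' -> model combobox + CLIP Vision combobox
//   any other format                       -> model combobox only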
@@ -0,0 +1,67 @@
import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library';
import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types';
import { CA_PROCESSOR_DATA, type CannyProcessorConfig } from 'features/controlLayers/util/controlAdapters';
import { useCallback } from 'react';
import { useTranslation } from 'react-i18next';

import ProcessorWrapper from './ProcessorWrapper';

type Props = ProcessorComponentProps<CannyProcessorConfig>;
const DEFAULTS = CA_PROCESSOR_DATA['canny_image_processor'].buildDefaults();

export const CannyProcessor = ({ onChange, config }: Props) => {
  const { t } = useTranslation();
  const handleLowThresholdChanged = useCallback(
    (v: number) => {
      onChange({ ...config, low_threshold: v });
    },
    [onChange, config]
  );
  const handleHighThresholdChanged = useCallback(
    (v: number) => {
      onChange({ ...config, high_threshold: v });
    },
    [onChange, config]
  );

  return (
    <ProcessorWrapper>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.lowThreshold')}</FormLabel>
        <CompositeSlider
          value={config.low_threshold}
          onChange={handleLowThresholdChanged}
          defaultValue={DEFAULTS.low_threshold}
          min={0}
          max={255}
        />
        <CompositeNumberInput
          value={config.low_threshold}
          onChange={handleLowThresholdChanged}
          defaultValue={DEFAULTS.low_threshold}
          min={0}
          max={255}
        />
      </FormControl>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.highThreshold')}</FormLabel>
        <CompositeSlider
          value={config.high_threshold}
          onChange={handleHighThresholdChanged}
          defaultValue={DEFAULTS.high_threshold}
          min={0}
          max={255}
        />
        <CompositeNumberInput
          value={config.high_threshold}
          onChange={handleHighThresholdChanged}
          defaultValue={DEFAULTS.high_threshold}
          min={0}
          max={255}
        />
      </FormControl>
    </ProcessorWrapper>
  );
};

CannyProcessor.displayName = 'CannyProcessor';
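
// Illustrative note, not part of the original diff: each processor control spreads the
// existing config so unrelated fields are preserved, e.g. for the Canny thresholds
// (both constrained to 0-255 by the inputs above):
//   handleLowThresholdChanged(100) -> onChange({ ...config, low_threshold: 100 })
// DEFAULTS comes from CA_PROCESSOR_DATA['canny_image_processor'].buildDefaults().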
@@ -0,0 +1,47 @@
import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library';
import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types';
import { CA_PROCESSOR_DATA, type ColorMapProcessorConfig } from 'features/controlLayers/util/controlAdapters';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

import ProcessorWrapper from './ProcessorWrapper';

type Props = ProcessorComponentProps<ColorMapProcessorConfig>;
const DEFAULTS = CA_PROCESSOR_DATA['color_map_image_processor'].buildDefaults();

export const ColorMapProcessor = memo(({ onChange, config }: Props) => {
  const { t } = useTranslation();
  const handleColorMapTileSizeChanged = useCallback(
    (v: number) => {
      onChange({ ...config, color_map_tile_size: v });
    },
    [config, onChange]
  );

  return (
    <ProcessorWrapper>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.colorMapTileSize')}</FormLabel>
        <CompositeSlider
          value={config.color_map_tile_size}
          defaultValue={DEFAULTS.color_map_tile_size}
          onChange={handleColorMapTileSizeChanged}
          min={1}
          max={256}
          step={1}
          marks
        />
        <CompositeNumberInput
          value={config.color_map_tile_size}
          defaultValue={DEFAULTS.color_map_tile_size}
          onChange={handleColorMapTileSizeChanged}
          min={1}
          max={4096}
          step={1}
        />
      </FormControl>
    </ProcessorWrapper>
  );
});

ColorMapProcessor.displayName = 'ColorMapProcessor';
@@ -0,0 +1,79 @@
import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library';
import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types';
import type { ContentShuffleProcessorConfig } from 'features/controlLayers/util/controlAdapters';
import { CA_PROCESSOR_DATA } from 'features/controlLayers/util/controlAdapters';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

import ProcessorWrapper from './ProcessorWrapper';

type Props = ProcessorComponentProps<ContentShuffleProcessorConfig>;
const DEFAULTS = CA_PROCESSOR_DATA['content_shuffle_image_processor'].buildDefaults();

export const ContentShuffleProcessor = memo(({ onChange, config }: Props) => {
  const { t } = useTranslation();

  const handleWChanged = useCallback(
    (v: number) => {
      onChange({ ...config, w: v });
    },
    [config, onChange]
  );

  const handleHChanged = useCallback(
    (v: number) => {
      onChange({ ...config, h: v });
    },
    [config, onChange]
  );

  const handleFChanged = useCallback(
    (v: number) => {
      onChange({ ...config, f: v });
    },
    [config, onChange]
  );

  return (
    <ProcessorWrapper>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.w')}</FormLabel>
        <CompositeSlider
          value={config.w}
          defaultValue={DEFAULTS.w}
          onChange={handleWChanged}
          min={0}
          max={4096}
          marks
        />
        <CompositeNumberInput value={config.w} defaultValue={DEFAULTS.w} onChange={handleWChanged} min={0} max={4096} />
      </FormControl>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.h')}</FormLabel>
        <CompositeSlider
          value={config.h}
          defaultValue={DEFAULTS.h}
          onChange={handleHChanged}
          min={0}
          max={4096}
          marks
        />
        <CompositeNumberInput value={config.h} defaultValue={DEFAULTS.h} onChange={handleHChanged} min={0} max={4096} />
      </FormControl>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.f')}</FormLabel>
        <CompositeSlider
          value={config.f}
          defaultValue={DEFAULTS.f}
          onChange={handleFChanged}
          min={0}
          max={4096}
          marks
        />
        <CompositeNumberInput value={config.f} defaultValue={DEFAULTS.f} onChange={handleFChanged} min={0} max={4096} />
      </FormControl>
    </ProcessorWrapper>
  );
});

ContentShuffleProcessor.displayName = 'ContentShuffleProcessor';
@@ -0,0 +1,62 @@
import { Flex, FormControl, FormLabel, Switch } from '@invoke-ai/ui-library';
import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types';
import type { DWOpenposeProcessorConfig } from 'features/controlLayers/util/controlAdapters';
import { CA_PROCESSOR_DATA } from 'features/controlLayers/util/controlAdapters';
import type { ChangeEvent } from 'react';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

import ProcessorWrapper from './ProcessorWrapper';

type Props = ProcessorComponentProps<DWOpenposeProcessorConfig>;
const DEFAULTS = CA_PROCESSOR_DATA['dw_openpose_image_processor'].buildDefaults();

export const DWOpenposeProcessor = memo(({ onChange, config }: Props) => {
  const { t } = useTranslation();

  const handleDrawBodyChanged = useCallback(
    (e: ChangeEvent<HTMLInputElement>) => {
      onChange({ ...config, draw_body: e.target.checked });
    },
    [config, onChange]
  );

  const handleDrawFaceChanged = useCallback(
    (e: ChangeEvent<HTMLInputElement>) => {
      onChange({ ...config, draw_face: e.target.checked });
    },
    [config, onChange]
  );

  const handleDrawHandsChanged = useCallback(
    (e: ChangeEvent<HTMLInputElement>) => {
      onChange({ ...config, draw_hands: e.target.checked });
    },
    [config, onChange]
  );

  return (
    <ProcessorWrapper>
      <Flex sx={{ flexDir: 'row', gap: 6 }}>
        <FormControl w="max-content">
          <FormLabel m={0}>{t('controlnet.body')}</FormLabel>
          <Switch defaultChecked={DEFAULTS.draw_body} isChecked={config.draw_body} onChange={handleDrawBodyChanged} />
        </FormControl>
        <FormControl w="max-content">
          <FormLabel m={0}>{t('controlnet.face')}</FormLabel>
          <Switch defaultChecked={DEFAULTS.draw_face} isChecked={config.draw_face} onChange={handleDrawFaceChanged} />
        </FormControl>
        <FormControl w="max-content">
          <FormLabel m={0}>{t('controlnet.hands')}</FormLabel>
          <Switch
            defaultChecked={DEFAULTS.draw_hands}
            isChecked={config.draw_hands}
            onChange={handleDrawHandsChanged}
          />
        </FormControl>
      </Flex>
    </ProcessorWrapper>
  );
});

DWOpenposeProcessor.displayName = 'DWOpenposeProcessor';
@@ -0,0 +1,46 @@
import type { ComboboxOnChange } from '@invoke-ai/ui-library';
import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library';
import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types';
import type { DepthAnythingModelSize, DepthAnythingProcessorConfig } from 'features/controlLayers/util/controlAdapters';
import { isDepthAnythingModelSize } from 'features/controlLayers/util/controlAdapters';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';

import ProcessorWrapper from './ProcessorWrapper';

type Props = ProcessorComponentProps<DepthAnythingProcessorConfig>;

export const DepthAnythingProcessor = memo(({ onChange, config }: Props) => {
  const { t } = useTranslation();
  const handleModelSizeChange = useCallback<ComboboxOnChange>(
    (v) => {
      if (!isDepthAnythingModelSize(v?.value)) {
        return;
      }
      onChange({ ...config, model_size: v.value });
    },
    [config, onChange]
  );

  const options: { label: string; value: DepthAnythingModelSize }[] = useMemo(
    () => [
      { label: t('controlnet.small'), value: 'small' },
      { label: t('controlnet.base'), value: 'base' },
      { label: t('controlnet.large'), value: 'large' },
    ],
    [t]
  );

  const value = useMemo(() => options.filter((o) => o.value === config.model_size)[0], [options, config.model_size]);

  return (
    <ProcessorWrapper>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.modelSize')}</FormLabel>
        <Combobox value={value} options={options} onChange={handleModelSizeChange} isSearchable={false} />
      </FormControl>
    </ProcessorWrapper>
  );
});

DepthAnythingProcessor.displayName = 'DepthAnythingProcessor';
@@ -0,0 +1,32 @@
import { FormControl, FormLabel, Switch } from '@invoke-ai/ui-library';
import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types';
import type { HedProcessorConfig } from 'features/controlLayers/util/controlAdapters';
import type { ChangeEvent } from 'react';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

import ProcessorWrapper from './ProcessorWrapper';

type Props = ProcessorComponentProps<HedProcessorConfig>;

export const HedProcessor = memo(({ onChange, config }: Props) => {
  const { t } = useTranslation();

  const handleScribbleChanged = useCallback(
    (e: ChangeEvent<HTMLInputElement>) => {
      onChange({ ...config, scribble: e.target.checked });
    },
    [config, onChange]
  );

  return (
    <ProcessorWrapper>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.scribble')}</FormLabel>
        <Switch isChecked={config.scribble} onChange={handleScribbleChanged} />
      </FormControl>
    </ProcessorWrapper>
  );
});

HedProcessor.displayName = 'HedProcessor';
@@ -0,0 +1,32 @@
import { FormControl, FormLabel, Switch } from '@invoke-ai/ui-library';
import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types';
import type { LineartProcessorConfig } from 'features/controlLayers/util/controlAdapters';
import type { ChangeEvent } from 'react';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

import ProcessorWrapper from './ProcessorWrapper';

type Props = ProcessorComponentProps<LineartProcessorConfig>;

export const LineartProcessor = memo(({ onChange, config }: Props) => {
  const { t } = useTranslation();

  const handleCoarseChanged = useCallback(
    (e: ChangeEvent<HTMLInputElement>) => {
      onChange({ ...config, coarse: e.target.checked });
    },
    [config, onChange]
  );

  return (
    <ProcessorWrapper>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.coarse')}</FormLabel>
        <Switch isChecked={config.coarse} onChange={handleCoarseChanged} />
      </FormControl>
    </ProcessorWrapper>
  );
});

LineartProcessor.displayName = 'LineartProcessor';
@@ -0,0 +1,73 @@
import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library';
import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types';
import { CA_PROCESSOR_DATA, type MediapipeFaceProcessorConfig } from 'features/controlLayers/util/controlAdapters';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

import ProcessorWrapper from './ProcessorWrapper';

type Props = ProcessorComponentProps<MediapipeFaceProcessorConfig>;
const DEFAULTS = CA_PROCESSOR_DATA['mediapipe_face_processor'].buildDefaults();

export const MediapipeFaceProcessor = memo(({ onChange, config }: Props) => {
  const { t } = useTranslation();

  const handleMaxFacesChanged = useCallback(
    (v: number) => {
      onChange({ ...config, max_faces: v });
    },
    [config, onChange]
  );

  const handleMinConfidenceChanged = useCallback(
    (v: number) => {
      onChange({ ...config, min_confidence: v });
    },
    [config, onChange]
  );

  return (
    <ProcessorWrapper>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.maxFaces')}</FormLabel>
        <CompositeSlider
          value={config.max_faces}
          onChange={handleMaxFacesChanged}
          defaultValue={DEFAULTS.max_faces}
          min={1}
          max={20}
          marks
        />
        <CompositeNumberInput
          value={config.max_faces}
          onChange={handleMaxFacesChanged}
          defaultValue={DEFAULTS.max_faces}
          min={1}
          max={20}
        />
      </FormControl>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.minConfidence')}</FormLabel>
        <CompositeSlider
          value={config.min_confidence}
          onChange={handleMinConfidenceChanged}
          defaultValue={DEFAULTS.min_confidence}
          min={0}
          max={1}
          step={0.01}
          marks
        />
        <CompositeNumberInput
          value={config.min_confidence}
          onChange={handleMinConfidenceChanged}
          defaultValue={DEFAULTS.min_confidence}
          min={0}
          max={1}
          step={0.01}
        />
      </FormControl>
    </ProcessorWrapper>
  );
});

MediapipeFaceProcessor.displayName = 'MediapipeFaceProcessor';
@@ -0,0 +1,76 @@
import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library';
import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types';
import type { MidasDepthProcessorConfig } from 'features/controlLayers/util/controlAdapters';
import { CA_PROCESSOR_DATA } from 'features/controlLayers/util/controlAdapters';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

import ProcessorWrapper from './ProcessorWrapper';

type Props = ProcessorComponentProps<MidasDepthProcessorConfig>;
const DEFAULTS = CA_PROCESSOR_DATA['midas_depth_image_processor'].buildDefaults();

export const MidasDepthProcessor = memo(({ onChange, config }: Props) => {
  const { t } = useTranslation();

  const handleAMultChanged = useCallback(
    (v: number) => {
      onChange({ ...config, a_mult: v });
    },
    [config, onChange]
  );

  const handleBgThChanged = useCallback(
    (v: number) => {
      onChange({ ...config, bg_th: v });
    },
    [config, onChange]
  );

  return (
    <ProcessorWrapper>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.amult')}</FormLabel>
        <CompositeSlider
          value={config.a_mult}
          onChange={handleAMultChanged}
          defaultValue={DEFAULTS.a_mult}
          min={0}
          max={20}
          step={0.01}
          marks
        />
        <CompositeNumberInput
          value={config.a_mult}
          onChange={handleAMultChanged}
          defaultValue={DEFAULTS.a_mult}
          min={0}
          max={20}
          step={0.01}
        />
      </FormControl>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.bgth')}</FormLabel>
        <CompositeSlider
          value={config.bg_th}
          onChange={handleBgThChanged}
          defaultValue={DEFAULTS.bg_th}
          min={0}
          max={20}
          step={0.01}
          marks
        />
        <CompositeNumberInput
          value={config.bg_th}
          onChange={handleBgThChanged}
          defaultValue={DEFAULTS.bg_th}
          min={0}
          max={20}
          step={0.01}
        />
      </FormControl>
    </ProcessorWrapper>
  );
});

MidasDepthProcessor.displayName = 'MidasDepthProcessor';
@@ -0,0 +1,76 @@
import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library';
import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types';
import type { MlsdProcessorConfig } from 'features/controlLayers/util/controlAdapters';
import { CA_PROCESSOR_DATA } from 'features/controlLayers/util/controlAdapters';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

import ProcessorWrapper from './ProcessorWrapper';

type Props = ProcessorComponentProps<MlsdProcessorConfig>;
const DEFAULTS = CA_PROCESSOR_DATA['mlsd_image_processor'].buildDefaults();

export const MlsdImageProcessor = memo(({ onChange, config }: Props) => {
  const { t } = useTranslation();

  const handleThrDChanged = useCallback(
    (v: number) => {
      onChange({ ...config, thr_d: v });
    },
    [config, onChange]
  );

  const handleThrVChanged = useCallback(
    (v: number) => {
      onChange({ ...config, thr_v: v });
    },
    [config, onChange]
  );

  return (
    <ProcessorWrapper>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.w')} </FormLabel>
        <CompositeSlider
          value={config.thr_d}
          onChange={handleThrDChanged}
          defaultValue={DEFAULTS.thr_d}
          min={0}
          max={1}
          step={0.01}
          marks
        />
        <CompositeNumberInput
          value={config.thr_d}
          onChange={handleThrDChanged}
          defaultValue={DEFAULTS.thr_d}
          min={0}
          max={1}
          step={0.01}
        />
      </FormControl>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.h')} </FormLabel>
        <CompositeSlider
          value={config.thr_v}
          onChange={handleThrVChanged}
          defaultValue={DEFAULTS.thr_v}
          min={0}
          max={1}
          step={0.01}
          marks
        />
        <CompositeNumberInput
          value={config.thr_v}
          onChange={handleThrVChanged}
          defaultValue={DEFAULTS.thr_v}
          min={0}
          max={1}
          step={0.01}
        />
      </FormControl>
    </ProcessorWrapper>
  );
});

MlsdImageProcessor.displayName = 'MlsdImageProcessor';
@@ -0,0 +1,43 @@
import { FormControl, FormLabel, Switch } from '@invoke-ai/ui-library';
import type { ProcessorComponentProps } from 'features/controlLayers/components/ControlAndIPAdapter/processors/types';
import type { PidiProcessorConfig } from 'features/controlLayers/util/controlAdapters';
import type { ChangeEvent } from 'react';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

import ProcessorWrapper from './ProcessorWrapper';

type Props = ProcessorComponentProps<PidiProcessorConfig>;

export const PidiProcessor = memo(({ onChange, config }: Props) => {
  const { t } = useTranslation();

  const handleScribbleChanged = useCallback(
    (e: ChangeEvent<HTMLInputElement>) => {
      onChange({ ...config, scribble: e.target.checked });
    },
    [config, onChange]
  );

  const handleSafeChanged = useCallback(
    (e: ChangeEvent<HTMLInputElement>) => {
      onChange({ ...config, safe: e.target.checked });
    },
    [config, onChange]
  );

  return (
    <ProcessorWrapper>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.scribble')}</FormLabel>
        <Switch isChecked={config.scribble} onChange={handleScribbleChanged} />
      </FormControl>
      <FormControl>
        <FormLabel m={0}>{t('controlnet.safe')}</FormLabel>
        <Switch isChecked={config.safe} onChange={handleSafeChanged} />
      </FormControl>
    </ProcessorWrapper>
  );
});

PidiProcessor.displayName = 'PidiProcessor';
@@ -0,0 +1,15 @@
import { Flex } from '@invoke-ai/ui-library';
import type { PropsWithChildren } from 'react';
import { memo } from 'react';

type Props = PropsWithChildren;

const ProcessorWrapper = (props: Props) => {
  return (
    <Flex flexDir="column" gap={3}>
      {props.children}
    </Flex>
  );
};

export default memo(ProcessorWrapper);
@@ -0,0 +1,6 @@
import type { ProcessorConfig } from 'features/controlLayers/util/controlAdapters';

export type ProcessorComponentProps<T extends ProcessorConfig> = {
  onChange: (config: T) => void;
  config: T;
};
@@ -0,0 +1,24 @@
import { Flex } from '@invoke-ai/ui-library';
import type { Meta, StoryObj } from '@storybook/react';
import { ControlLayersEditor } from 'features/controlLayers/components/ControlLayersEditor';

const meta: Meta<typeof ControlLayersEditor> = {
  title: 'Feature/ControlLayers',
  tags: ['autodocs'],
  component: ControlLayersEditor,
};

export default meta;
type Story = StoryObj<typeof ControlLayersEditor>;

const Component = () => {
  return (
    <Flex w={1500} h={1500}>
      <ControlLayersEditor />
    </Flex>
  );
};

export const Default: Story = {
  render: Component,
};
@@ -1,10 +1,10 @@
 /* eslint-disable i18next/no-literal-string */
 import { Flex } from '@invoke-ai/ui-library';
-import { RegionalPromptsToolbar } from 'features/regionalPrompts/components/RegionalPromptsToolbar';
-import { StageComponent } from 'features/regionalPrompts/components/StageComponent';
+import { ControlLayersToolbar } from 'features/controlLayers/components/ControlLayersToolbar';
+import { StageComponent } from 'features/controlLayers/components/StageComponent';
 import { memo } from 'react';

-export const RegionalPromptsEditor = memo(() => {
+export const ControlLayersEditor = memo(() => {
   return (
     <Flex
       position="relative"
@@ -15,10 +15,10 @@ export const RegionalPromptsEditor = memo(() => {
       alignItems="center"
       justifyContent="center"
     >
-      <RegionalPromptsToolbar />
+      <ControlLayersToolbar />
       <StageComponent />
     </Flex>
   );
 });

-RegionalPromptsEditor.displayName = 'RegionalPromptsEditor';
+ControlLayersEditor.displayName = 'ControlLayersEditor';
@@ -0,0 +1,69 @@
/* eslint-disable i18next/no-literal-string */
import { Flex } from '@invoke-ai/ui-library';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppSelector } from 'app/store/storeHooks';
import { IAINoContentFallback } from 'common/components/IAIImageFallback';
import ScrollableContent from 'common/components/OverlayScrollbars/ScrollableContent';
import { AddLayerButton } from 'features/controlLayers/components/AddLayerButton';
import { CALayer } from 'features/controlLayers/components/CALayer/CALayer';
import { DeleteAllLayersButton } from 'features/controlLayers/components/DeleteAllLayersButton';
import { IILayer } from 'features/controlLayers/components/IILayer/IILayer';
import { IPALayer } from 'features/controlLayers/components/IPALayer/IPALayer';
import { RGLayer } from 'features/controlLayers/components/RGLayer/RGLayer';
import { isRenderableLayer, selectControlLayersSlice } from 'features/controlLayers/store/controlLayersSlice';
import type { Layer } from 'features/controlLayers/store/types';
import { partition } from 'lodash-es';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';

const selectLayerIdTypePairs = createMemoizedSelector(selectControlLayersSlice, (controlLayers) => {
  const [renderableLayers, ipAdapterLayers] = partition(controlLayers.present.layers, isRenderableLayer);
  return [...ipAdapterLayers, ...renderableLayers].map((l) => ({ id: l.id, type: l.type })).reverse();
});

export const ControlLayersPanelContent = memo(() => {
  const { t } = useTranslation();
  const layerIdTypePairs = useAppSelector(selectLayerIdTypePairs);
  return (
    <Flex flexDir="column" gap={2} w="full" h="full">
      <Flex justifyContent="space-around">
        <AddLayerButton />
        <DeleteAllLayersButton />
      </Flex>
      {layerIdTypePairs.length > 0 && (
        <ScrollableContent>
          <Flex flexDir="column" gap={2} data-testid="control-layers-layer-list">
            {layerIdTypePairs.map(({ id, type }) => (
              <LayerWrapper key={id} id={id} type={type} />
            ))}
          </Flex>
        </ScrollableContent>
      )}
      {layerIdTypePairs.length === 0 && <IAINoContentFallback icon={null} label={t('controlLayers.noLayersAdded')} />}
    </Flex>
  );
});

ControlLayersPanelContent.displayName = 'ControlLayersPanelContent';

type LayerWrapperProps = {
  id: string;
  type: Layer['type'];
};

const LayerWrapper = memo(({ id, type }: LayerWrapperProps) => {
  if (type === 'regional_guidance_layer') {
    return <RGLayer key={id} layerId={id} />;
  }
  if (type === 'control_adapter_layer') {
    return <CALayer key={id} layerId={id} />;
  }
  if (type === 'ip_adapter_layer') {
    return <IPALayer key={id} layerId={id} />;
  }
  if (type === 'initial_image_layer') {
    return <IILayer key={id} layerId={id} />;
  }
});

LayerWrapper.displayName = 'LayerWrapper';
Some files were not shown because too many files have changed in this diff.