Compare commits
203 Commits
lstein/thr...feat/contr
| Author | SHA1 | Date |
|---|---|---|
|  | 907ff165be |  |
|  | 53c8c3b4f5 |  |
|  | 8262c31866 |  |
|  | b940ae8dbb |  |
|  | 845d1524ad |  |
|  | 6c82b694a7 |  |
|  | f1fcc3fb74 |  |
|  | 2dd59d31d0 |  |
|  | 3f79812dc6 |  |
|  | 055b2207cb |  |
|  | 19cdd5a99b |  |
|  | 5db66e00b6 |  |
|  | 76337e13f5 |  |
|  | eb4ca4042e |  |
|  | 594bf6fef1 |  |
|  | 6f2e8d5217 |  |
|  | 52ae15c167 |  |
|  | 2c4128d44e |  |
|  | 01b106d939 |  |
|  | 68f1f87c6f |  |
|  | c2c99b8650 |  |
|  | 896b77cf56 |  |
|  | 6f7d221f57 |  |
|  | fba4085939 |  |
|  | 48ad005732 |  |
|  | 9ce4bd1182 |  |
|  | 39b7ace273 |  |
|  | 319c56f844 |  |
|  | 389a0d2810 |  |
|  | fe33acedad |  |
|  | eab18c7385 |  |
|  | 8e98085530 |  |
|  | 5396e998b3 |  |
|  | fc98089960 |  |
|  | dd0b4dc744 |  |
|  | ddeba190bc |  |
|  | 3a610e1a65 |  |
|  | e10e22440d |  |
|  | f4e8a91bcf |  |
|  | ce7fbdb01d |  |
|  | 4da6623700 |  |
|  | 0e3ca59e49 |  |
|  | e06f2229ac |  |
|  | 5962d96f27 |  |
|  | d4854c4fac |  |
|  | 46801c076f |  |
|  | 9370572169 |  |
|  | ace65325ff |  |
|  | e6d890888c |  |
|  | 8e7f581065 |  |
|  | 85ef3f51e7 |  |
|  | 8fdc8a8da5 |  |
|  | 52d56e96a5 |  |
|  | c013fe5b5d |  |
|  | ddf7ddc2c1 |  |
|  | 4a0774b260 |  |
|  | 17e401cb8c |  |
|  | 29a590cced |  |
|  | 7deafa838b |  |
|  | 20757d1c02 |  |
|  | 5134de7cfa |  |
|  | b1a6ba552b |  |
|  | cd21d2f2b6 |  |
|  | 9dc28373d8 |  |
|  | ffe7d5785b |  |
|  | a2e2f0858d |  |
|  | f73c70ca96 |  |
|  | e2240feae4 |  |
|  | e06348bfab |  |
|  | 8fb970d436 |  |
|  | 15256ed3a4 |  |
|  | 89a15f78dd |  |
|  | 8fc20c837b |  |
|  | 8dfe196c4f |  |
|  | 9e27fd9b90 |  |
|  | 2771328853 |  |
|  | a481607d3f |  |
|  | 1e3cebbf42 |  |
|  | d523556558 |  |
|  | da523fa32f |  |
|  | ab9b5f3b95 |  |
|  | f32bd5dd10 |  |
|  | 190ba5af59 |  |
|  | cb29ac63a8 |  |
|  | 603989dc0d |  |
|  | 2872ae2aab |  |
|  | b7cdda0781 |  |
|  | 267940a77e |  |
|  | 8d77c5ca96 |  |
|  | 0795d8764f |  |
|  | 2db56306e4 |  |
|  | 70fec9ddab |  |
|  | 909f538fb5 |  |
|  | bab8b6d240 |  |
|  | f2f49bd8d0 |  |
|  | b8e0810ed1 |  |
|  | 6cb9167a1b |  |
|  | 09dfcc4277 |  |
|  | 82eb1f1075 |  |
|  | 187cf906fa |  |
|  | 82554b25fe |  |
|  | 039091c5d4 |  |
|  | d76bf4444c |  |
|  | 82496fee14 |  |
|  | c2b99e7545 |  |
|  | e918168f7a |  |
|  | 6e36c275c9 |  |
|  | 6affe42310 |  |
|  | 170bbd7da3 |  |
|  | f6d5e93020 |  |
|  | f2515d9480 |  |
|  | 4d8f17c69d |  |
|  | 3a987b2e72 |  |
|  | 4e3f58552c |  |
|  | 77d9657980 |  |
|  | 12cae33dcd |  |
|  | 1e5310793c |  |
|  | a0b5930340 |  |
|  | 53ed252168 |  |
|  | a683379dda |  |
|  | 899aa1d251 |  |
|  | 5f940bf3b3 |  |
|  | 1cd814cba0 |  |
|  | a1251c8e04 |  |
|  | 509514f11d |  |
|  | c557402dbb |  |
|  | 495df9fd1b |  |
|  | 3db9a07eea |  |
|  | 9fd7eb2e0e |  |
|  | 9263f1090e |  |
|  | 135ab0a3e8 |  |
|  | b9b89ad210 |  |
|  | 72c19987d5 |  |
|  | 8439e30798 |  |
|  | 84d6578855 |  |
|  | 0073fc8619 |  |
|  | 2fbc6dc315 |  |
|  | be95fd753e |  |
|  | 0724eb9e0a |  |
|  | 6a4440e52b |  |
|  | 07c48b2fd1 |  |
|  | 055f5b2d4b |  |
|  | fface339ae |  |
|  | 2ec9dab595 |  |
|  | 9f00e055ac |  |
|  | aca5c6de9a |  |
|  | c291b82b94 |  |
|  | f9320475fd |  |
|  | 9c3a556813 |  |
|  | 0b6ef7eb7d |  |
|  | 6ba48af0a9 |  |
|  | 40fffec0b6 |  |
|  | 23f0c7035c |  |
|  | 94787b7251 |  |
|  | d8db618de0 |  |
|  | 5ae2fb0d2b |  |
|  | 5b1d7a2367 |  |
|  | f3ae9c513e |  |
|  | ff74370eda |  |
|  | 19d67b29e7 |  |
|  | 52e7e0b31b |  |
|  | 446d87516a |  |
|  | 2e7fc055c4 |  |
|  | 0f7e329e76 |  |
|  | a690cca5b5 |  |
|  | f29bafd6ec |  |
|  | 632346b2e2 |  |
|  | 0f18898865 |  |
|  | 700131fab2 |  |
|  | 634d6bb8a6 |  |
|  | 0aa7193d3b |  |
|  | 2fbf245c3d |  |
|  | 39c14eb2ac |  |
|  | c89fa4b635 |  |
|  | e943913f58 |  |
|  | 4ada094c5c |  |
|  | 893e199677 |  |
|  | 3c5a0c95b3 |  |
|  | 71a07ee5a7 |  |
|  | 2a0a765ec4 |  |
|  | ec08151009 |  |
|  | 186e98da5e |  |
|  | dea9a5da7a |  |
|  | bda0000acd |  |
|  | 4b678f2416 |  |
|  | 43fbbfb848 |  |
|  | b71bcab691 |  |
|  | c364c85915 |  |
|  | 3773bfbc74 |  |
|  | 949437b4f0 |  |
|  | efcb3a9d08 |  |
|  | b4eeaaa63c |  |
|  | 54bd7c7f04 |  |
|  | 3240f98f4e |  |
|  | 3fa7170566 |  |
|  | 187d7c1cab |  |
|  | 9685760fac |  |
|  | b0fb4950ed |  |
|  | 42c440c73f |  |
|  | 416afd2781 |  |
|  | afa84a149c |  |
|  | be659364c2 |  |
|  | 56098f370c |  |
4  .github/workflows/mkdocs-material.yml  (vendored)

@@ -2,7 +2,7 @@ name: mkdocs-material
on:
  push:
    branches:
      - 'refs/heads/v2.3'
      - 'refs/heads/main'

permissions:
  contents: write

@@ -43,7 +43,7 @@ jobs:
        --verbose

    - name: deploy to gh-pages
      if: ${{ github.ref == 'refs/heads/v2.3' }}
      if: ${{ github.ref == 'refs/heads/main' }}
      run: |
        python -m \
          mkdocs gh-deploy \
59  README.md

@@ -36,15 +36,6 @@

</div>

_**Note: This is an alpha release. Bugs are expected and not all
features are fully implemented. Please use the GitHub [Issues
pages](https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen)
to report unexpected problems. Also note that InvokeAI root directory
which contains models, outputs and configuration files, has changed
between the 2.x and 3.x release. If you wish to use your v2.3 root
directory with v3.0, please follow the directions in [Migrating a 2.3
root directory to 3.0](#migrating-to-3).**_

InvokeAI is a leading creative engine built to empower professionals
and enthusiasts alike. Generate and create stunning visual media using
the latest AI-driven technologies. InvokeAI offers an industry leading

@@ -264,19 +255,24 @@ old models directory (which contains the models selected at install
time) will be renamed `models.orig` and can be deleted once you have
confirmed that the migration was successful.

If you wish, you can pass the 2.3 root directory to both `--from` and
`--to` in order to update in place. Warning: this directory will no
longer be usable with InvokeAI 2.3.
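A minimal sketch of the in-place update described above, assuming the migration script is invoked as `invokeai-migrate3` (the script name and the example path are assumptions here; the `--from`/`--to` flags are the ones named in the README text):

```bash
# Hypothetical in-place update of a 2.3 root directory (path is an example).
# Per the README, the directory will no longer be usable with InvokeAI 2.3 afterwards.
invokeai-migrate3 \
    --from ~/invokeai \
    --to   ~/invokeai
```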
#### Migrating in place

For the adventurous, you may do an in-place upgrade from 2.3 to 3.0
without touching the command line. The recipe is as follows>
without touching the command line. **This recipe does not work on
Windows platforms due to a bug in the Windows version of the 2.3
upgrade script.** See the next section for a Windows recipe.

##### For Mac and Linux Users:

1. Launch the InvokeAI launcher script in your current v2.3 root directory.

2. Select option [9] "Update InvokeAI" to bring up the updater dialog.

3a. During the alpha release phase, select option [3] and manually
enter the tag name `v3.0.0+a2`.

3b. Once 3.0 is released, select option [1] to upgrade to the latest release.
3. Select option [1] to upgrade to the latest release.

4. Once the upgrade is finished you will be returned to the launcher
menu. Select option [7] "Re-run the configure script to fix a broken

@@ -295,14 +291,33 @@ worked, you can safely remove these files. Alternatively you can
restore a working v2.3 directory by removing the new files and
restoring the ".orig" files' original names.

##### For Windows Users:

Windows users can upgrade with the following steps:

1. Enter the 2.3 root directory you wish to upgrade
2. Launch `invoke.sh` or `invoke.bat`
3. Select the "Developer's console" option [8]
4. Type the following commands:

```
pip install "invokeai @ https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v3.0.0" --use-pep517 --upgrade
invokeai-configure --root .
```

(Replace `v3.0.0` with the current release number if this document is out of date).

The first command will install and upgrade new software to run
InvokeAI. The second will prepare the 2.3 directory for use with 3.0.
You may now launch the WebUI in the usual way, by selecting option [1]
from the launcher script.
#### Migration Caveats

The migration script will migrate your invokeai settings and models,
including textual inversion models, LoRAs and merges that you may have
installed previously. However it does **not** migrate the generated
images stored in your 2.3-format outputs directory. The released
version of 3.0 is expected to have an interface for importing an
entire directory of image files as a batch.
images stored in your 2.3-format outputs directory. You will need to
manually import selected images into the 3.0 gallery via drag-and-drop.

## Hardware Requirements

@@ -314,9 +329,12 @@ AMD card (using the ROCm driver).

You will need one of the following:

- An NVIDIA-based graphics card with 4 GB or more VRAM memory.
- An NVIDIA-based graphics card with 4 GB or more VRAM memory. 6-8 GB
  of VRAM is highly recommended for rendering using the Stable
  Diffusion XL models
- An Apple computer with an M1 chip.
- An AMD-based graphics card with 4GB or more VRAM memory. (Linux only)
- An AMD-based graphics card with 4GB or more VRAM memory (Linux
  only), 6-8 GB for XL rendering.

We do not recommend the GTX 1650 or 1660 series video cards. They are
unable to run in half-precision mode and do not have sufficient VRAM

@@ -349,13 +367,12 @@ Invoke AI provides an organized gallery system for easily storing, accessing, an

### Other features

- *Support for both ckpt and diffusers models*
- *SD 2.0, 2.1 support*
- *SD 2.0, 2.1, XL support*
- *Upscaling Tools*
- *Embedding Manager & Support*
- *Model Manager & Support*
- *Node-Based Architecture*
- *Node-Based Plug-&-Play UI (Beta)*
- *SDXL Support* (Coming soon)

### Latest Changes

@@ -617,8 +617,6 @@ sections describe what's new for InvokeAI.

- `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains for
  backward compatibility.
- Completely new WebGUI - launch with `python3 scripts/invoke.py --web`
- Support for [inpainting](deprecated/INPAINTING.md) and
  [outpainting](features/OUTPAINTING.md)
- img2img runs on all k\* samplers
- Support for
  [negative prompts](features/PROMPTS.md#negative-and-unconditioned-prompts)
BIN  docs/assets/control-panel-2.png  (new file, 415 KiB)
BIN  docs/assets/installing-models/model-installer-controlnet.png  (new file, 57 KiB)
BIN  docs/assets/invoke-control-panel-1.png  (new file, 37 KiB)
(modified image, name not shown)  Before: 983 KiB, After: 1.1 MiB
(modified image, name not shown)  Before: 101 KiB, After: 22 KiB
(modified image, name not shown)  Before: 29 KiB, After: 16 KiB
(modified image, name not shown)  Before: 148 KiB, After: 76 KiB
(modified image, name not shown)  Before: 637 KiB, After: 729 KiB
BIN  docs/assets/lora-example-0.png  (new file, 530 KiB)
BIN  docs/assets/lora-example-1.png  (new file, 24 KiB)
BIN  docs/assets/lora-example-2.png  (new file, 8.5 KiB)
BIN  docs/assets/lora-example-3.png  (new file, 409 KiB)
1  docs/assets/sdxl-graphs/sdxl-base-example1.json  (new file)
1  docs/assets/sdxl-graphs/sdxl-base-refine-example1.json  (new file)
BIN  docs/assets/send-to-icon.png  (new file, 41 KiB)
BIN  docs/assets/upscaling.png  (new file, 637 KiB)
@@ -1,42 +1,38 @@

# How to Contribute

## Welcome to Invoke AI

We're thrilled to have you here and we're excited for you to contribute.

Invoke AI originated as a project built by the community, and that vision carries forward today as we aim to build the best pro-grade tools available. We work together to incorporate the latest in AI/ML research, making these tools available in over 20 languages to artists and creatives around the world as part of our fully permissive OSS project designed for individual users to self-host and use.

Here are some guidelines to help you get started:

### Technical Prerequisites
## Contributing to Invoke AI
Anyone who wishes to contribute to InvokeAI, whether features, bug fixes, code cleanup, testing, code reviews, documentation or translation is very much encouraged to do so.

Front-end: You'll need a working knowledge of React and TypeScript.
To join, just raise your hand on the InvokeAI Discord server (#dev-chat) or the GitHub discussion board.

Back-end: Depending on the scope of your contribution, you may need to know SQLite, FastAPI, Python, and Socketio. Also, a good majority of the backend logic involved in processing images is built in a modular way using a concept called "Nodes", which are isolated functions that carry out individual, discrete operations. This design allows for easy contributions of novel pipelines and capabilities.
### Areas of contribution:

### How to Submit Contributions
#### Development
If you’d like to help with development, please see our [development guide](contribution_guides/development.md). If you’re unfamiliar with contributing to open source projects, there is a tutorial contained within the development guide.

To start contributing, please follow these steps:
#### Documentation
If you’d like to help with documentation, please see our [documentation guide](contribution_guides/documentation.md).

1. Familiarize yourself with our roadmap and open projects to see where your skills and interests align. These documents can serve as a source of inspiration.
2. Open a Pull Request (PR) with a clear description of the feature you're adding or the problem you're solving. Make sure your contribution aligns with the project's vision.
3. Adhere to general best practices. This includes assuming interoperability with other nodes, keeping the scope of your functions as small as possible, and organizing your code according to our architecture documents.
#### Translation
If you'd like to help with translation, please see our [translation guide](contribution_guides/translation.md).

### Types of Contributions We're Looking For
#### Tutorials
Please reach out to @imic or @hipsterusername on [Discord](https://discord.gg/ZmtBAhwWhy) to help create tutorials for InvokeAI.

We welcome all contributions that improve the project. Right now, we're especially looking for:
We hope you enjoy using our software as much as we enjoy creating it, and we hope that some of those of you who are reading this will elect to become part of our contributor community.

1. Quality of life (QOL) enhancements on the front-end.
2. New backend capabilities added through nodes.
3. Incorporating additional optimizations from the broader open-source software community.

### Communication and Decision-making Process
### Contributors

Project maintainers and code owners review PRs to ensure they align with the project's goals. They may provide design or architectural guidance, suggestions on user experience, or provide more significant feedback on the contribution itself. Expect to receive feedback on your submissions, and don't hesitate to ask questions or propose changes.
This project is a combined effort of dedicated people from across the world. [Check out the list of all these amazing people](https://invoke-ai.github.io/InvokeAI/other/CONTRIBUTORS/). We thank them for their time, hard work and effort.

For more robust discussions, or if you're planning to add capabilities not currently listed on our roadmap, please reach out to us on our Discord server. That way, we can ensure your proposed contribution aligns with the project's direction before you start writing code.
### Code of Conduct

### Code of Conduct and Contribution Expectations

We want everyone in our community to have a positive experience. To facilitate this, we've established a code of conduct and a statement of values that we expect all contributors to adhere to. Please take a moment to review these documents—they're essential to maintaining a respectful and inclusive environment.
The InvokeAI community is a welcoming place, and we want your help in maintaining that. Please review our [Code of Conduct](https://github.com/invoke-ai/InvokeAI/blob/main/CODE_OF_CONDUCT.md) to learn more - it's essential to maintaining a respectful and inclusive environment.

By making a contribution to this project, you certify that:

@@ -49,6 +45,12 @@ This disclaimer is not a license and does not grant any rights or permissions. Y

This disclaimer is provided "as is" without warranty of any kind, whether expressed or implied, including but not limited to the warranties of merchantability, fitness for a particular purpose, or non-infringement. In no event shall the authors or copyright holders be liable for any claim, damages, or other liability, whether in an action of contract, tort, or otherwise, arising from, out of, or in connection with the contribution or the use or other dealings in the contribution.

### Support

For support, please use this repository's [GitHub Issues](https://github.com/invoke-ai/InvokeAI/issues), or join the [Discord](https://discord.gg/ZmtBAhwWhy).

Original portions of the software are Copyright (c) 2023 by respective contributors.

---

Remember, your contributions help make this project great. We're excited to see what you'll bring to our community!
91  docs/contributing/contribution_guides/development.md  (new file)

@@ -0,0 +1,91 @@

# Development

## **What do I need to know to help?**

If you are looking to help with a code contribution, InvokeAI uses several different technologies under the hood: Python (Pydantic, FastAPI, diffusers) and Typescript (React, Redux Toolkit, ChakraUI, Mantine, Konva). Familiarity with StableDiffusion and image generation concepts is helpful, but not essential.

For more information, please review our area specific documentation:

* #### [InvokeAI Architecture](../ARCHITECTURE.md)
* #### [Frontend Documentation](development_guides/contributingToFrontend.md)
* #### [Node Documentation](../INVOCATIONS.md)
* #### [Local Development](../LOCAL_DEVELOPMENT.md)

If you don't feel ready to make a code contribution yet, no problem! You can also help out in other ways, such as [documentation](documentation.md) or [translation](translation.md).

There are two paths to making a development contribution:

1. Choosing an open issue to address. Open issues can be found in the [Issues](https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen) section of the InvokeAI repository. These are tagged by the issue type (bug, enhancement, etc.) along with the “good first issues” tag denoting if they are suitable for first time contributors.
   1. Additional items can be found on our roadmap <link to roadmap>. The roadmap is organized in terms of priority, and contains features of varying size and complexity. If there is an inflight item you’d like to help with, reach out to the contributor assigned to the item to see how you can help.
2. Opening a new issue or feature to add. **Please make sure you have searched through existing issues before creating new ones.**

*Regardless of what you choose, please post in the [#dev-chat](https://discord.com/channels/1020123559063990373/1049495067846524939) channel of the Discord before you start development in order to confirm that the issue or feature is aligned with the current direction of the project. We value our contributors' time and effort and want to ensure that no one’s time is being misspent.*

## Best Practices:
* Keep your pull requests small. Smaller pull requests are more likely to be accepted and merged
* Comments! Commenting your code helps reviewers easily understand your contribution
* Use Python and Typescript’s typing systems, and consider using an editor with [LSP](https://microsoft.github.io/language-server-protocol/) support to streamline development
* Make all communications public. This ensures knowledge is shared with the whole community

## **How do I make a contribution?**

Never made an open source contribution before? Wondering how contributions work in our project? Here's a quick rundown!

Before starting these steps, ensure you have your local environment [configured for development](../LOCAL_DEVELOPMENT.md).

1. Find a [good first issue](https://github.com/invoke-ai/InvokeAI/contribute) that you are interested in addressing or a feature that you would like to add. Then, reach out to our team in the [#dev-chat](https://discord.com/channels/1020123559063990373/1049495067846524939) channel of the Discord to ensure you are set up for success.
2. Fork the [InvokeAI](https://github.com/invoke-ai/InvokeAI) repository to your GitHub profile. This means that you will have a copy of the repository under **your-GitHub-username/InvokeAI**.
3. Clone the repository to your local machine using:

```bash
git clone https://github.com/your-GitHub-username/InvokeAI.git
```

If you're unfamiliar with using Git through the command line, [GitHub Desktop](https://desktop.github.com) is an easy-to-use alternative with a UI. You can do all the same steps listed here, but through the interface.

4. Create a new branch for your fix using:

```bash
git checkout -b branch-name-here
```

5. Make the appropriate changes for the issue you are trying to address or the feature that you want to add.
6. Add the file contents of the changed files to the "snapshot" git uses to manage the state of the project, also known as the index:

```bash
git add insert-paths-of-changed-files-here
```

7. Store the contents of the index with a descriptive message.

```bash
git commit -m "Insert a short message of the changes made here"
```

8. Push the changes to the remote repository using

```bash
git push origin branch-name-here
```

9. Submit a pull request to the **main** branch of the InvokeAI repository.
10. Title the pull request with a short description of the changes made and the issue or bug number associated with your change. For example, you can title an issue like so "Added more log outputting to resolve #1234".
11. In the description of the pull request, explain the changes that you made, any issues you think exist with the pull request you made, and any questions you have for the maintainer. It's OK if your pull request is not perfect (no pull request is), the reviewer will be able to help you fix any problems and improve it!
12. Wait for the pull request to be reviewed by other collaborators.
13. Make changes to the pull request if the reviewer(s) recommend them.
14. Celebrate your success after your pull request is merged!

If you’d like to learn more about contributing to Open Source projects, here is a [Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github).

## **Where can I go for help?**

If you need help, you can ask questions in the [#dev-chat](https://discord.com/channels/1020123559063990373/1049495067846524939) channel of the Discord.

For frontend related work, **@psychedelicious** is the best person to reach out to.

For backend related work, please reach out to **@blessedcoolant**, **@lstein**, **@StAlKeR7779** or **@psychedelicious**.

## **What does the Code of Conduct mean for me?**

Our [Code of Conduct](CODE_OF_CONDUCT.md) means that you are responsible for treating everyone on the project with respect and courtesy regardless of their identity. If you are the victim of any inappropriate behavior or comments as described in our Code of Conduct, we are here for you and will do our best to ensure that the abuser is reprimanded appropriately, per our code.
@@ -0,0 +1,75 @@

# Contributing to the Frontend

# InvokeAI Web UI

- [InvokeAI Web UI](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#invokeai-web-ui)
- [Stack](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#stack)
- [Contributing](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#contributing)
- [Dev Environment](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#dev-environment)
- [Production builds](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#production-builds)

The UI is a fairly straightforward Typescript React app, with the Unified Canvas being more complex.

Code is located in `invokeai/frontend/web/` for review.

## Stack

State management is Redux via [Redux Toolkit](https://github.com/reduxjs/redux-toolkit). We lean heavily on RTK:

- `createAsyncThunk` for HTTP requests
- `createEntityAdapter` for fetching images and models
- `createListenerMiddleware` for workflows

The API client and associated types are generated from the OpenAPI schema. See API_CLIENT.md.

Communication with the server is a mix of HTTP and [socket.io](https://github.com/socketio/socket.io-client) (with a simple socket.io redux middleware to help).

[Chakra-UI](https://github.com/chakra-ui/chakra-ui) & [Mantine](https://github.com/mantinedev/mantine) for components and styling.

[Konva](https://github.com/konvajs/react-konva) for the canvas, but we are pushing the limits of what is feasible with it (and HTML canvas in general). We plan to rebuild it with [PixiJS](https://github.com/pixijs/pixijs) to take advantage of WebGL's improved raster handling.

[Vite](https://vitejs.dev/) for bundling.

Localisation is via [i18next](https://github.com/i18next/react-i18next), but translation happens on our [Weblate](https://hosted.weblate.org/engage/invokeai/) project. Only the English source strings should be changed on this repo.

## Contributing

Thanks for your interest in contributing to the InvokeAI Web UI!

We encourage you to ping @psychedelicious and @blessedcoolant on [Discord](https://discord.gg/ZmtBAhwWhy) if you want to contribute, just to touch base and ensure your work doesn't conflict with anything else going on. The project is very active.

### Dev Environment

**Setup**

1. Install [node](https://nodejs.org/en/download/). You can confirm node is installed with:
```bash
node --version
```
2. Install [yarn classic](https://classic.yarnpkg.com/lang/en/) and confirm it is installed by running this:
```bash
npm install --global yarn
yarn --version
```

From `invokeai/frontend/web/` run `yarn install` to get everything set up.

Start everything in dev mode:
1. Ensure your virtual environment is running
2. Start the dev server: `yarn dev`
3. Start the InvokeAI Nodes backend: `python scripts/invokeai-web.py # run from the repo root`
4. Point your browser to the dev server address e.g. [http://localhost:5173/](http://localhost:5173/)

### VSCode Remote Dev

We've noticed an intermittent issue with the VSCode Remote Dev port forwarding. If you use this feature of VSCode, you may intermittently click the Invoke button and then get nothing until the request times out. Suggest disabling the IDE's port forwarding feature and doing it manually via SSH:

`ssh -L 9090:localhost:9090 -L 5173:localhost:5173 user@host`

### Production builds

For a number of technical and logistical reasons, we need to commit UI build artefacts to the repo.

If you submit a PR, there is a good chance we will ask you to include a separate commit with a build of the app.

To build for production, run `yarn build`.
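As a rough sketch of that workflow (the `dist/` output directory and the commit message below are assumptions, not stated project policy):

```bash
# Hypothetical example: build the UI and add the artefacts as their own commit.
cd invokeai/frontend/web
yarn build                  # production bundle, assumed to land in dist/
git add dist
git commit -m "chore(ui): production build"
```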
13  docs/contributing/contribution_guides/documentation.md  (new file)

@@ -0,0 +1,13 @@

# Documentation

Documentation is an important part of any open source project. It provides a clear and concise way to communicate how the software works, how to use it, and how to troubleshoot issues. Without proper documentation, it can be difficult for users to understand the purpose and functionality of the project.

## Contributing

All documentation is maintained in the InvokeAI GitHub repository. If you come across documentation that is out of date or incorrect, please submit a pull request with the necessary changes.

When updating or creating documentation, please keep in mind InvokeAI is a tool for everyone, not just those who have familiarity with generative art.

## Help & Questions

Please ping @imic1 or @hipsterusername in the [Discord](https://discord.com/channels/1020123559063990373/1049495067846524939) if you have any questions.
19  docs/contributing/contribution_guides/translation.md  (new file)

@@ -0,0 +1,19 @@

# Translation

InvokeAI uses [Weblate](https://weblate.org/) for translation. Weblate is a FOSS project providing a scalable translation service. Weblate automates the tedious parts of managing translation of a growing project, and the service is generously provided at no cost to FOSS projects like InvokeAI.

## Contributing

If you'd like to contribute by adding or updating a translation, please visit our [Weblate project](https://hosted.weblate.org/engage/invokeai/). You'll need to sign in with your GitHub account (a number of other accounts are supported, including Google).

Once signed in, select a language and then the Web UI component. From here you can Browse and Translate strings from English to your chosen language. Zen mode offers a simpler translation experience.

Your changes will be attributed to you in the automated PR process; you don't need to do anything else.

## Help & Questions

Please check Weblate's [documentation](https://docs.weblate.org/en/latest/index.html) or ping @Harvestor on [Discord](https://discord.com/channels/1020123559063990373/1049495067846524939) if you have any questions.

## Thanks

Thanks to the InvokeAI community for their efforts to translate the project!
11  docs/contributing/contribution_guides/tutorials.md  (new file)

@@ -0,0 +1,11 @@

# Tutorials

Tutorials help new & existing users expand their ability to use InvokeAI to the full extent of our features and services.

Currently, we have a set of tutorials available on our [YouTube channel](https://www.youtube.com/@invokeai), but as InvokeAI continues to evolve with new updates, we want to ensure that we are giving our users the resources they need to succeed.

Tutorials can be in the form of videos or article walkthroughs on a subject of your choice. We recommend focusing tutorials on the key image generation methods, or on a specific component within one of the image generation methods.

## Contributing

Please reach out to @imic or @hipsterusername on [Discord](https://discord.gg/ZmtBAhwWhy) to help create tutorials for InvokeAI.
@@ -1,8 +1,8 @@

---
title: Concepts
title: Textual Inversion Embeddings and LoRAs
---

# :material-library-shelves: The Hugging Face Concepts Library and Importing Textual Inversion files
# :material-library-shelves: Textual Inversions and LoRAs

With the advances in research, many new capabilities are available to customize the knowledge and understanding of novel concepts not originally contained in the base model.

@@ -64,21 +64,25 @@ select the embedding you'd like to use. This UI has type-ahead support, so you c

## Using LoRAs

LoRA files are models that customize the output of Stable Diffusion image generation.
Larger than embeddings, but much smaller than full models, they augment SD with improved
understanding of subjects and artistic styles.
LoRA files are models that customize the output of Stable Diffusion
image generation. Larger than embeddings, but much smaller than full
models, they augment SD with improved understanding of subjects and
artistic styles.

Unlike TI files, LoRAs do not introduce novel vocabulary into the model's known tokens. Instead,
LoRAs augment the model's weights that are applied to generate imagery. LoRAs may be supplied
with a "trigger" word that they have been explicitly trained on, or may simply apply their
effect without being triggered.
Unlike TI files, LoRAs do not introduce novel vocabulary into the
model's known tokens. Instead, LoRAs augment the model's weights that
are applied to generate imagery. LoRAs may be supplied with a
"trigger" word that they have been explicitly trained on, or may
simply apply their effect without being triggered.

LoRAs are typically stored in .safetensors files, which are the most secure way to store and transmit
these types of weights. You may install any number of `.safetensors` LoRA files simply by copying them into
the `lora` directory of the corresponding InvokeAI models directory (usually `invokeai`
in your home directory). For example, you can simply move a Stable Diffusion 1.5 LoRA file to
the `sd-1/lora` folder.
LoRAs are typically stored in .safetensors files, which are the most
secure way to store and transmit these types of weights. You may
install any number of `.safetensors` LoRA files simply by copying them
into the `autoimport/lora` directory of the corresponding InvokeAI models
directory (usually `invokeai` in your home directory).
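For instance, a minimal way to add a downloaded LoRA might look like the following (the file name and download location are hypothetical; the `autoimport/lora` path and the `~/invokeai` root are the ones described above):

```bash
# Copy a downloaded LoRA into the autoimport folder of an InvokeAI root at ~/invokeai.
cp ~/Downloads/my-style-lora.safetensors ~/invokeai/autoimport/lora/
```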
To use these when generating, open the LoRA menu item in the options panel, select the LoRAs you want to apply
and ensure that they have the appropriate weight recommended by the model provider. Typically, most LoRAs perform best at a weight of .75-1.
To use these when generating, open the LoRA menu item in the options
panel, select the LoRAs you want to apply and ensure that they have
the appropriate weight recommended by the model provider. Typically,
most LoRAs perform best at a weight of .75-1.
@@ -8,20 +8,64 @@ title: ControlNet

ControlNet

ControlNet is a powerful set of features developed by the open-source community (notably, Stanford researcher [**@ilyasviel**](https://github.com/lllyasviel)) that allows you to apply a secondary neural network model to your image generation process in Invoke.
ControlNet is a powerful set of features developed by the open-source
community (notably, Stanford researcher
[**@ilyasviel**](https://github.com/lllyasviel)) that allows you to
apply a secondary neural network model to your image generation
process in Invoke.

With ControlNet, you can get more control over the output of your image generation, providing you with a way to direct the network towards generating images that better fit your desired style or outcome.
With ControlNet, you can get more control over the output of your
image generation, providing you with a way to direct the network
towards generating images that better fit your desired style or
outcome.

### How it works

ControlNet works by analyzing an input image, pre-processing that image to identify relevant information that can be interpreted by each specific ControlNet model, and then inserting that control information into the generation process. This can be used to adjust the style, composition, or other aspects of the image to better achieve a specific result.
ControlNet works by analyzing an input image, pre-processing that
image to identify relevant information that can be interpreted by each
specific ControlNet model, and then inserting that control information
into the generation process. This can be used to adjust the style,
composition, or other aspects of the image to better achieve a
specific result.

### Models

As part of the model installation, ControlNet models can be selected including a variety of pre-trained models that have been added to achieve different effects or styles in your generated images. Further ControlNet models may require additional code functionality to also be incorporated into Invoke's Invocations folder. You should expect to follow any installation instructions for ControlNet models loaded outside the default models provided by Invoke. The default models include:
InvokeAI provides access to a series of ControlNet models that provide
different effects or styles in your generated images. Currently
InvokeAI only supports "diffuser" style ControlNet models. These are
folders that contain the files `config.json` and/or
`diffusion_pytorch_model.safetensors` and
`diffusion_pytorch_model.fp16.safetensors`. The name of the folder is
the name of the model.
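As an illustration, a "diffuser" style ControlNet model of the kind described above would look roughly like this on disk (the folder name `controlnet-canny-example` and its location are hypothetical; the file names are the ones listed above):

```bash
# Hypothetical listing of a diffusers-style ControlNet model folder.
# The folder name is the model name.
ls controlnet-canny-example/
# config.json
# diffusion_pytorch_model.safetensors
# diffusion_pytorch_model.fp16.safetensors
```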
***InvokeAI does not currently support checkpoint-format
ControlNets. These come in the form of a single file with the
extension `.safetensors`.***

Diffuser-style ControlNet models are available at HuggingFace
(http://huggingface.co) and accessed via their repo IDs (identifiers
in the format "author/modelname"). The easiest way to install them is
to use the InvokeAI model installer application. Use the
`invoke.sh`/`invoke.bat` launcher to select item [5] and then navigate
to the CONTROLNETS section. Select the models you wish to install and
press "APPLY CHANGES". You may also enter additional HuggingFace
repo_ids in the "Additional models" textbox:

{:width="640px"}

Command-line users can launch the model installer using the command
`invokeai-model-install`.

_Be aware that some ControlNet models require additional code
functionality in order to work properly, so just installing a
third-party ControlNet model may not have the desired effect._ Please
read and follow the documentation for installing a third party model
not currently included among InvokeAI's default list.

The models currently supported include:

**Canny**:
@@ -4,15 +4,19 @@ title: InvokeAI Web Server

# :material-web: InvokeAI Web Server

As of version 2.0.0, this distribution comes with a full-featured web server
(see screenshot).
## Quick guided walkthrough of the WebUI's features

To use it, launch the `invoke.sh`/`invoke.bat` script and select
option (2). Alternatively, with the InvokeAI environment active, run
the `invokeai` script by adding the `--web` option:
While most of the WebUI's features are intuitive, here is a guided walkthrough
through its various components.

### Launching the WebUI

To run the InvokeAI web server, start the `invoke.sh`/`invoke.bat`
script and select option (1). Alternatively, with the InvokeAI
environment active, run `invokeai-web`:

```bash
invokeai --web
invokeai-web
```

You can then connect to the server by pointing your web browser at

@@ -28,33 +32,32 @@ invoke.sh --host 0.0.0.0

or

```bash
invokeai --web --host 0.0.0.0
invokeai-web --host 0.0.0.0
```

## Quick guided walkthrough of the WebUI's features

While most of the WebUI's features are intuitive, here is a guided walkthrough
through its various components.
### The InvokeAI Web Interface

{:width="640px"}

The screenshot above shows the Text to Image tab of the WebUI. There are three
main sections:

1. A **control panel** on the left, which contains various settings for text to
   image generation. The most important part is the text field (currently
   showing `strawberry sushi`) for entering the text prompt, and the camera icon
   directly underneath that will render the image. We'll call this the _Invoke_
   button from now on.
1. A **control panel** on the left, which contains various settings
   for text to image generation. The most important part is the text
   field (currently showing `fantasy painting, horned demon`) for
   entering the positive text prompt, another text field right below it for an
   optional negative text prompt (concepts to exclude), and an _Invoke_ button
   to begin the image rendering process.

2. The **current image** section in the middle, which shows a large format
   version of the image you are currently working on. A series of buttons at the
   top ("image to image", "Use All", "Use Seed", etc) lets you modify the image
   in various ways.
2. The **current image** section in the middle, which shows a large
   format version of the image you are currently working on. A series
   of buttons at the top lets you modify and manipulate the image in
   various ways.

3. A \*_gallery_ section on the left that contains a history of the images you
3. A **gallery** section on the left that contains a history of the images you
   have generated. These images are read and written to the directory specified
   at launch time in `--outdir`.
   in the `INVOKEAIROOT/invokeai.yaml` initialization file, usually a directory
   named `outputs` in `INVOKEAIROOT`.

In addition to these three elements, there are a series of icons for changing
global settings, reporting bugs, and changing the theme on the upper right.

@@ -76,14 +79,10 @@ From top to bottom, these are:

   with outpainting, and modify interior portions of the image with
   inpainting, erase portions of a starting image and have the AI fill in
   the erased region from a text prompt.
4. Workflow Management (not yet implemented) - this panel will allow you to create
4. Node Editor - (experimental) this panel allows you to create
   pipelines of common operations and combine them into workflows.
5. Training (not yet implemented) - this panel will provide an interface to [textual
   inversion training](TEXTUAL_INVERSION.md) and fine tuning.

The inpainting, outpainting and postprocessing tabs are currently in
development. However, limited versions of their features can already be accessed
through the Text to Image and Image to Image tabs.
5. Model Manager - this panel allows you to import and configure new
   models using URLs, local paths, or HuggingFace diffusers repo_ids.

## Walkthrough

@@ -92,43 +91,54 @@ feature set.

### Text to Image

1. Launch the WebUI using `python scripts/invoke.py --web` and connect to it
   with your browser by accessing `http://localhost:9090`. If the browser and
   server are running on different machines on your LAN, add the option
   `--host 0.0.0.0` to the launch command line and connect to the machine
   hosting the web server using its IP address or domain name.
1. Launch the WebUI using launcher option [1] and connect to it with
   your browser by accessing `http://localhost:9090`. If the browser
   and server are running on different machines on your LAN, add the
   option `--host 0.0.0.0` to the `invoke.sh` launch command line and connect to
   the machine hosting the web server using its IP address or domain
   name.

2. If all goes well, the WebUI should come up and you'll see a green
   `connected` message on the upper right.
2. If all goes well, the WebUI should come up and you'll see a green dot
   meaning `connected` on the upper right.

{ align=right width=300px }

#### Basics

1. Generate an image by typing _strawberry sushi_ into the large prompt field
   on the upper left and then clicking on the Invoke button (the one with the
   Camera icon). After a short wait, you'll see a large image of sushi in the
1. Generate an image by typing _bluebird_ into the large prompt field
   on the upper left and then clicking on the Invoke button or pressing
   the return button.
   After a short wait, you'll see a large image of a bluebird in the
   image panel, and a new thumbnail in the gallery on the right.

   If you need more room on the screen, you can turn the gallery off by
   clicking on the **x** to the right of "Your Invocations". You can turn it
   back on later by clicking the image icon that appears in the gallery's
   place.
   If you need more room on the screen, you can turn the gallery off
   by typing the **g** hotkey. You can turn it back on later by clicking the
   image icon that appears in the gallery's place. The list of hotkeys can
   be found by clicking on the keyboard icon above the image gallery.

   The images are written into the directory indicated by the `--outdir` option
   provided at script launch time. By default, this is `outputs/img-samples`
   under the InvokeAI directory.

2. Generate a bunch of strawberry sushi images by increasing the number of
   requested images by adjusting the Images counter just below the Camera
2. Generate a bunch of bluebird images by increasing the number of
   requested images by adjusting the Images counter just below the Invoke
   button. As each is generated, it will be added to the gallery. You can
   switch the active image by clicking on the gallery thumbnails.

   If you'd like to watch the image generation progress, click the hourglass
   icon above the main image area. As generation progresses, you'll see
   increasingly detailed versions of the ultimate image.

3. Try playing with different settings, including image width and height, the
   Sampler, the Steps and the CFG scale.
3. Try playing with different settings, including changing the main
   model, the image width and height, the Scheduler, the Steps and
   the CFG scale.

   The _Model_ changes the main model. Thousands of custom models are
   now available, which generate a variety of image styles and
   subjects. While InvokeAI comes with a few starter models, it is
   easy to import new models into the application. See [Installing
   Models](../installation/050_INSTALLING_MODELS.md) for more details.

   Image _Width_ and _Height_ do what you'd expect. However, be aware that
   larger images consume more VRAM memory and take longer to generate.

   The _Sampler_ controls how the AI selects the image to display. Some
   The _Scheduler_ controls how the AI selects the image to display. Some
   samplers are more "creative" than others and will produce a wider range of
   variations (see next section). Some samplers run faster than others.
@@ -142,17 +152,27 @@ feature set.

   to the input prompt. You can go as high or low as you like, but generally
   values greater than 20 won't improve things much, and values lower than 5
   will produce unexpected images. There are complex interactions between
   _Steps_, _CFG Scale_ and the _Sampler_, so experiment to find out what works
   _Steps_, _CFG Scale_ and the _Scheduler_, so experiment to find out what works
   for you.

   The _Seed_ controls the series of values returned by InvokeAI's
   random number generator. Each unique seed value will generate a different
   image. To regenerate a previous image, simply use the original image's
   seed value. A slider to the right of the _Seed_ field will change the
   seed each time an image is generated.

4. To regenerate a previously-generated image, select the image you want and
   click _Use All_. This loads the text prompt and other original settings into
   the control panel. If you then press _Invoke_ it will regenerate the image
   exactly. You can also selectively modify the prompt or other settings to
   tweak the image.
   { align=right width=400px }

   Alternatively, you may click on _Use Seed_ to load just the image's seed,
   and leave other settings unchanged.
4. To regenerate a previously-generated image, select the image you
   want and click the asterisk ("*") button at the top of the
   image. This loads the text prompt and other original settings into
   the control panel. If you then press _Invoke_ it will regenerate
   the image exactly. You can also selectively modify the prompt or
   other settings to tweak the image.

   Alternatively, you may click on the "sprouting plant icon" to load
   just the image's seed, and leave other settings unchanged or the
   quote icon to load just the positive and negative prompts.

5. To regenerate a Stable Diffusion image that was generated by another SD
   package, you need to know its text prompt and its _Seed_. Copy-paste the

@@ -161,62 +181,22 @@ feature set.

   you Invoke, you will get something similar to the original image. It will
   not be exact unless you also set the correct values for the original
   sampler, CFG, steps and dimensions, but it will (usually) be close.

6. To save an image, right click on it to bring up a menu that will
   let you download the image, save it to a named image gallery, and
   copy it to the clipboard, among other things.

#### Variations on a theme
#### Upscaling

1. Let's try generating some variations. Select your favorite sushi image from
   the gallery to load it. Then select "Use All" from the list of buttons
   above. This will load up all the settings used to generate this image,
   including its unique seed.
   { align=right width=400px }

   Go down to the Variations section of the Control Panel and set the button to
   On. Set Variation Amount to 0.2 to generate a modest number of variations on
   the image, and also set the Image counter to `4`. Press the `invoke` button.
   This will generate a series of related images. To obtain smaller variations,
   just lower the Variation Amount. You may also experiment with changing the
   Sampler. Some samplers generate more variability than others. _k_euler_a_ is
   particularly creative, while _ddim_ is pretty conservative.

2. For even more variations, experiment with increasing the setting for
   _Perlin_. This adds a bit of noise to the image generation process. Note
   that values of Perlin noise greater than 0.15 produce poor images for
   several of the samplers.

#### Facial reconstruction and upscaling

Stable Diffusion frequently produces mangled faces, particularly when there are
multiple figures in the same scene. Stable Diffusion has particular issues with
generating reallistic eyes. InvokeAI provides the ability to reconstruct faces
using either the GFPGAN or CodeFormer libraries. For more information see
[POSTPROCESS](POSTPROCESS.md).

1. Invoke a prompt that generates a mangled face. A prompt that often gives
   this is "portrait of a lawyer, 3/4 shot" (this is not intended as a slur
   against lawyers!) Once you have an image that needs some touching up, load
   it into the Image panel, and press the button with the face icon
   (highlighted in the first screenshot below). A dialog box will appear. Leave
   _Strength_ at 0.8 and press \*Restore Faces". If all goes well, the eyes and
   other aspects of the face will be improved (see the second screenshot)

   

   

   The facial reconstruction _Strength_ field adjusts how aggressively the face
   library will try to alter the face. It can be as high as 1.0, but be aware
   that this often softens the face airbrush style, losing some details. The
   default 0.8 is usually sufficient.

2. "Upscaling" is the process of increasing the size of an image while
   retaining the sharpness. InvokeAI uses an external library called "ESRGAN"
   to do this. To invoke upscaling, simply select an image and press the _HD_
   button above it. You can select between 2X and 4X upscaling, and adjust the
   upscaling strength, which has much the same meaning as in facial
   reconstruction. Try running this on one of your previously-generated images.

3. Finally, you can run facial reconstruction and/or upscaling automatically
   after each Invocation. Go to the Advanced Options section of the Control
   Panel and turn on _Restore Face_ and/or _Upscale_.
"Upscaling" is the process of increasing the size of an image while
retaining the sharpness. InvokeAI uses an external library called
"ESRGAN" to do this. To invoke upscaling, simply select an image
and press the "expanding arrows" button above it. You can select
between 2X and 4X upscaling, and adjust the upscaling strength,
which has much the same meaning as in facial reconstruction. Try
running this on one of your previously-generated images.

### Image to Image
@@ -224,24 +204,14 @@ InvokeAI lets you take an existing image and use it as the basis for a new
|
||||
creation. You can use any sort of image, including a photograph, a scanned
|
||||
sketch, or a digital drawing, as long as it is in PNG or JPEG format.
|
||||
|
||||
For this tutorial, we'll use files named
|
||||
[Lincoln-and-Parrot-512.png](../assets/Lincoln-and-Parrot-512.png), and
|
||||
[Lincoln-and-Parrot-512-transparent.png](../assets/Lincoln-and-Parrot-512-transparent.png).
|
||||
Download these images to your local machine now to continue with the
|
||||
walkthrough.
|
||||
For this tutorial, we'll use the file named
|
||||
[Lincoln-and-Parrot-512.png](../assets/Lincoln-and-Parrot-512.png).
|
||||
|
||||
1. Click on the _Image to Image_ tab icon, which is the second icon from the
|
||||
top on the left-hand side of the screen:
|
||||
1. Click on the _Image to Image_ tab icon, which is the second icon
|
||||
from the top on the left-hand side of the screen. This will bring
|
||||
you to a screen similar to the one shown here:
|
||||
|
||||
<figure markdown>
|
||||

|
||||
</figure>
|
||||
|
||||
This will bring you to a screen similar to the one shown here:
|
||||
|
||||
<figure markdown>
|
||||
{:width="640px"}
|
||||
</figure>
|
||||
{ width="640px" }
|
||||
|
||||
2. Drag-and-drop the Lincoln-and-Parrot image into the Image panel, or click
|
||||
the blank area to get an upload dialog. The image will load into an area
|
||||
@@ -255,120 +225,99 @@ walkthrough.
|
||||
{:width="640px"}
|
||||
|
||||
4. Experiment with the different settings. The most influential one in Image to
|
||||
Image is _Image to Image Strength_ located about midway down the control
|
||||
Image is _Denoising Strength_ located about midway down the control
|
||||
panel. By default it is set to 0.75, but can range from 0.0 to 0.99. The
|
||||
higher the value, the more of the original image the AI will replace. A
|
||||
value of 0 will leave the initial image completely unchanged, while 0.99
|
||||
will replace it completely. However, the Sampler and CFG Scale also
|
||||
will replace it completely. However, the _Scheduler_ and _CFG Scale_ also
|
||||
influence the final result. You can also generate variations in the same way
|
||||
as described in Text to Image.
|
||||
|
||||
5. What if we only want to change certain part(s) of the image and leave the
|
||||
rest intact? This is called Inpainting, and a future version of the InvokeAI
|
||||
web server will provide an interactive painting canvas on which you can
|
||||
directly draw the areas you wish to Inpaint into. For now, you can achieve
|
||||
this effect by using an external photoeditor tool to make one or more
|
||||
regions of the image transparent as described in [INPAINTING.md] and
|
||||
uploading that.
|
||||
|
||||
The file
|
||||
[Lincoln-and-Parrot-512-transparent.png](../assets/Lincoln-and-Parrot-512-transparent.png)
|
||||
is a version of the earlier image in which the area around the parrot has
|
||||
been replaced with transparency. Click on the "x" in the upper right of the
|
||||
Initial Image and upload the transparent version. Using the same prompt "old
|
||||
sea captain with raven on shoulder" try Invoking an image. This time, only
|
||||
the parrot will be replaced, leaving the rest of the original image intact:
|
||||
|
||||
<figure markdown>
|
||||
{:width="640px"}
|
||||
</figure>
|
||||
5. What if we only want to change certain part(s) of the image and
|
||||
leave the rest intact? This is called Inpainting, and you can do
|
||||
it in the [Unified Canvas](UNIFIED_CANVAS.md). The Unified Canvas
|
||||
also allows you to extend the borders of the image and fill in the
|
||||
blank areas, a process called outpainting.
|
||||
|
||||
6. Would you like to modify a previously-generated image using the Image to
|
||||
Image facility? Easy! While in the Image to Image panel, hover over any of
|
||||
the gallery images to see a little menu of icons pop up. Click the picture
|
||||
icon to instantly send the selected image to Image to Image as the initial
|
||||
image.
|
||||
Image facility? Easy! While in the Image to Image panel, drag and drop any
|
||||
image in the gallery into the Initial Image area, and it will be ready for
|
||||
use. You can do the same thing with the main image display. Click on the
|
||||
_Send to_ icon to get a menu of
|
||||
commands and choose "Send to Image to Image".
|
||||
|
||||

|
||||
|
||||
You can do the same from the Text to Image tab by clicking on the picture icon
|
||||
above the central image panel. The screenshot below shows where the "use as
|
||||
initial image" icons are located.
|
||||
### Textual Inversion, LoRA and ControlNet
|
||||
|
||||
{:width="640px"}
|
||||
InvokeAI supports several different types of model files that
|
||||
extend the capabilities of the main model by adding artistic
|
||||
styles, special effects, or subjects. By mixing and matching textual
|
||||
inversion, LoRA and ControlNet models, you can achieve many
|
||||
interesting and beautiful effects.
|
||||
|
||||
### Unified Canvas
|
||||
We will give an example using a LoRA model named "Ink Scenery". This
|
||||
LoRA, which can be downloaded from Civitai (civitai.com), is
|
||||
specialized to paint landscapes that look like they were made with
|
||||
dripping India ink. To install this LoRA, we first download it and
|
||||
put it into the `autoimport/lora` folder located inside the
|
||||
`invokeai` root directory. After restarting the web server, the
|
||||
LoRA will now become available for use.
|
||||
|
||||
See the [Unified Canvas Guide](UNIFIED_CANVAS.md)
|
||||
To see this LoRA at work, we'll first generate an image without it
|
||||
using the standard `stable-diffusion-v1-5` model. Choose this
|
||||
model and enter the prompt "mountains, ink". Here is a typical
|
||||
generated image, a mountain range rendered in ink and watercolor
|
||||
wash:
|
||||
|
||||
## Reference
|
||||
{ width=512px }
|
||||
|
||||
### Additional Options
|
||||
Now let's install and activate the Ink Scenery LoRA. Go to
|
||||
https://civitai.com/models/78605/ink-scenery-or and download the LoRA
|
||||
model file to `invokeai/autoimport/lora` and restart the web
|
||||
server. (Alternatively, you can use [InvokeAI's Web Model
|
||||
Manager](../installation/050_INSTALLING_MODELS.md) to download and
|
||||
install the LoRA directly by typing its URL into the _Import
|
||||
Models_->_Location_ field).
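If you take the manual route, the whole process is just a file copy followed by a restart. Here is a minimal shell sketch, assuming a Linux or macOS shell, an InvokeAI root of `~/invokeai`, and a downloaded file named `ink-scenery.safetensors` (the actual filename served by Civitai may differ):

```bash
# Illustrative paths and filename -- adjust to your own install and download
cp ~/Downloads/ink-scenery.safetensors ~/invokeai/autoimport/lora/
# Restart the web server afterwards so the LoRA is scanned and registered.
```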
|
||||
|
||||
| parameter <img width=160 align="right"> | effect |
|
||||
| --------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| `--web_develop` | Starts the web server in development mode. |
|
||||
| `--web_verbose` | Enables verbose logging |
|
||||
| `--cors [CORS ...]` | Additional allowed origins, comma-separated |
|
||||
| `--host HOST` | Web server: Host or IP to listen on. Set to 0.0.0.0 to accept traffic from other devices on your network. |
|
||||
| `--port PORT` | Web server: Port to listen on |
|
||||
| `--certfile CERTFILE` | Web server: Path to certificate file to use for SSL. Use together with --keyfile |
|
||||
| `--keyfile KEYFILE`                     | Web server: Path to private key file to use for SSL. Use together with --certfile |
|
||||
| `--gui` | Start InvokeAI GUI - This is the "desktop mode" version of the web app. It uses Flask to create a desktop app experience of the webserver. |
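As a rough sketch of how these options combine on the command line (the `invokeai --web` entry point shown below is an assumption; substitute whatever launcher your install uses):

```bash
# Illustrative invocation -- the `invokeai --web` entry point is assumed here
invokeai --web --host 0.0.0.0 --port 9090 \
    --cors http://localhost:3000 \
    --certfile /path/to/cert.pem --keyfile /path/to/key.pem
```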
|
||||
Scroll down the control panel until you get to the LoRA accordion
|
||||
section, and open it:
|
||||
|
||||
### Web Specific Features
|
||||
{ width=512px }
|
||||
|
||||
The web interface offers an incredibly easy-to-use experience for interacting
|
||||
with the InvokeAI toolkit. For detailed guidance on individual features, see the
|
||||
Feature-specific help documents available in this directory. Note that the
|
||||
latest functionality available in the CLI may not always be available in the Web
|
||||
interface.
|
||||
Click the popup menu and select "Ink scenery". (If it isn't there, then
|
||||
the model wasn't installed to the right place, or perhaps you forgot
|
||||
to restart the web server.) The LoRA section will change to look like this:
|
||||
|
||||
#### Dark Mode & Light Mode
|
||||
{ width=512px }
|
||||
|
||||
The InvokeAI interface is available in a nano-carbon black & purple Dark Mode,
|
||||
and a "burn your eyes out Nosferatu" Light Mode. These can be toggled by
|
||||
clicking the Sun/Moon icons at the top right of the interface.
|
||||
Note that there is now a slider control for _Ink scenery_. The slider
|
||||
controls how much influence the LoRA model will have on the generated
|
||||
image.
|
||||
|
||||

|
||||
Run the "mountains, ink" prompt again and observe the change in style:
|
||||
|
||||

|
||||
{ width=512px }
|
||||
|
||||
#### Invocation Toolbar
|
||||
Try adjusting the weight slider for larger and smaller weights and
|
||||
generate the image after each adjustment. The higher the weight, the
|
||||
more influence the LoRA will have.
|
||||
|
||||
The left side of the InvokeAI interface is available for customizing the prompt
|
||||
and the settings used for invoking your new image. Typing your prompt into the
|
||||
open text field and clicking the Invoke button will produce the image based on
|
||||
the settings configured in the toolbar.
|
||||
To remove the LoRA completely, just click on its trash can icon.
|
||||
|
||||
See below for additional documentation related to each feature:
|
||||
Multiple LoRAs can be added simultaneously and combined with textual
|
||||
inversions and ControlNet models. Please see [Textual Inversions and
|
||||
LoRAs](CONCEPTS.md) and [Using ControlNet](CONTROLNET.md) for details.
|
||||
|
||||
- [Variations](./VARIATIONS.md)
|
||||
- [Upscaling](./POSTPROCESS.md#upscaling)
|
||||
- [Image to Image](./IMG2IMG.md)
|
||||
- [Other](./OTHER.md)
|
||||
## Summary
|
||||
|
||||
#### Invocation Gallery
|
||||
|
||||
The currently selected --outdir (or the default outputs folder) will display all
|
||||
previously generated files on load. As new invocations are generated, these will
|
||||
be dynamically added to the gallery, and can be previewed by selecting them.
|
||||
Each image also has a simple set of actions (e.g., Delete, Use Seed, Use All
|
||||
Parameters, etc.) that can be accessed by hovering over the image.
|
||||
|
||||
#### Image Workspace
|
||||
|
||||
When an image from the Invocation Gallery is selected, or is generated, the
|
||||
image will be displayed within the center of the interface. A quickbar of common
|
||||
image interactions is displayed along the top of the image, including:
|
||||
|
||||
- Use image in the `Image to Image` workflow
|
||||
- Initialize Face Restoration on the selected file
|
||||
- Initialize Upscaling on the selected file
|
||||
- View File metadata and details
|
||||
- Delete the file
|
||||
This walkthrough just skims the surface of the many things InvokeAI
|
||||
can do. Please see [Features](index.md) for more detailed reference
|
||||
guides.
|
||||
|
||||
## Acknowledgements
|
||||
|
||||
A huge shout-out to the core team working to make this vision a reality,
|
||||
A huge shout-out to the core team working to make the Web GUI a reality,
|
||||
including [psychedelicious](https://github.com/psychedelicious),
|
||||
[Kyle0654](https://github.com/Kyle0654) and
|
||||
[blessedcoolant](https://github.com/blessedcoolant).
|
||||
|
||||
@@ -17,8 +17,12 @@ a single convenient digital artist-optimized user interface.
|
||||
### * [Prompt Engineering](PROMPTS.md)
|
||||
Get the images you want with the InvokeAI prompt engineering language.
|
||||
|
||||
## * The [Concepts Library](CONCEPTS.md)
|
||||
Add custom subjects and styles using HuggingFace's repository of embeddings.
|
||||
### * The [LoRA, LyCORIS and Textual Inversion Models](CONCEPTS.md)
|
||||
Add custom subjects and styles using a variety of fine-tuned models.
|
||||
|
||||
### * [ControlNet](CONTROLNET.md)
|
||||
Learn how to install and use ControlNet models for fine control over
|
||||
image output.
|
||||
|
||||
### * [Image-to-Image Guide](IMG2IMG.md)
|
||||
Use a seed image to build new creations in the CLI.
|
||||
@@ -29,26 +33,28 @@ are the ticket.
|
||||
|
||||
## Model Management
|
||||
|
||||
## * [Model Installation](../installation/050_INSTALLING_MODELS.md)
|
||||
### * [Model Installation](../installation/050_INSTALLING_MODELS.md)
|
||||
Learn how to import third-party models and switch among them. This
|
||||
guide also covers optimizing models to load quickly.
|
||||
|
||||
## * [Merging Models](MODEL_MERGING.md)
|
||||
### * [Merging Models](MODEL_MERGING.md)
|
||||
Teach an old model new tricks. Merge 2-3 models together to create a
|
||||
new model that combines characteristics of the originals.
|
||||
|
||||
## * [Textual Inversion](TEXTUAL_INVERSION.md)
|
||||
### * [Textual Inversion](TRAINING.md)
|
||||
Personalize models by adding your own style or subjects.
|
||||
|
||||
# Other Features
|
||||
## Other Features
|
||||
|
||||
## * [The NSFW Checker](NSFW.md)
|
||||
### * [The NSFW Checker](NSFW.md)
|
||||
Prevent InvokeAI from displaying unwanted racy images.
|
||||
|
||||
## * [Controlling Logging](LOGGING.md)
|
||||
### * [Controlling Logging](LOGGING.md)
|
||||
Control how InvokeAI logs status messages.
|
||||
|
||||
## * [Miscellaneous](OTHER.md)
|
||||
<!-- OUT OF DATE
|
||||
### * [Miscellaneous](OTHER.md)
|
||||
Run InvokeAI on Google Colab, generate images with repeating patterns,
|
||||
batch process a file of prompts, increase the "creativity" of image
|
||||
generation by adding initial noise, and more!
|
||||
-->
|
||||
|
||||
@@ -24,7 +24,7 @@ title: Home
|
||||
|
||||
[![CI checks on main badge]][ci checks on main link]
|
||||
[![CI checks on dev badge]][ci checks on dev link]
|
||||
[![latest commit to dev badge]][latest commit to dev link]
|
||||
<!-- [![latest commit to dev badge]][latest commit to dev link] -->
|
||||
|
||||
[![github open issues badge]][github open issues link]
|
||||
[![github open prs badge]][github open prs link]
|
||||
@@ -54,10 +54,10 @@ title: Home
|
||||
[github stars badge]:
|
||||
https://flat.badgen.net/github/stars/invoke-ai/InvokeAI?icon=github
|
||||
[github stars link]: https://github.com/invoke-ai/InvokeAI/stargazers
|
||||
[latest commit to dev badge]:
|
||||
<!-- [latest commit to dev badge]:
|
||||
https://flat.badgen.net/github/last-commit/invoke-ai/InvokeAI/development?icon=github&color=yellow&label=last%20dev%20commit&cache=900
|
||||
[latest commit to dev link]:
|
||||
https://github.com/invoke-ai/InvokeAI/commits/development
|
||||
https://github.com/invoke-ai/InvokeAI/commits/main -->
|
||||
[latest release badge]:
|
||||
https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
|
||||
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases
|
||||
@@ -82,6 +82,25 @@ Q&A</a>]
|
||||
|
||||
This fork is rapidly evolving. Please use the [Issues tab](https://github.com/invoke-ai/InvokeAI/issues) to report bugs and make feature requests. Be sure to use the provided templates. They will help diagnose issues faster.
|
||||
|
||||
## :octicons-package-dependencies-24: Installation
|
||||
|
||||
This fork is supported across Linux, Windows and Macintosh. Linux users can use
|
||||
either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
|
||||
driver).
|
||||
|
||||
### [Installation Getting Started Guide](installation)
|
||||
#### **[Automated Installer](installation/010_INSTALL_AUTOMATED.md)**
|
||||
✅ This is the recommended installation method for first-time users.
|
||||
#### [Manual Installation](installation/020_INSTALL_MANUAL.md)
|
||||
This method is recommended for experienced users and developers
|
||||
#### [Docker Installation](installation/040_INSTALL_DOCKER.md)
|
||||
This method is recommended for those familiar with running Docker containers
|
||||
### Other Installation Guides
|
||||
- [PyPatchMatch](installation/060_INSTALL_PATCHMATCH.md)
|
||||
- [XFormers](installation/070_INSTALL_XFORMERS.md)
|
||||
- [CUDA and ROCm Drivers](installation/030_INSTALL_CUDA_AND_ROCM.md)
|
||||
- [Installing New Models](installation/050_INSTALLING_MODELS.md)
|
||||
|
||||
## :fontawesome-solid-computer: Hardware Requirements
|
||||
|
||||
### :octicons-cpu-24: System
|
||||
@@ -107,24 +126,6 @@ images in full-precision mode:
|
||||
- At least 18 GB of free disk space for the machine learning model, Python, and
|
||||
all its dependencies.
|
||||
|
||||
## :octicons-package-dependencies-24: Installation
|
||||
|
||||
This fork is supported across Linux, Windows and Macintosh. Linux users can use
|
||||
either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
|
||||
driver).
|
||||
|
||||
### [Installation Getting Started Guide](installation)
|
||||
#### [Automated Installer](installation/010_INSTALL_AUTOMATED.md)
|
||||
This method is recommended for 1st time users
|
||||
#### [Manual Installation](installation/020_INSTALL_MANUAL.md)
|
||||
This method is recommended for experienced users and developers
|
||||
#### [Docker Installation](installation/040_INSTALL_DOCKER.md)
|
||||
This method is recommended for those familiar with running Docker containers
|
||||
### Other Installation Guides
|
||||
- [PyPatchMatch](installation/060_INSTALL_PATCHMATCH.md)
|
||||
- [XFormers](installation/070_INSTALL_XFORMERS.md)
|
||||
- [CUDA and ROCm Drivers](installation/030_INSTALL_CUDA_AND_ROCM.md)
|
||||
- [Installing New Models](installation/050_INSTALLING_MODELS.md)
|
||||
|
||||
## :octicons-gift-24: InvokeAI Features
|
||||
|
||||
@@ -145,8 +146,8 @@ This method is recommended for those familiar with running Docker containers
|
||||
### Model Management
|
||||
- [Installing](installation/050_INSTALLING_MODELS.md)
|
||||
- [Model Merging](features/MODEL_MERGING.md)
|
||||
- [ControlNet Models](features/CONTROLNET.md)
|
||||
- [Style/Subject Concepts and Embeddings](features/CONCEPTS.md)
|
||||
- [Textual Inversion](features/TEXTUAL_INVERSION.md)
|
||||
- [Not Safe for Work (NSFW) Checker](features/NSFW.md)
|
||||
<!-- seperator -->
|
||||
### Prompt Engineering
|
||||
@@ -222,14 +223,10 @@ get solutions for common installation problems and other issues.
|
||||
|
||||
Anyone who wishes to contribute to this project, whether documentation,
|
||||
features, bug fixes, code cleanup, testing, or code reviews, is very much
|
||||
encouraged to do so. If you are unfamiliar with how to contribute to GitHub
|
||||
projects, here is a
|
||||
[Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github).
|
||||
encouraged to do so.
|
||||
|
||||
A full set of contribution guidelines, along with templates, are in progress,
|
||||
but for now the most important thing is to **make your pull request against the
|
||||
"development" branch**, and not against "main". This will help keep public
|
||||
breakage to a minimum and will allow you to propose more radical changes.
|
||||
Please take a look at our [Contribution documentation](contributing/CONTRIBUTING.md) to learn more about contributing to InvokeAI.
|
||||
|
||||
## :octicons-person-24: Contributors
|
||||
|
||||
|
||||
@@ -124,9 +124,9 @@ experimental versions later.
|
||||
[latest release](https://github.com/invoke-ai/InvokeAI/releases/latest),
|
||||
and look for a file named:
|
||||
|
||||
- InvokeAI-installer-v2.X.X.zip
|
||||
- InvokeAI-installer-v3.X.X.zip
|
||||
|
||||
where "2.X.X" is the latest released version. The file is located
|
||||
where "3.X.X" is the latest released version. The file is located
|
||||
at the very bottom of the release page, under **Assets**.
|
||||
|
||||
4. **Unpack the installer**: Unpack the zip file into a convenient directory. This will create a new
|
||||
@@ -354,8 +354,8 @@ experimental versions later.
|
||||
|
||||
12. **InvokeAI Options**: You can launch InvokeAI with several different command-line arguments that
|
||||
customize its behavior. For example, you can change the location of the
|
||||
image output directory, or select your favorite sampler. See the
|
||||
[Command-Line Interface](../features/CLI.md) for a full list of the options.
|
||||
image output directory or balance memory usage vs performance. See
|
||||
[Configuration](../features/CONFIGURATION.md) for a full list of the options.
|
||||
|
||||
- To set defaults that will take effect every time you launch InvokeAI,
|
||||
use a text editor (e.g. Notepad) to edit the file
|
||||
|
||||
@@ -256,7 +256,7 @@ manager, please follow these steps:
|
||||
|
||||
10. Render away!
|
||||
|
||||
Browse the [features](../features/CLI.md) section to learn about all the
|
||||
Browse the [features](../features/index.md) section to learn about all the
|
||||
things you can do with InvokeAI.
|
||||
|
||||
|
||||
@@ -270,7 +270,7 @@ manager, please follow these steps:
|
||||
|
||||
12. Other scripts
|
||||
|
||||
The [Textual Inversion](../features/TEXTUAL_INVERSION.md) script can be launched with the command:
|
||||
The [Textual Inversion](../features/TRAINING.md) script can be launched with the command:
|
||||
|
||||
```bash
|
||||
invokeai-ti --gui
|
||||
|
||||
@@ -43,24 +43,7 @@ InvokeAI comes with support for a good set of starter models. You'll
|
||||
find them listed in the master models file
|
||||
`configs/INITIAL_MODELS.yaml` in the InvokeAI root directory. The
|
||||
subset that are currently installed are found in
|
||||
`configs/models.yaml`. As of v2.3.1, the list of starter models is:
|
||||
|
||||
|Model Name | HuggingFace Repo ID | Description | URL |
|
||||
|---------- | ---------- | ----------- | --- |
|
||||
|stable-diffusion-1.5|runwayml/stable-diffusion-v1-5|Stable Diffusion version 1.5 diffusers model (4.27 GB)|https://huggingface.co/runwayml/stable-diffusion-v1-5 |
|
||||
|sd-inpainting-1.5|runwayml/stable-diffusion-inpainting|RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB)|https://huggingface.co/runwayml/stable-diffusion-inpainting |
|
||||
|stable-diffusion-2.1|stabilityai/stable-diffusion-2-1|Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB)|https://huggingface.co/stabilityai/stable-diffusion-2-1 |
|
||||
|sd-inpainting-2.0|stabilityai/stable-diffusion-2-inpainting|Stable Diffusion version 2.0 inpainting model (5.21 GB)|https://huggingface.co/stabilityai/stable-diffusion-2-inpainting |
|
||||
|analog-diffusion-1.0|wavymulder/Analog-Diffusion|An SD-1.5 model trained on diverse analog photographs (2.13 GB)|https://huggingface.co/wavymulder/Analog-Diffusion |
|
||||
|deliberate-1.0|XpucT/Deliberate|Versatile model that produces detailed images up to 768px (4.27 GB)|https://huggingface.co/XpucT/Deliberate |
|
||||
|d&d-diffusion-1.0|0xJustin/Dungeons-and-Diffusion|Dungeons & Dragons characters (2.13 GB)|https://huggingface.co/0xJustin/Dungeons-and-Diffusion |
|
||||
|dreamlike-photoreal-2.0|dreamlike-art/dreamlike-photoreal-2.0|A photorealistic model trained on 768 pixel images based on SD 1.5 (2.13 GB)|https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0 |
|
||||
|inkpunk-1.0|Envvi/Inkpunk-Diffusion|Stylized illustrations inspired by Gorillaz, FLCL and Shinkawa; prompt with "nvinkpunk" (4.27 GB)|https://huggingface.co/Envvi/Inkpunk-Diffusion |
|
||||
|openjourney-4.0|prompthero/openjourney|An SD 1.5 model fine tuned on Midjourney; prompt with "mdjrny-v4 style" (2.13 GB)|https://huggingface.co/prompthero/openjourney |
|
||||
|portrait-plus-1.0|wavymulder/portraitplus|An SD-1.5 model trained on close range portraits of people; prompt with "portrait+" (2.13 GB)|https://huggingface.co/wavymulder/portraitplus |
|
||||
|seek-art-mega-1.0|coreco/seek.art_MEGA|A general use SD-1.5 "anything" model that supports multiple styles (2.1 GB)|https://huggingface.co/coreco/seek.art_MEGA |
|
||||
|trinart-2.0|naclbit/trinart_stable_diffusion_v2|An SD-1.5 model finetuned with ~40K assorted high resolution manga/anime-style images (2.13 GB)|https://huggingface.co/naclbit/trinart_stable_diffusion_v2 |
|
||||
|waifu-diffusion-1.4|hakurei/waifu-diffusion|An SD-1.5 model trained on 680k anime/manga-style images (2.13 GB)|https://huggingface.co/hakurei/waifu-diffusion |
|
||||
`configs/models.yaml`.
|
||||
|
||||
Note that these files are covered by an "Ethical AI" license which
|
||||
forbids certain uses. When you initially download them, you are asked
|
||||
@@ -71,8 +54,7 @@ with the model terms by visiting the URLs in the table above.
|
||||
|
||||
## Community-Contributed Models
|
||||
|
||||
There are too many to list here and more are being contributed every
|
||||
day. [HuggingFace](https://huggingface.co/models?library=diffusers)
|
||||
[HuggingFace](https://huggingface.co/models?library=diffusers)
|
||||
is a great resource for diffusers models, and is also the home of a
|
||||
[fast-growing repository](https://huggingface.co/sd-concepts-library)
|
||||
of embedding (".bin") models that add subjects and/or styles to your
|
||||
@@ -86,310 +68,106 @@ only `.safetensors` and `.ckpt` models, but they can be easily loaded
|
||||
into InvokeAI and/or converted into optimized `diffusers` models. Be
|
||||
aware that CIVITAI hosts many models that generate NSFW content.
|
||||
|
||||
!!! note
|
||||
|
||||
InvokeAI 2.3.x does not support directly importing and
|
||||
running Stable Diffusion version 2 checkpoint models. You may instead
|
||||
convert them into `diffusers` models using the conversion methods
|
||||
described below.
|
||||
|
||||
## Installation
|
||||
|
||||
There are multiple ways to install and manage models:
|
||||
There are two ways to install and manage models:
|
||||
|
||||
1. The `invokeai-configure` script which will download and install them for you.
|
||||
1. The `invokeai-model-install` script which will download and install
|
||||
them for you. In addition to supporting main models, you can install
|
||||
ControlNet, LoRA and Textual Inversion models.
|
||||
|
||||
2. The command-line tool (CLI) has commands that allows you to import, configure and modify
|
||||
models files.
|
||||
|
||||
3. The web interface (WebUI) has a GUI for importing and managing
|
||||
2. The web interface (WebUI) has a GUI for importing and managing
|
||||
models.
|
||||
|
||||
### Installation via `invokeai-configure`
|
||||
3. By placing models (or symbolic links to models) inside one of the
|
||||
InvokeAI root directory's `autoimport` folder.
|
||||
|
||||
From the `invoke` launcher, choose option (6) "re-run the configure
|
||||
script to download new models." This will launch the same script that
|
||||
prompted you to select models at install time. You can use this to add
|
||||
models that you skipped the first time around. It is all right to
|
||||
specify a model that was previously downloaded; the script will just
|
||||
confirm that the files are complete.
|
||||
### Installation via `invokeai-model-install`
|
||||
|
||||
### Installation via the CLI
|
||||
From the `invoke` launcher, choose option [5] "Download and install
|
||||
models." This will launch the same script that prompted you to select
|
||||
models at install time. You can use this to add models that you
|
||||
skipped the first time around. It is all right to specify a model that
|
||||
was previously downloaded; the script will just confirm that the files
|
||||
are complete.
|
||||
|
||||
You can install a new model, including any of the community-supported ones, via
|
||||
the command-line client's `!import_model` command.
|
||||
The installer has different panels for installing main models from
|
||||
HuggingFace, models from Civitai and other arbitrary web sites,
|
||||
ControlNet models, LoRA/LyCORIS models, and Textual Inversion
|
||||
embeddings. Each section has a text box in which you can enter a new
|
||||
model to install. You can refer to a model using its:
|
||||
|
||||
#### Installing individual `.ckpt` and `.safetensors` models
|
||||
1. Local path to the .ckpt, .safetensors or diffusers folder on your local machine
|
||||
2. A directory on your machine that contains multiple models
|
||||
3. A URL that points to a downloadable model
|
||||
4. A HuggingFace repo id
|
||||
|
||||
If the model is already downloaded to your local disk, use
|
||||
`!import_model /path/to/file.ckpt` to load it. For example:
|
||||
Previously-installed models are shown with checkboxes. Uncheck a box
|
||||
to unregister the model from InvokeAI. Models that are physically
|
||||
installed inside the InvokeAI root directory will be deleted and
|
||||
purged (after a confirmation warning). Models that are located outside
|
||||
the InvokeAI root directory will be unregistered but not deleted.
|
||||
|
||||
```bash
|
||||
invoke> !import_model C:/Users/fred/Downloads/martians.safetensors
|
||||
Note: The installer script uses a console-based text interface that requires
|
||||
significant amounts of horizontal and vertical space. If the display
|
||||
looks messed up, just enlarge the terminal window and/or relaunch the
|
||||
script.
|
||||
|
||||
If you wish you can script model addition and deletion, as well as
|
||||
listing installed models. Start the "developer's console" and give the
|
||||
command `invokeai-model-install --help`. This will give you a series
|
||||
of command-line parameters that will let you control model
|
||||
installation. Examples:
|
||||
|
||||
```
|
||||
# (list all controlnet models)
|
||||
invokeai-model-install --list controlnet
|
||||
|
||||
# (install the model at the indicated URL)
|
||||
invokeai-model-install --add http://civitai.com/2860
|
||||
|
||||
# (delete the named model)
|
||||
invokeai-model-install --delete sd-1/main/analog-diffusion
|
||||
```
|
||||
|
||||
!!! tip "Forward Slashes"
|
||||
On Windows systems, use forward slashes rather than backslashes
|
||||
in your file paths.
|
||||
If you do use backslashes,
|
||||
you must double them like this:
|
||||
`C:\\Users\\fred\\Downloads\\martians.safetensors`
|
||||
### Installation via the Web GUI
|
||||
|
||||
Alternatively you can directly import the file using its URL:
|
||||
To install a new model using the Web GUI, do the following:
|
||||
|
||||
```bash
|
||||
invoke> !import_model https://example.org/sd_models/martians.safetensors
|
||||
```
|
||||
1. Open the InvokeAI Model Manager (cube at the bottom of the
|
||||
left-hand panel) and navigate to *Import Models*
|
||||
|
||||
For this to work, the URL must not be password-protected. Otherwise
|
||||
you will receive a 404 error.
|
||||
2. In the field labeled *Location* type in the path to the model you
|
||||
wish to install. You may use a URL, HuggingFace repo id, or a path on
|
||||
your local disk.
|
||||
|
||||
When you import a legacy model, the CLI will first ask you what type
|
||||
of model this is. You can indicate whether it is a model based on
|
||||
Stable Diffusion 1.x (1.4 or 1.5), one based on Stable Diffusion 2.x,
|
||||
or a 1.x inpainting model. Be careful to indicate the correct model
|
||||
type, or it will not load correctly. You can correct the model type
|
||||
after the fact using the `!edit_model` command.
|
||||
3. Alternatively, the *Scan for Models* button allows you to paste in
|
||||
the path to a folder somewhere on your machine. It will be scanned for
|
||||
importable models, and you will be prompted to add the ones of your choice.
|
||||
|
||||
The system will then ask you a few other questions about the model,
|
||||
including what size image it was trained on (usually 512x512), what
|
||||
name and description you wish to use for it, and whether you would
|
||||
like to install a custom VAE (variable autoencoder) file for the
|
||||
model. For recent models, the answer to the VAE question is usually
|
||||
"no," but it won't hurt to answer "yes".
|
||||
4. Press *Add Model* and wait for confirmation that the model
|
||||
was added.
|
||||
|
||||
After importing, the model will load. If this is successful, you will
|
||||
be asked if you want to keep the model loaded in memory to start
|
||||
generating immediately. You'll also be asked if you wish to make this
|
||||
the default model on startup. You can change this later using
|
||||
`!edit_model`.
|
||||
To delete a model, Select *Model Manager* to list all the currently
|
||||
installed models. Press the trash can icons to delete any models you
|
||||
wish to get rid of. Models whose weights are located inside the
|
||||
InvokeAI `models` directory will be purged from disk, while those
|
||||
located outside will be unregistered from InvokeAI, but not deleted.
|
||||
|
||||
#### Importing a batch of `.ckpt` and `.safetensors` models from a directory
|
||||
You can see where model weights are located by clicking on the model name.
|
||||
This will bring up an editable info panel showing the model's characteristics,
|
||||
including the `Model Location` of its files.
|
||||
|
||||
You may also point `!import_model` to a directory containing a set of
|
||||
`.ckpt` or `.safetensors` files. They will be imported _en masse_.
|
||||
### Installation via the `autoimport` function
|
||||
|
||||
!!! example
|
||||
In the InvokeAI root directory you will find a series of folders under
|
||||
`autoimport`, one each for main models, controlnets, embeddings and
|
||||
Loras. Any models that you add to these directories will be scanned
|
||||
at startup time and registered automatically.
|
||||
|
||||
```console
|
||||
invoke> !import_model C:/Users/fred/Downloads/civitai_models/
|
||||
```
|
||||
You may create symbolic links from these folders to models located
|
||||
elsewhere on disk and they will be autoimported. You can also create
|
||||
subfolders and organize them as you wish.
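For example, a pair of symbolic links might look like the sketch below. The paths are illustrative; the `lora` subfolder name appears elsewhere in this guide, while the `main` subfolder name is an assumption.

```bash
# Link models stored elsewhere on disk into the autoimport folders (illustrative paths)
ln -s /data/sd-models/analog-diffusion-1.0.safetensors ~/invokeai/autoimport/main/
ln -s /data/loras/ink-scenery.safetensors ~/invokeai/autoimport/lora/
# The linked models will be scanned and registered the next time InvokeAI starts.
```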
|
||||
|
||||
You will be given the option to import all models found in the
|
||||
directory, or select which ones to import. If there are subfolders
|
||||
within the directory, they will be searched for models to import.
|
||||
|
||||
#### Installing `diffusers` models
|
||||
|
||||
You can install a `diffusers` model from the HuggingFace site using
|
||||
`!import_model` and the HuggingFace repo_id for the model:
|
||||
|
||||
```bash
|
||||
invoke> !import_model andite/anything-v4.0
|
||||
```
|
||||
|
||||
Alternatively, you can download the model to disk and import it from
|
||||
there. The model may be distributed as a ZIP file, or as a Git
|
||||
repository:
|
||||
|
||||
```bash
|
||||
invoke> !import_model C:/Users/fred/Downloads/andite--anything-v4.0
|
||||
```
|
||||
|
||||
!!! tip "The CLI supports file path autocompletion"
|
||||
Type a bit of the path name and hit ++tab++ in order to get a choice of
|
||||
possible completions.
|
||||
|
||||
!!! tip "On Windows, you can drag model files onto the command-line"
|
||||
Once you have typed in `!import_model `, you can drag the
|
||||
model file or directory onto the command-line to insert the model path. This way, you don't need to
|
||||
type it or copy/paste. However, you will need to reverse or
|
||||
double backslashes as noted above.
|
||||
|
||||
Before installing, the CLI will ask you for a short name and
|
||||
description for the model, whether to make this the default model that
|
||||
is loaded at InvokeAI startup time, and whether to replace its
|
||||
VAE. Generally the answer to the latter question is "no".
|
||||
|
||||
### Converting legacy models into `diffusers`
|
||||
|
||||
The CLI `!convert_model` will convert a `.safetensors` or `.ckpt`
|
||||
model file into `diffusers` and install it. This will enable the model
|
||||
to load and run faster without loss of image quality.
|
||||
|
||||
The usage is identical to `!import_model`. You may point the command
|
||||
to either a downloaded model file on disk, or to a (non-password
|
||||
protected) URL:
|
||||
|
||||
```bash
|
||||
invoke> !convert_model C:/Users/fred/Downloads/martians.safetensors
|
||||
```
|
||||
|
||||
After a successful conversion, the CLI will offer you the option of
|
||||
deleting the original `.ckpt` or `.safetensors` file.
|
||||
|
||||
### Optimizing a previously-installed model
|
||||
|
||||
Lastly, if you have previously installed a `.ckpt` or `.safetensors`
|
||||
file and wish to convert it into a `diffusers` model, you can do this
|
||||
without re-downloading and converting the original file using the
|
||||
`!optimize_model` command. Simply pass the short name of an existing
|
||||
installed model:
|
||||
|
||||
```bash
|
||||
invoke> !optimize_model martians-v1.0
|
||||
```
|
||||
|
||||
The model will be converted into `diffusers` format and replace the
|
||||
previously installed version. You will again be offered the
|
||||
opportunity to delete the original `.ckpt` or `.safetensors` file.
|
||||
|
||||
### Related CLI Commands
|
||||
|
||||
There are a whole series of additional model management commands in
|
||||
the CLI that you can read about in [Command-Line
|
||||
Interface](../features/CLI.md). These include:
|
||||
|
||||
* `!models` - List all installed models
|
||||
* `!switch <model name>` - Switch to the indicated model
|
||||
* `!edit_model <model name>` - Edit the indicated model to change its name, description or other properties
|
||||
* `!del_model <model name>` - Delete the indicated model
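A short example session sketched from the commands above (the model names are illustrative; `portrait-plus-1.0` is one of the starter models, while `old-test-model` is hypothetical):

```bash
invoke> !models                      # list installed models
invoke> !switch portrait-plus-1.0    # switch to a different model
invoke> !del_model old-test-model    # remove a model you no longer need
```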
|
||||
|
||||
### Manually editing `configs/models.yaml`
|
||||
|
||||
|
||||
If you are comfortable with a text editor then you may simply edit `models.yaml`
|
||||
directly.
|
||||
|
||||
You will need to download the desired `.ckpt/.safetensors` file and
|
||||
place it somewhere on your machine's filesystem. Alternatively, for a
|
||||
`diffusers` model, record the repo_id or download the whole model
|
||||
directory. Then using a **text** editor (e.g. the Windows Notepad
|
||||
application), open the file `configs/models.yaml`, and add a new
|
||||
stanza that follows this model:
|
||||
|
||||
#### A legacy model
|
||||
|
||||
A legacy `.ckpt` or `.safetensors` entry will look like this:
|
||||
|
||||
```yaml
|
||||
arabian-nights-1.0:
|
||||
description: A great fine-tune in Arabian Nights style
|
||||
weights: ./path/to/arabian-nights-1.0.ckpt
|
||||
config: ./configs/stable-diffusion/v1-inference.yaml
|
||||
format: ckpt
|
||||
width: 512
|
||||
height: 512
|
||||
default: false
|
||||
```
|
||||
|
||||
Note that `format` is `ckpt` for both `.ckpt` and `.safetensors` files.
|
||||
|
||||
#### A diffusers model
|
||||
|
||||
A stanza for a `diffusers` model will look like this for a HuggingFace
|
||||
model with a repository ID:
|
||||
|
||||
```yaml
|
||||
arabian-nights-1.1:
|
||||
description: An even better fine-tune of the Arabian Nights
|
||||
repo_id: captahab/arabian-nights-1.1
|
||||
format: diffusers
|
||||
default: true
|
||||
```
|
||||
|
||||
And for a downloaded directory:
|
||||
|
||||
```yaml
|
||||
arabian-nights-1.1:
|
||||
description: An even better fine-tune of the Arabian Nights
|
||||
path: /path/to/captahab-arabian-nights-1.1
|
||||
format: diffusers
|
||||
default: true
|
||||
```
|
||||
|
||||
There is additional syntax for indicating an external VAE to use with
|
||||
this model. See `INITIAL_MODELS.yaml` and `models.yaml` for examples.
|
||||
|
||||
After you save the modified `models.yaml` file relaunch
|
||||
`invokeai`. The new model will now be available for your use.
|
||||
|
||||
### Installation via the WebUI
|
||||
|
||||
To access the WebUI Model Manager, click on the button that looks like
|
||||
a cube in the upper right side of the browser screen. This will bring
|
||||
up a dialogue that lists the models you have already installed, and
|
||||
allows you to load, delete or edit them:
|
||||
|
||||
<figure markdown>
|
||||
|
||||

|
||||
|
||||
</figure>
|
||||
|
||||
To add a new model, click on **+ Add New** and select either a
|
||||
checkpoint/safetensors model, or a diffusers model:
|
||||
|
||||
<figure markdown>
|
||||
|
||||

|
||||
|
||||
</figure>
|
||||
|
||||
In this example, we chose **Add Diffusers**. As shown in the figure
|
||||
below, a new dialogue prompts you to enter the name to use for the
|
||||
model, its description, and either the location of the `diffusers`
|
||||
model on disk, or its Repo ID on the HuggingFace web site. If you
|
||||
choose to enter a path to disk, the system will autocomplete for you
|
||||
as you type:
|
||||
|
||||
<figure markdown>
|
||||
|
||||

|
||||
|
||||
</figure>
|
||||
|
||||
Press **Add Model** at the bottom of the dialogue (scrolled out of
|
||||
sight in the figure), and the model will be downloaded, imported, and
|
||||
registered in `models.yaml`.
|
||||
|
||||
The **Add Checkpoint/Safetensor Model** option is similar, except that
|
||||
in this case you can choose to scan an entire folder for
|
||||
checkpoint/safetensors files to import. Simply type in the path of the
|
||||
directory and press the "Search" icon. This will display the
|
||||
`.ckpt` and `.safetensors` files found inside the directory and its
|
||||
subfolders, and allow you to choose which ones to import:
|
||||
|
||||
<figure markdown>
|
||||
|
||||

|
||||
|
||||
</figure>
|
||||
|
||||
## Model Management Startup Options
|
||||
|
||||
The `invoke` launcher and the `invokeai` script accept a series of
|
||||
command-line arguments that modify InvokeAI's behavior when loading
|
||||
models. These can be provided on the command line, or added to the
|
||||
InvokeAI root directory's `invokeai.init` initialization file.
|
||||
|
||||
The arguments are:
|
||||
|
||||
* `--model <model name>` -- Start up with the indicated model loaded
|
||||
* `--ckpt_convert` -- When a checkpoint/safetensors model is loaded, convert it into a `diffusers` model in memory. This does not permanently save the converted model to disk.
|
||||
* `--autoconvert <path/to/directory>` -- Scan the indicated directory path for new checkpoint/safetensors files, convert them into `diffusers` models, and import them into InvokeAI.
|
||||
|
||||
Here is an example of providing an argument on the command line using
|
||||
the `invoke.sh` launch script:
|
||||
|
||||
```bash
|
||||
invoke.sh --autoconvert /home/fred/stable-diffusion-checkpoints
|
||||
```
|
||||
|
||||
And here is what the same argument looks like in `invokeai.init`:
|
||||
|
||||
```bash
|
||||
--outdir="/home/fred/invokeai/outputs"
|
||||
--no-nsfw_checker
|
||||
--autoconvert /home/fred/stable-diffusion-checkpoints
|
||||
```
|
||||
The locations of the autoimport directories are controlled by settings
|
||||
in `invokeai.yaml`. See [Configuration](../features/CONFIGURATION.md).
|
||||
@@ -15,7 +15,7 @@ See the [troubleshooting
|
||||
section](010_INSTALL_AUTOMATED.md#troubleshooting) of the automated
|
||||
install guide for frequently-encountered installation issues.
|
||||
|
||||
## Main Application
|
||||
## Installation options
|
||||
|
||||
1. [Automated Installer](010_INSTALL_AUTOMATED.md)
|
||||
|
||||
@@ -24,6 +24,9 @@ install guide for frequently-encountered installation issues.
|
||||
"developer console" which will help us debug problems with you and
|
||||
give you access to experimental features.
|
||||
|
||||
|
||||
✅ This is the recommended option for first time users.
|
||||
|
||||
2. [Manual Installation](020_INSTALL_MANUAL.md)
|
||||
|
||||
In this method you will manually run the commands needed to install
|
||||
|
||||
docs/nodes/communityNodes.md (new file)
@@ -0,0 +1,52 @@
|
||||
# Community Nodes
|
||||
|
||||
These are nodes that have been developed by the community, for the community. If you're not sure what a node is, you can learn more about nodes [here](overview.md).
|
||||
|
||||
If you'd like to submit a node for the community, please refer to the [node creation overview](./overview.md#contributing-nodes).
|
||||
|
||||
To download a node, simply download the `.py` node file from the link and add it to the `invokeai/app/invocations/` folder in your Invoke AI install location. Along with the node, an example node graph should be provided to help you get started with the node.
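As a concrete sketch (the filename and install path are illustrative; adjust them to the node you downloaded and to your own install location):

```bash
# Copy a downloaded community node into the invocations folder,
# then restart InvokeAI so the new node is picked up.
cp ~/Downloads/facemask.py /path/to/InvokeAI/invokeai/app/invocations/
```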
|
||||
|
||||
To use a community node graph, download the `.json` node graph file and load it into Invoke AI via the **Load Nodes** button on the Node Editor.
|
||||
|
||||
## Disclaimer
|
||||
|
||||
The nodes linked below have been developed and contributed by members of the Invoke AI community. While we strive to ensure the quality and safety of these contributions, we do not guarantee the reliability or security of the nodes. If you have issues or concerns with any of the nodes below, please raise them on GitHub or in the Discord.
|
||||
|
||||
## List of Nodes
|
||||
|
||||
### Face Mask
|
||||
|
||||
**Description:** This node autodetects a face in the image using MediaPipe and masks it by making it transparent. Via outpainting you can swap faces with other faces, or invert the mask and swap things around the face with other things. Additionally, you can supply X and Y offset values to scale/change the shape of the mask for finer control. The node also outputs an all-white mask in the same dimensions as the input image. This is needed by the inpaint node (and unified canvas) for outpainting.
|
||||
|
||||
**Node Link:** https://github.com/ymgenesis/InvokeAI/blob/facemaskmediapipe/invokeai/app/invocations/facemask.py
|
||||
|
||||
**Example Node Graph:** https://www.mediafire.com/file/gohn5sb1bfp8use/21-July_2023-FaceMask.json/file
|
||||
|
||||
**Output Examples**
|
||||
|
||||

|
||||

|
||||

|
||||

|
||||
|
||||
--------------------------------
|
||||
### Super Cool Node Template
|
||||
|
||||
**Description:** This node allows you to do super cool things with InvokeAI.
|
||||
|
||||
**Node Link:** https://github.com/invoke-ai/InvokeAI/fake_node.py
|
||||
|
||||
**Example Node Graph:** https://github.com/invoke-ai/InvokeAI/fake_node_graph.json
|
||||
|
||||
**Output Examples**
|
||||
|
||||

|
||||
|
||||
### Ideal Size
|
||||
|
||||
**Description:** This node calculates an ideal image size for a first pass of a multi-pass upscaling. The aim is to avoid duplication that results from choosing a size larger than the model is capable of.
|
||||
|
||||
**Node Link:** https://github.com/JPPhoto/ideal-size-node
|
||||
|
||||
## Help
|
||||
If you run into any issues with a node, please post in the [InvokeAI Discord](https://discord.gg/ZmtBAhwWhy).
|
||||
docs/nodes/overview.md (new file)
@@ -0,0 +1,42 @@
|
||||
# Nodes
|
||||
|
||||
## What are Nodes?
|
||||
A node is simply a single operation that takes in some inputs and gives
|
||||
out some outputs. We can then chain multiple nodes together to create more
|
||||
complex functionality. All InvokeAI features are added through nodes.
|
||||
|
||||
This means nodes can be used to easily extend the image generation capabilities of InvokeAI, and allow you to build workflows to suit your needs.
|
||||
|
||||
You can read more about nodes and the node editor [here](../features/NODES.md).
|
||||
|
||||
|
||||
## Downloading Nodes
|
||||
To download a new node, visit our list of [Community Nodes](communityNodes.md). These are nodes that have been created by the community, for the community.
|
||||
|
||||
|
||||
## Contributing Nodes
|
||||
|
||||
To learn about creating a new node, please visit our [Node creation documentation](../contributing/INVOCATIONS.md).
|
||||
|
||||
Once you’ve created a node and confirmed that it behaves as expected locally, follow these steps:
|
||||
* Make sure the node is contained in a new Python (.py) file
|
||||
* Submit a pull request against the `nodes` branch with a link to your node on GitHub, to add the node to the [Community Nodes](communityNodes.md) list
|
||||
* Make sure you are following the template below and have provided all relevant details about the node and what it does.
|
||||
* A maintainer will review the pull request and node. If the node is aligned with the direction of the project, you might be asked for permission to include it in the core project.
|
||||
|
||||
### Community Node Template
|
||||
|
||||
```markdown
|
||||
--------------------------------
|
||||
### Super Cool Node Template
|
||||
|
||||
**Description:** This node allows you to do super cool things with InvokeAI.
|
||||
|
||||
**Node Link:** https://github.com/invoke-ai/InvokeAI/fake_node.py
|
||||
|
||||
**Example Node Graph:** https://github.com/invoke-ai/InvokeAI/fake_node_graph.json
|
||||
|
||||
**Output Examples**
|
||||
|
||||

|
||||
```
|
||||
@@ -17,67 +17,267 @@ We thank them for all of their time and hard work.
|
||||
|
||||
* @lstein (Lincoln Stein) - Co-maintainer
|
||||
* @blessedcoolant - Co-maintainer
|
||||
* @hipsterusername (Kent Keirsey) - Product Manager
|
||||
* @psychedelicious - Web Team Leader
|
||||
* @hipsterusername (Kent Keirsey) - Co-maintainer, CEO, Positive Vibes
|
||||
* @psychedelicious (Spencer Mabrito) - Web Team Leader
|
||||
* @Kyle0654 (Kyle Schouviller) - Node Architect and General Backend Wizard
|
||||
* @damian0815 - Attention Systems and Gameplay Engineer
|
||||
* @mauwii (Matthias Wild) - Continuous integration and product maintenance engineer
|
||||
* @Netsvetaev (Artur Netsvetaev) - UI/UX Developer
|
||||
* @tildebyte - General gadfly and resident (self-appointed) know-it-all
|
||||
* @keturn - Lead for Diffusers port
|
||||
* @damian0815 - Attention Systems and Compel Maintainer
|
||||
* @ebr (Eugene Brodsky) - Cloud/DevOps/Software engineer; your friendly neighbourhood cluster-autoscaler
|
||||
* @jpphoto (Jonathan Pollack) - Inference and rendering engine optimization
|
||||
* @genomancer (Gregg Helt) - Model training and merging
|
||||
* @genomancer (Gregg Helt) - Controlnet support
|
||||
* @StAlKeR7779 (Sergey Borisov) - Torch stack, ONNX, model management, optimization
|
||||
* @cheerio (Mary Rogers) - Lead Engineer & Web App Development
|
||||
* @brandon (Brandon Rising) - Platform, Infrastructure, Backend Systems
|
||||
* @ryanjdick (Ryan Dick) - Machine Learning & Training
|
||||
* @millu (Millun Atluri) - Community Manager, Documentation, Node-wrangler
|
||||
* @chainchompa (Jennifer Player) - Web Development & Chain-Chomping
|
||||
* @keturn (Kevin Turner) - Diffusers
|
||||
* @gogurt enjoyer - Discord moderator and end user support
|
||||
* @whosawhatsis - Discord moderator and end user support
|
||||
* @dwinrger - Discord moderator and end user support
|
||||
* @526christian - Discord moderator and end user support
|
||||
|
||||
## **Contributions by**
|
||||
## **Full List of Contributors by Commit Name**
|
||||
|
||||
- [Sean McLellan](https://github.com/Oceanswave)
|
||||
- [Kevin Gibbons](https://github.com/bakkot)
|
||||
- [Tesseract Cat](https://github.com/TesseractCat)
|
||||
- [blessedcoolant](https://github.com/blessedcoolant)
|
||||
- [David Ford](https://github.com/david-ford)
|
||||
- [yunsaki](https://github.com/yunsaki)
|
||||
- [James Reynolds](https://github.com/magnusviri)
|
||||
- [David Wager](https://github.com/maddavid123)
|
||||
- [Jason Toffaletti](https://github.com/toffaletti)
|
||||
- [tildebyte](https://github.com/tildebyte)
|
||||
- [Cragin Godley](https://github.com/cgodley)
|
||||
- [BlueAmulet](https://github.com/BlueAmulet)
|
||||
- [Benjamin Warner](https://github.com/warner-benjamin)
|
||||
- [Cora Johnson-Roberson](https://github.com/corajr)
|
||||
- [veprogames](https://github.com/veprogames)
|
||||
- [JigenD](https://github.com/JigenD)
|
||||
- [Niek van der Maas](https://github.com/Niek)
|
||||
- [Henry van Megen](https://github.com/hvanmegen)
|
||||
- [Håvard Gulldahl](https://github.com/havardgulldahl)
|
||||
- [greentext2](https://github.com/greentext2)
|
||||
- [Simon Vans-Colina](https://github.com/simonvc)
|
||||
- [Gabriel Rotbart](https://github.com/gabrielrotbart)
|
||||
- [Eric Khun](https://github.com/erickhun)
|
||||
- [Brent Ozar](https://github.com/BrentOzar)
|
||||
- [nderscore](https://github.com/nderscore)
|
||||
- [Mikhail Tishin](https://github.com/tishin)
|
||||
- [Tom Elovi Spruce](https://github.com/ilovecomputers)
|
||||
- [spezialspezial](https://github.com/spezialspezial)
|
||||
- [Yosuke Shinya](https://github.com/shinya7y)
|
||||
- [Andy Pilate](https://github.com/Cubox)
|
||||
- [Muhammad Usama](https://github.com/SMUsamaShah)
|
||||
- [Arturo Mendivil](https://github.com/artmen1516)
|
||||
- [Paul Sajna](https://github.com/sajattack)
|
||||
- [Samuel Husso](https://github.com/shusso)
|
||||
- [nicolai256](https://github.com/nicolai256)
|
||||
- [Mihai](https://github.com/mh-dm)
|
||||
- [Any Winter](https://github.com/any-winter-4079)
|
||||
- [Doggettx](https://github.com/doggettx)
|
||||
- [Matthias Wild](https://github.com/mauwii)
|
||||
- [Kyle Schouviller](https://github.com/kyle0654)
|
||||
- [rabidcopy](https://github.com/rabidcopy)
|
||||
- [Dominic Letz](https://github.com/dominicletz)
|
||||
- [Dmitry T.](https://github.com/ArDiouscuros)
|
||||
- [Kent Keirsey](https://github.com/hipsterusername)
|
||||
- [psychedelicious](https://github.com/psychedelicious)
|
||||
- [damian0815](https://github.com/damian0815)
|
||||
- [Eugene Brodsky](https://github.com/ebr)
|
||||
- AbdBarho
|
||||
- ablattmann
|
||||
- AdamOStark
|
||||
- Adam Rice
|
||||
- Airton Silva
|
||||
- Alexander Eichhorn
|
||||
- Alexandre D. Roberge
|
||||
- Andreas Rozek
|
||||
- Andre LaBranche
|
||||
- Andy Bearman
|
||||
- Andy Luhrs
|
||||
- Andy Pilate
|
||||
- Any-Winter-4079
|
||||
- apolinario
|
||||
- ArDiouscuros
|
||||
- Armando C. Santisbon
|
||||
- Arthur Holstvoogd
|
||||
- artmen1516
|
||||
- Artur
|
||||
- Arturo Mendivil
|
||||
- Ben Alkov
|
||||
- Benjamin Warner
|
||||
- Bernard Maltais
|
||||
- blessedcoolant
|
||||
- blhook
|
||||
- BlueAmulet
|
||||
- Bouncyknighter
|
||||
- Brandon Rising
|
||||
- Brent Ozar
|
||||
- Brian Racer
|
||||
- bsilvereagle
|
||||
- c67e708d
|
||||
- CapableWeb
|
||||
- Carson Katri
|
||||
- Chloe
|
||||
- Chris Dawson
|
||||
- Chris Hayes
|
||||
- Chris Jones
|
||||
- chromaticist
|
||||
- Claus F. Strasburger
|
||||
- cmdr2
|
||||
- cody
|
||||
- Conor Reid
|
||||
- Cora Johnson-Roberson
|
||||
- coreco
|
||||
- cosmii02
|
||||
- cpacker
|
||||
- Cragin Godley
|
||||
- creachec
|
||||
- Damian Stewart
|
||||
- Daniel Manzke
|
||||
- Danny Beer
|
||||
- Dan Sully
|
||||
- David Burnett
|
||||
- David Ford
|
||||
- David Regla
|
||||
- David Wager
|
||||
- Daya Adianto
|
||||
- db3000
|
||||
- Denis Olshin
|
||||
- Dennis
|
||||
- Dominic Letz
|
||||
- DrGunnarMallon
|
||||
- Edward Johan
|
||||
- elliotsayes
|
||||
- Elrik
|
||||
- ElrikUnderlake
|
||||
- Eric Khun
|
||||
- Eric Wolf
|
||||
- Eugene Brodsky
|
||||
- ExperimentalCyborg
- Fabian Bahl
- Fabio 'MrWHO' Torchetti
- fattire
- Felipe Nogueira
- Félix Sanz
- figgefigge
- Gabriel Mackievicz Telles
- gabrielrotbart
- gallegonovato
- Gérald LONLAS
- GitHub Actions Bot
- gogurtenjoyer
- greentext2
- Gregg Helt
- H4rk
- Håvard Gulldahl
- henry
- Henry van Megen
- hipsterusername
- hj
- Hosted Weblate
- Iman Karim
- ismail ihsan bülbül
- Ivan Efimov
- jakehl
- Jakub Kolčář
- JamDon2
- James Reynolds
- Jan Skurovec
- Jari Vetoniemi
- Jason Toffaletti
- Jaulustus
- Jeff Mahoney
- jeremy
- Jeremy Clark
- JigenD
- Jim Hays
- Johan Roxendal
- Johnathon Selstad
- Jonathan
- Joseph Dries III
- JPPhoto
- jspraul
- Justin Wong
- Juuso V
- Kaspar Emanuel
- Katsuyuki-Karasawa
- Kent Keirsey
- Kevin Coakley
- Kevin Gibbons
- Kevin Schaul
- Kevin Turner
- krummrey
- Kyle Lacy
- Kyle Schouviller
- Lawrence Norton
- LemonDouble
- Leo Pasanen
- Lincoln Stein
- LoganPederson
- Lynne Whitehorn
- majick
- Marco Labarile
- Martin Kristiansen
- Mary Hipp Rogers
- mastercaster9000
- Matthias Wild
- michaelk71
- mickr777
- Mihai
- Mihail Dumitrescu
- Mikhail Tishin
- Millun Atluri
- Minjune Song
- mitien
- mofuzz
- Muhammad Usama
- Name
- _nderscore
- Netzer R
- Nicholas Koh
- Nicholas Körfer
- nicolai256
- Niek van der Maas
- noodlebox
- Nuno Coração
- ofirkris
- Olivier Louvignes
- owenvincent
- Patrick Esser
- Patrick Tien
- Patrick von Platen
- Paul Sajna
- pejotr
- Peter Baylies
- Peter Lin
- plucked
- prixt
- psychedelicious
- Rainer Bernhardt
- Riccardo Giovanetti
- Rich Jones
- rmagur1203
- Rob Baines
- Robert Bolender
- Robin Rombach
- Rohan Barar
- rpagliuca
- rromb
- Rupesh Sreeraman
- Ryan Cao
- Saifeddine
- Saifeddine ALOUI
- SammCheese
- Sammy
- sammyf
- Samuel Husso
- Scott Lahteine
- Sean McLellan
- Sebastian Aigner
- Sergey Borisov
- Sergey Krashevich
- Shapor Naghibzadeh
- Shawn Zhong
- Simon Vans-Colina
- skunkworxdark
- slashtechno
- spezialspezial
- ssantos
- StAlKeR7779
- Stephan Koglin-Fischer
- SteveCaruso
- Steve Martinelli
- Steven Frank
- System X - Files
- Taylor Kems
- techicode
- techybrain-dev
- tesseractcat
- thealanle
- Thomas
- tildebyte
- Tim Cabbage
- Tom
- Tom Elovi Spruce
- Tom Gouville
- tomosuto
- Travco
- Travis Palmer
- tyler
- unknown
- user1
- Vedant Madane
- veprogames
- wa.code
- wfng92
- whosawhatsis
- Will
- William Becher
- William Chong
- xra
- Yeung Yiu Hung
- ymgenesis
- Yorzaren
- Yosuke Shinya
- yun saki
- Zadagu
- zeptofine
- 冯不游
- 唐澤 克幸

## **Original CompVis Authors**
@@ -58,7 +58,8 @@ class ApiDependencies:
|
||||
|
||||
@staticmethod
|
||||
def initialize(config: InvokeAIAppConfig, event_handler_id: int, logger: Logger = logger):
|
||||
logger.debug(f"InvokeAI version {__version__}")
|
||||
logger.info(f"InvokeAI version {__version__}")
|
||||
logger.info(f"Root directory = {str(config.root_path)}")
|
||||
logger.debug(f"Internet connectivity is {config.internet_available}")
|
||||
|
||||
events = FastAPIEventService(event_handler_id)
|
||||
|
||||
@@ -9,6 +9,7 @@ from fastapi_events.dispatcher import dispatch
|
||||
|
||||
from ..services.events import EventServiceBase
|
||||
|
||||
|
||||
class FastAPIEventService(EventServiceBase):
|
||||
event_handler_id: int
|
||||
__queue: Queue
|
||||
@@ -27,9 +28,6 @@ class FastAPIEventService(EventServiceBase):
|
||||
self.__queue.put(None)
|
||||
|
||||
def dispatch(self, event_name: str, payload: Any) -> None:
|
||||
# TODO: Remove next two debugging lines
|
||||
from .dependencies import ApiDependencies
|
||||
ApiDependencies.invoker.services.logger.debug(f'dispatch {event_name} / {payload}')
|
||||
self.__queue.put(dict(event_name=event_name, payload=payload))
|
||||
|
||||
async def __dispatch_from_queue(self, stop_event: threading.Event):
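The event service above hands events off through a thread-safe queue that an async worker drains. A minimal, self-contained sketch of that pattern (class and method names here are illustrative, not the project's API):

```python
import asyncio
import threading
from queue import Empty, Queue
from typing import Any


class QueueBackedEventService:
    """Illustrative sketch: synchronous dispatch, asynchronous delivery."""

    def __init__(self) -> None:
        self.__queue: Queue = Queue()

    def dispatch(self, event_name: str, payload: Any) -> None:
        # Producers may call this from any thread; it never blocks on delivery.
        self.__queue.put(dict(event_name=event_name, payload=payload))

    async def dispatch_from_queue(self, stop_event: threading.Event) -> None:
        # Consumer loop: poll the queue and hand each event to the async framework.
        while not stop_event.is_set():
            try:
                event = self.__queue.get_nowait()
            except Empty:
                await asyncio.sleep(0.05)  # nothing pending; yield to the event loop
                continue
            if event is None:  # sentinel pushed on shutdown
                break
            print(f"delivering {event['event_name']}: {event['payload']}")
```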
|
||||
|
||||
@@ -1,9 +1,22 @@
|
||||
from enum import Enum
|
||||
from fastapi import Body
|
||||
from fastapi.routing import APIRouter
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from invokeai.backend.image_util.patchmatch import PatchMatch
|
||||
from invokeai.version import __version__
|
||||
|
||||
from ..dependencies import ApiDependencies
|
||||
from invokeai.backend.util.logging import logging
|
||||
|
||||
class LogLevel(int, Enum):
|
||||
NotSet = logging.NOTSET
|
||||
Debug = logging.DEBUG
|
||||
Info = logging.INFO
|
||||
Warning = logging.WARNING
|
||||
Error = logging.ERROR
|
||||
Critical = logging.CRITICAL
|
||||
|
||||
app_router = APIRouter(prefix="/v1/app", tags=["app"])
|
||||
|
||||
|
||||
@@ -34,3 +47,27 @@ async def get_config() -> AppConfig:
|
||||
if PatchMatch.patchmatch_available():
|
||||
infill_methods.append('patchmatch')
|
||||
return AppConfig(infill_methods=infill_methods)
|
||||
|
||||
@app_router.get(
|
||||
"/logging",
|
||||
operation_id="get_log_level",
|
||||
responses={200: {"description" : "The operation was successful"}},
|
||||
response_model = LogLevel,
|
||||
)
|
||||
async def get_log_level(
|
||||
) -> LogLevel:
|
||||
"""Returns the log level"""
|
||||
return LogLevel(ApiDependencies.invoker.services.logger.level)
|
||||
|
||||
@app_router.post(
|
||||
"/logging",
|
||||
operation_id="set_log_level",
|
||||
responses={200: {"description" : "The operation was successful"}},
|
||||
response_model = LogLevel,
|
||||
)
|
||||
async def set_log_level(
|
||||
level: LogLevel = Body(description="New log verbosity level"),
|
||||
) -> LogLevel:
|
||||
"""Sets the log verbosity level"""
|
||||
ApiDependencies.invoker.services.logger.setLevel(level)
|
||||
return LogLevel(ApiDependencies.invoker.services.logger.level)
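A hedged client-side sketch of the two new logging routes (the base URL and the `/api` mount are assumptions; per the `Body(...)` parameter above, the POST body is the bare integer value of the `LogLevel` enum):

```python
import requests  # assumed available in the client environment

BASE = "http://127.0.0.1:9090/api/v1/app"  # host, port and /api mount are assumptions

# Read the current verbosity; returns the numeric level, e.g. 20 for Info.
print(requests.get(f"{BASE}/logging").json())

# Raise it to Warning (30); the route echoes back the level it just set.
print(requests.post(f"{BASE}/logging", json=30).json())
```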
|
||||
|
||||
@@ -24,11 +24,14 @@ async def create_board_image(
|
||||
):
|
||||
"""Creates a board_image"""
|
||||
try:
|
||||
result = ApiDependencies.invoker.services.board_images.add_image_to_board(board_id=board_id, image_name=image_name)
|
||||
result = ApiDependencies.invoker.services.board_images.add_image_to_board(
|
||||
board_id=board_id, image_name=image_name
|
||||
)
|
||||
return result
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail="Failed to add to board")
|
||||
|
||||
|
||||
|
||||
@board_images_router.delete(
|
||||
"/",
|
||||
operation_id="remove_board_image",
|
||||
@@ -43,27 +46,10 @@ async def remove_board_image(
|
||||
):
|
||||
"""Deletes a board_image"""
|
||||
try:
|
||||
result = ApiDependencies.invoker.services.board_images.remove_image_from_board(board_id=board_id, image_name=image_name)
|
||||
result = ApiDependencies.invoker.services.board_images.remove_image_from_board(
|
||||
board_id=board_id, image_name=image_name
|
||||
)
|
||||
return result
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail="Failed to update board")
|
||||
|
||||
|
||||
|
||||
@board_images_router.get(
|
||||
"/{board_id}",
|
||||
operation_id="list_board_images",
|
||||
response_model=OffsetPaginatedResults[ImageDTO],
|
||||
)
|
||||
async def list_board_images(
|
||||
board_id: str = Path(description="The id of the board"),
|
||||
offset: int = Query(default=0, description="The page offset"),
|
||||
limit: int = Query(default=10, description="The number of boards per page"),
|
||||
) -> OffsetPaginatedResults[ImageDTO]:
|
||||
"""Gets a list of images for a board"""
|
||||
|
||||
results = ApiDependencies.invoker.services.board_images.get_images_for_board(
|
||||
board_id,
|
||||
)
|
||||
return results
|
||||
|
||||
|
||||
@@ -1,16 +1,28 @@
|
||||
from typing import Optional, Union
|
||||
|
||||
from fastapi import Body, HTTPException, Path, Query
|
||||
from fastapi.routing import APIRouter
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from invokeai.app.services.board_record_storage import BoardChanges
|
||||
from invokeai.app.services.image_record_storage import OffsetPaginatedResults
|
||||
from invokeai.app.services.models.board_record import BoardDTO
|
||||
|
||||
|
||||
from ..dependencies import ApiDependencies
|
||||
|
||||
boards_router = APIRouter(prefix="/v1/boards", tags=["boards"])
|
||||
|
||||
|
||||
class DeleteBoardResult(BaseModel):
|
||||
board_id: str = Field(description="The id of the board that was deleted.")
|
||||
deleted_board_images: list[str] = Field(
|
||||
description="The image names of the board-images relationships that were deleted."
|
||||
)
|
||||
deleted_images: list[str] = Field(
|
||||
description="The names of the images that were deleted."
|
||||
)
|
||||
|
||||
|
||||
@boards_router.post(
|
||||
"/",
|
||||
operation_id="create_board",
|
||||
@@ -69,25 +81,42 @@ async def update_board(
|
||||
raise HTTPException(status_code=500, detail="Failed to update board")
|
||||
|
||||
|
||||
@boards_router.delete("/{board_id}", operation_id="delete_board")
|
||||
@boards_router.delete(
|
||||
"/{board_id}", operation_id="delete_board", response_model=DeleteBoardResult
|
||||
)
|
||||
async def delete_board(
|
||||
board_id: str = Path(description="The id of board to delete"),
|
||||
include_images: Optional[bool] = Query(
|
||||
description="Permanently delete all images on the board", default=False
|
||||
),
|
||||
) -> None:
|
||||
) -> DeleteBoardResult:
|
||||
"""Deletes a board"""
|
||||
try:
|
||||
if include_images is True:
|
||||
deleted_images = ApiDependencies.invoker.services.board_images.get_all_board_image_names_for_board(
|
||||
board_id=board_id
|
||||
)
|
||||
ApiDependencies.invoker.services.images.delete_images_on_board(
|
||||
board_id=board_id
|
||||
)
|
||||
ApiDependencies.invoker.services.boards.delete(board_id=board_id)
|
||||
return DeleteBoardResult(
|
||||
board_id=board_id,
|
||||
deleted_board_images=[],
|
||||
deleted_images=deleted_images,
|
||||
)
|
||||
else:
|
||||
deleted_board_images = ApiDependencies.invoker.services.board_images.get_all_board_image_names_for_board(
|
||||
board_id=board_id
|
||||
)
|
||||
ApiDependencies.invoker.services.boards.delete(board_id=board_id)
|
||||
return DeleteBoardResult(
|
||||
board_id=board_id,
|
||||
deleted_board_images=deleted_board_images,
|
||||
deleted_images=[],
|
||||
)
|
||||
except Exception as e:
|
||||
# TODO: Does this need any exception handling at all?
|
||||
pass
|
||||
raise HTTPException(status_code=500, detail="Failed to delete board")
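A sketch of calling the reworked delete route from a client; the URL layout and board id are assumptions, while the response keys mirror `DeleteBoardResult` above:

```python
import requests  # assumed available in the client environment

# Delete a board and, with include_images=true, the images it contained.
resp = requests.delete(
    "http://127.0.0.1:9090/api/v1/boards/some-board-id",  # board id is illustrative
    params={"include_images": "true"},
)
result = resp.json()
print(result["board_id"], result["deleted_images"], result["deleted_board_images"])
```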
|
||||
|
||||
|
||||
@boards_router.get(
|
||||
@@ -115,3 +144,19 @@ async def list_boards(
|
||||
status_code=400,
|
||||
detail="Invalid request: Must provide either 'all' or both 'offset' and 'limit'",
|
||||
)
|
||||
|
||||
|
||||
@boards_router.get(
|
||||
"/{board_id}/image_names",
|
||||
operation_id="list_all_board_image_names",
|
||||
response_model=list[str],
|
||||
)
|
||||
async def list_all_board_image_names(
|
||||
board_id: str = Path(description="The id of the board"),
|
||||
) -> list[str]:
|
||||
"""Gets a list of images for a board"""
|
||||
|
||||
image_names = ApiDependencies.invoker.services.board_images.get_all_board_image_names_for_board(
|
||||
board_id,
|
||||
)
|
||||
return image_names
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
import io
|
||||
from typing import Optional
|
||||
|
||||
from fastapi import (Body, HTTPException, Path, Query, Request, Response,
|
||||
UploadFile)
|
||||
from fastapi import Body, HTTPException, Path, Query, Request, Response, UploadFile
|
||||
from fastapi.responses import FileResponse
|
||||
from fastapi.routing import APIRouter
|
||||
from PIL import Image
|
||||
@@ -11,9 +10,11 @@ from invokeai.app.invocations.metadata import ImageMetadata
|
||||
from invokeai.app.models.image import ImageCategory, ResourceOrigin
|
||||
from invokeai.app.services.image_record_storage import OffsetPaginatedResults
|
||||
from invokeai.app.services.item_storage import PaginatedResults
|
||||
from invokeai.app.services.models.image_record import (ImageDTO,
|
||||
ImageRecordChanges,
|
||||
ImageUrlsDTO)
|
||||
from invokeai.app.services.models.image_record import (
|
||||
ImageDTO,
|
||||
ImageRecordChanges,
|
||||
ImageUrlsDTO,
|
||||
)
|
||||
|
||||
from ..dependencies import ApiDependencies
|
||||
|
||||
@@ -39,9 +40,15 @@ async def upload_image(
|
||||
response: Response,
|
||||
image_category: ImageCategory = Query(description="The category of the image"),
|
||||
is_intermediate: bool = Query(description="Whether this is an intermediate image"),
|
||||
board_id: Optional[str] = Query(
|
||||
default=None, description="The board to add this image to, if any"
|
||||
),
|
||||
session_id: Optional[str] = Query(
|
||||
default=None, description="The session ID associated with this upload, if any"
|
||||
),
|
||||
crop_visible: Optional[bool] = Query(
|
||||
default=False, description="Whether to crop the image"
|
||||
),
|
||||
) -> ImageDTO:
|
||||
"""Uploads an image"""
|
||||
if not file.content_type.startswith("image"):
|
||||
@@ -51,6 +58,9 @@ async def upload_image(
|
||||
|
||||
try:
|
||||
pil_image = Image.open(io.BytesIO(contents))
|
||||
if crop_visible:
|
||||
bbox = pil_image.getbbox()
|
||||
pil_image = pil_image.crop(bbox)
|
||||
except:
|
||||
# Error opening the image
|
||||
raise HTTPException(status_code=415, detail="Failed to read image")
|
||||
@@ -61,6 +71,7 @@ async def upload_image(
|
||||
image_origin=ResourceOrigin.EXTERNAL,
|
||||
image_category=image_category,
|
||||
session_id=session_id,
|
||||
board_id=board_id,
|
||||
is_intermediate=is_intermediate,
|
||||
)
|
||||
|
||||
@@ -85,6 +96,18 @@ async def delete_image(
|
||||
pass
|
||||
|
||||
|
||||
@images_router.post("/clear-intermediates", operation_id="clear_intermediates")
|
||||
async def clear_intermediates() -> int:
|
||||
"""Clears all intermediates"""
|
||||
|
||||
try:
|
||||
count_deleted = ApiDependencies.invoker.services.images.delete_intermediates()
|
||||
return count_deleted
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail="Failed to clear intermediates")
|
||||
pass
|
||||
|
||||
|
||||
@images_router.patch(
|
||||
"/{image_name}",
|
||||
operation_id="update_image",
|
||||
@@ -119,6 +142,7 @@ async def get_image_dto(
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=404)
|
||||
|
||||
|
||||
@images_router.get(
|
||||
"/{image_name}/metadata",
|
||||
operation_id="get_image_metadata",
|
||||
@@ -234,16 +258,17 @@ async def get_image_urls(
|
||||
)
|
||||
async def list_image_dtos(
|
||||
image_origin: Optional[ResourceOrigin] = Query(
|
||||
default=None, description="The origin of images to list"
|
||||
default=None, description="The origin of images to list."
|
||||
),
|
||||
categories: Optional[list[ImageCategory]] = Query(
|
||||
default=None, description="The categories of image to include"
|
||||
default=None, description="The categories of image to include."
|
||||
),
|
||||
is_intermediate: Optional[bool] = Query(
|
||||
default=None, description="Whether to list intermediate images"
|
||||
default=None, description="Whether to list intermediate images."
|
||||
),
|
||||
board_id: Optional[str] = Query(
|
||||
default=None, description="The board id to filter by"
|
||||
default=None,
|
||||
description="The board id to filter by. Use 'none' to find images without a board.",
|
||||
),
|
||||
offset: int = Query(default=0, description="The page offset"),
|
||||
limit: int = Query(default=10, description="The number of images per page"),
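The `board_id` filter now accepts the reserved value `none`. A hedged example of listing board-less images (the images route path is assumed):

```python
import requests  # assumed available in the client environment

# First page of images that are not attached to any board; "none" is the
# reserved value described above, and the route path is an assumption.
resp = requests.get(
    "http://127.0.0.1:9090/api/v1/images/",
    params={"board_id": "none", "offset": 0, "limit": 10},
)
print(resp.json()["items"])
```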
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
|
||||
|
||||
import pathlib
|
||||
import threading
|
||||
from typing import Literal, List, Optional, Union
|
||||
|
||||
from fastapi import Body, Path, Query, Response
|
||||
@@ -128,43 +127,54 @@ async def update_model(
|
||||
"/import",
|
||||
operation_id="import_model",
|
||||
responses= {
|
||||
200: {"description" : "The path was queued for import"},
|
||||
201: {"description" : "The model imported successfully"},
|
||||
404: {"description" : "The model could not be found"},
|
||||
415: {"description" : "Unrecognized file/folder format"},
|
||||
424: {"description" : "The model appeared to import successfully, but could not be found in the model manager"},
|
||||
409: {"description" : "There is already a model corresponding to this path or repo_id"},
|
||||
},
|
||||
status_code=200
|
||||
status_code=201,
|
||||
response_model=ImportModelResponse
|
||||
)
|
||||
async def import_model(
|
||||
location: str = Body(description="A model path, repo_id or URL to import"),
|
||||
prediction_type: Optional[Literal['v_prediction','epsilon','sample']] = \
|
||||
Body(description='Prediction type for SDv2 checkpoint files', default="v_prediction"),
|
||||
) -> str:
|
||||
"""
|
||||
Add a model using its local path, repo_id, or remote URL. Model characteristics will be probed and configured automatically.
|
||||
This call launches a background thread to process the imported model and always succeeds. Results are reported in the background
|
||||
as the following events:
|
||||
- model_import_started(import_path:str)
|
||||
- model_import_completed(import_path:str, import_info:AddModelResults, success:bool, error:str)
|
||||
- download_started(url:str)
|
||||
- download_progress(url:str, downloaded_bytes:int, total_bytes:int)
|
||||
- download_completed(url:str, status_code:int, download_path:str)
|
||||
"""
|
||||
) -> ImportModelResponse:
|
||||
""" Add a model using its local path, repo_id, or remote URL. Model characteristics will be probed and configured automatically """
|
||||
|
||||
items_to_import = {location}
|
||||
prediction_types = { x.value: x for x in SchedulerPredictionType }
|
||||
logger = ApiDependencies.invoker.services.logger
|
||||
events = ApiDependencies.invoker.services.events
|
||||
|
||||
try:
|
||||
import_thread = threading.Thread(target = ApiDependencies.invoker.services.model_manager.heuristic_import,
|
||||
kwargs = dict(items_to_import = items_to_import,
|
||||
prediction_type_helper = lambda x: prediction_types.get(prediction_type),
|
||||
event_bus = events,
|
||||
)
|
||||
)
|
||||
import_thread.start()
|
||||
return 'request queued'
|
||||
except Exception as e:
|
||||
installed_models = ApiDependencies.invoker.services.model_manager.heuristic_import(
|
||||
items_to_import = items_to_import,
|
||||
prediction_type_helper = lambda x: prediction_types.get(prediction_type)
|
||||
)
|
||||
info = installed_models.get(location)
|
||||
|
||||
if not info:
|
||||
logger.error("Import failed")
|
||||
raise HTTPException(status_code=415)
|
||||
|
||||
logger.info(f'Successfully imported {location}, got {info}')
|
||||
model_raw = ApiDependencies.invoker.services.model_manager.list_model(
|
||||
model_name=info.name,
|
||||
base_model=info.base_model,
|
||||
model_type=info.model_type
|
||||
)
|
||||
return parse_obj_as(ImportModelResponse, model_raw)
|
||||
|
||||
except ModelNotFoundException as e:
|
||||
logger.error(str(e))
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
raise HTTPException(status_code=404, detail=str(e))
|
||||
except InvalidModelException as e:
|
||||
logger.error(str(e))
|
||||
raise HTTPException(status_code=415)
|
||||
except ValueError as e:
|
||||
logger.error(str(e))
|
||||
raise HTTPException(status_code=409, detail=str(e))
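With the background thread gone, a client sees the import result (or an error status) directly in the response. A hedged sketch of calling the endpoint (the URL layout and the repo_id are illustrative; the JSON keys follow the `Body(...)` parameters above):

```python
import requests  # assumed available in the client environment

# Import a model by repo_id; a 201 carries the new model record, while
# 404/409/415 map to the error cases handled above.
resp = requests.post(
    "http://127.0.0.1:9090/api/v1/models/import",  # URL layout assumed
    json={
        "location": "runwayml/stable-diffusion-v1-5",  # illustrative repo_id
        "prediction_type": "epsilon",
    },
)
resp.raise_for_status()
print(resp.json())
```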
|
||||
|
||||
@models_router.post(
|
||||
"/add",
|
||||
@@ -305,20 +315,21 @@ async def list_ckpt_configs(
|
||||
return ApiDependencies.invoker.services.model_manager.list_checkpoint_configs()
|
||||
|
||||
|
||||
@models_router.get(
|
||||
@models_router.post(
|
||||
"/sync",
|
||||
operation_id="sync_to_config",
|
||||
responses={
|
||||
201: { "description": "synchronization successful" },
|
||||
},
|
||||
status_code = 201,
|
||||
response_model = None
|
||||
response_model = bool
|
||||
)
|
||||
async def sync_to_config(
|
||||
)->None:
|
||||
)->bool:
|
||||
"""Call after making changes to models.yaml, autoimport directories or models directory to synchronize
|
||||
in-memory data structures with disk data structures."""
|
||||
return ApiDependencies.invoker.services.model_manager.sync_to_config()
|
||||
ApiDependencies.invoker.services.model_manager.sync_to_config()
|
||||
return True
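Since the route is now a POST returning a plain boolean, a resync after editing `models.yaml` or the autoimport directories looks roughly like this (URL layout assumed):

```python
import requests  # assumed available in the client environment

# Rescan models.yaml and the autoimport directories; the route now returns True.
ok = requests.post("http://127.0.0.1:9090/api/v1/models/sync").json()
assert ok is True
```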
|
||||
|
||||
@models_router.put(
|
||||
"/merge/{base_model}",
|
||||
@@ -363,50 +374,3 @@ async def merge_models(
|
||||
except ValueError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
return response
|
||||
|
||||
# The rename operation is now supported by update_model and no longer needs to be
|
||||
# a standalone route.
|
||||
# @models_router.post(
|
||||
# "/rename/{base_model}/{model_type}/{model_name}",
|
||||
# operation_id="rename_model",
|
||||
# responses= {
|
||||
# 201: {"description" : "The model was renamed successfully"},
|
||||
# 404: {"description" : "The model could not be found"},
|
||||
# 409: {"description" : "There is already a model corresponding to the new name"},
|
||||
# },
|
||||
# status_code=201,
|
||||
# response_model=ImportModelResponse
|
||||
# )
|
||||
# async def rename_model(
|
||||
# base_model: BaseModelType = Path(description="Base model"),
|
||||
# model_type: ModelType = Path(description="The type of model"),
|
||||
# model_name: str = Path(description="current model name"),
|
||||
# new_name: Optional[str] = Query(description="new model name", default=None),
|
||||
# new_base: Optional[BaseModelType] = Query(description="new model base", default=None),
|
||||
# ) -> ImportModelResponse:
|
||||
# """ Rename a model"""
|
||||
|
||||
# logger = ApiDependencies.invoker.services.logger
|
||||
|
||||
# try:
|
||||
# result = ApiDependencies.invoker.services.model_manager.rename_model(
|
||||
# base_model = base_model,
|
||||
# model_type = model_type,
|
||||
# model_name = model_name,
|
||||
# new_name = new_name,
|
||||
# new_base = new_base,
|
||||
# )
|
||||
# logger.debug(result)
|
||||
# logger.info(f'Successfully renamed {model_name}=>{new_name}')
|
||||
# model_raw = ApiDependencies.invoker.services.model_manager.list_model(
|
||||
# model_name=new_name or model_name,
|
||||
# base_model=new_base or base_model,
|
||||
# model_type=model_type
|
||||
# )
|
||||
# return parse_obj_as(ImportModelResponse, model_raw)
|
||||
# except ModelNotFoundException as e:
|
||||
# logger.error(str(e))
|
||||
# raise HTTPException(status_code=404, detail=str(e))
|
||||
# except ValueError as e:
|
||||
# logger.error(str(e))
|
||||
# raise HTTPException(status_code=409, detail=str(e))
|
||||
|
||||
@@ -4,6 +4,7 @@ import sys
|
||||
from inspect import signature
|
||||
|
||||
import uvicorn
|
||||
import socket
|
||||
|
||||
from fastapi import FastAPI
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
@@ -193,9 +194,22 @@ app.mount("/",
|
||||
)
|
||||
|
||||
def invoke_api():
|
||||
def find_port(port: int):
|
||||
"""Find a port not in use starting at given port"""
|
||||
# Taken from https://waylonwalker.com/python-find-available-port/, thanks Waylon!
|
||||
# https://github.com/WaylonWalker
|
||||
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
|
||||
if s.connect_ex(("localhost", port)) == 0:
|
||||
return find_port(port=port + 1)
|
||||
else:
|
||||
return port
|
||||
|
||||
port = find_port(app_config.port)
|
||||
if port != app_config.port:
|
||||
logger.warn(f"Port {app_config.port} in use, using port {port}")
|
||||
# Start our own event loop for eventing usage
|
||||
loop = asyncio.new_event_loop()
|
||||
config = uvicorn.Config(app=app, host=app_config.host, port=app_config.port, loop=loop)
|
||||
config = uvicorn.Config(app=app, host=app_config.host, port=port, loop=loop)
|
||||
# Use access_log to turn off logging
|
||||
server = uvicorn.Server(config)
|
||||
loop.run_until_complete(server.serve())
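`find_port` probes with `connect_ex`, which returns 0 only when something is already listening, and recurses to the next port otherwise. An equivalent iterative sketch with a bounded search (the helper name is hypothetical):

```python
import socket


def find_free_port(start: int, max_tries: int = 100) -> int:
    """Return the first port at or above `start` with no listener (hypothetical helper)."""
    for port in range(start, start + max_tries):
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            # connect_ex returns 0 when a connection succeeds, i.e. the port is taken.
            if s.connect_ex(("localhost", port)) != 0:
                return port
    raise RuntimeError(f"no free port in range {start}-{start + max_tries - 1}")
```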
|
||||
|
||||
@@ -331,8 +331,8 @@ class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
|
||||
crop_left: int = Field(0, description="")
|
||||
target_width: int = Field(1024, description="")
|
||||
target_height: int = Field(1024, description="")
|
||||
clip1: ClipField = Field(None, description="Clip to use")
|
||||
clip2: ClipField = Field(None, description="Clip to use")
|
||||
clip: ClipField = Field(None, description="Clip to use")
|
||||
clip2: ClipField = Field(None, description="Clip2 to use")
|
||||
|
||||
# Schema customisation
|
||||
class Config(InvocationConfig):
|
||||
@@ -348,7 +348,7 @@ class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
|
||||
|
||||
@torch.no_grad()
|
||||
def invoke(self, context: InvocationContext) -> CompelOutput:
|
||||
c1, c1_pooled, ec1 = self.run_clip_compel(context, self.clip1, self.prompt, False)
|
||||
c1, c1_pooled, ec1 = self.run_clip_compel(context, self.clip, self.prompt, False)
|
||||
if self.style.strip() == "":
|
||||
c2, c2_pooled, ec2 = self.run_clip_compel(context, self.clip2, self.prompt, True)
|
||||
else:
|
||||
@@ -451,8 +451,8 @@ class SDXLRawPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
|
||||
crop_left: int = Field(0, description="")
|
||||
target_width: int = Field(1024, description="")
|
||||
target_height: int = Field(1024, description="")
|
||||
clip1: ClipField = Field(None, description="Clip to use")
|
||||
clip2: ClipField = Field(None, description="Clip to use")
|
||||
clip: ClipField = Field(None, description="Clip to use")
|
||||
clip2: ClipField = Field(None, description="Clip2 to use")
|
||||
|
||||
# Schema customisation
|
||||
class Config(InvocationConfig):
|
||||
@@ -468,7 +468,7 @@ class SDXLRawPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
|
||||
|
||||
@torch.no_grad()
|
||||
def invoke(self, context: InvocationContext) -> CompelOutput:
|
||||
c1, c1_pooled, ec1 = self.run_clip_raw(context, self.clip1, self.prompt, False)
|
||||
c1, c1_pooled, ec1 = self.run_clip_raw(context, self.clip, self.prompt, False)
|
||||
if self.style.strip() == "":
|
||||
c2, c2_pooled, ec2 = self.run_clip_raw(context, self.clip2, self.prompt, True)
|
||||
else:
|
||||
|
||||
@@ -85,8 +85,8 @@ CONTROLNET_DEFAULT_MODELS = [
|
||||
CONTROLNET_NAME_VALUES = Literal[tuple(CONTROLNET_DEFAULT_MODELS)]
|
||||
CONTROLNET_MODE_VALUES = Literal[tuple(
|
||||
["balanced", "more_prompt", "more_control", "unbalanced"])]
|
||||
# crop and fill options not ready yet
|
||||
# CONTROLNET_RESIZE_VALUES = Literal[tuple(["just_resize", "crop_resize", "fill_resize"])]
|
||||
CONTROLNET_RESIZE_VALUES = Literal[tuple(
|
||||
["just_resize", "crop_resize", "fill_resize", "just_resize_simple",])]
|
||||
|
||||
|
||||
class ControlNetModelField(BaseModel):
|
||||
@@ -111,7 +111,8 @@ class ControlField(BaseModel):
|
||||
description="When the ControlNet is last applied (% of total steps)")
|
||||
control_mode: CONTROLNET_MODE_VALUES = Field(
|
||||
default="balanced", description="The control mode to use")
|
||||
# resize_mode: CONTROLNET_RESIZE_VALUES = Field(default="just_resize", description="The resize mode to use")
|
||||
resize_mode: CONTROLNET_RESIZE_VALUES = Field(
|
||||
default="just_resize", description="The resize mode to use")
|
||||
|
||||
@validator("control_weight")
|
||||
def validate_control_weight(cls, v):
|
||||
@@ -161,6 +162,7 @@ class ControlNetInvocation(BaseInvocation):
|
||||
end_step_percent: float = Field(default=1, ge=0, le=1,
|
||||
description="When the ControlNet is last applied (% of total steps)")
|
||||
control_mode: CONTROLNET_MODE_VALUES = Field(default="balanced", description="The control mode used")
|
||||
resize_mode: CONTROLNET_RESIZE_VALUES = Field(default="just_resize", description="The resize mode used")
|
||||
# fmt: on
|
||||
|
||||
class Config(InvocationConfig):
|
||||
@@ -187,6 +189,7 @@ class ControlNetInvocation(BaseInvocation):
|
||||
begin_step_percent=self.begin_step_percent,
|
||||
end_step_percent=self.end_step_percent,
|
||||
control_mode=self.control_mode,
|
||||
resize_mode=self.resize_mode,
|
||||
),
|
||||
)
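Both knobs are constrained by the `Literal` tuples defined earlier. A small self-contained sketch of how pydantic enforces those values (the model below is an illustrative stand-in, not `ControlField` itself):

```python
from typing import Literal

from pydantic import BaseModel, ValidationError

MODE_VALUES = Literal["balanced", "more_prompt", "more_control", "unbalanced"]
RESIZE_VALUES = Literal["just_resize", "crop_resize", "fill_resize", "just_resize_simple"]


class ControlKnobs(BaseModel):  # illustrative stand-in for the real ControlField
    control_mode: MODE_VALUES = "balanced"
    resize_mode: RESIZE_VALUES = "just_resize"


print(ControlKnobs(control_mode="more_control").resize_mode)  # -> just_resize
try:
    ControlKnobs(resize_mode="crop")  # not one of the permitted values
except ValidationError as err:
    print(err.errors()[0]["msg"])
```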
|
||||
|
||||
|
||||
@@ -518,8 +518,8 @@ class ImageScaleInvocation(BaseInvocation, PILInvocationConfig):
|
||||
type: Literal["img_scale"] = "img_scale"
|
||||
|
||||
# Inputs
|
||||
image: Optional[ImageField] = Field(default=None, description="The image to scale")
|
||||
scale_factor: float = Field(gt=0, description="The factor by which to scale the image")
|
||||
image: Optional[ImageField] = Field(default=None, description="The image to scale")
|
||||
scale_factor: Optional[float] = Field(default=2.0, gt=0, description="The factor by which to scale the image")
|
||||
resample_mode: PIL_RESAMPLING_MODES = Field(default="bicubic", description="The resampling mode")
|
||||
# fmt: on
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@ from ...backend.stable_diffusion.diffusers_pipeline import (
|
||||
from ...backend.stable_diffusion.diffusion.shared_invokeai_diffusion import \
|
||||
PostprocessingSettings
|
||||
from ...backend.stable_diffusion.schedulers import SCHEDULER_MAP
|
||||
from ...backend.util.devices import torch_dtype
|
||||
from ...backend.util.devices import choose_torch_device, torch_dtype, choose_precision
|
||||
from ..models.image import ImageCategory, ImageField, ResourceOrigin
|
||||
from .baseinvocation import (BaseInvocation, BaseInvocationOutput,
|
||||
InvocationConfig, InvocationContext)
|
||||
@@ -30,6 +30,7 @@ from .compel import ConditioningField
|
||||
from .controlnet_image_processors import ControlField
|
||||
from .image import ImageOutput
|
||||
from .model import ModelInfo, UNetField, VaeField
|
||||
from invokeai.app.util.controlnet_utils import prepare_control_image
|
||||
|
||||
from diffusers.models.attention_processor import (
|
||||
AttnProcessor2_0,
|
||||
@@ -39,6 +40,9 @@ from diffusers.models.attention_processor import (
|
||||
)
|
||||
|
||||
|
||||
DEFAULT_PRECISION = choose_precision(choose_torch_device())
|
||||
|
||||
|
||||
class LatentsField(BaseModel):
|
||||
"""A latents field used for passing latents between invocations"""
|
||||
|
||||
@@ -285,7 +289,7 @@ class TextToLatentsInvocation(BaseInvocation):
|
||||
# and add in batch_size, num_images_per_prompt?
|
||||
# and do real check for classifier_free_guidance?
|
||||
# prepare_control_image should return torch.Tensor of shape(batch_size, 3, height, width)
|
||||
control_image = model.prepare_control_image(
|
||||
control_image = prepare_control_image(
|
||||
image=input_image,
|
||||
do_classifier_free_guidance=do_classifier_free_guidance,
|
||||
width=control_width_resize,
|
||||
@@ -295,13 +299,18 @@ class TextToLatentsInvocation(BaseInvocation):
|
||||
device=control_model.device,
|
||||
dtype=control_model.dtype,
|
||||
control_mode=control_info.control_mode,
|
||||
resize_mode=control_info.resize_mode,
|
||||
)
|
||||
control_item = ControlNetData(
|
||||
model=control_model, image_tensor=control_image,
|
||||
model=control_model,
|
||||
image_tensor=control_image,
|
||||
weight=control_info.control_weight,
|
||||
begin_step_percent=control_info.begin_step_percent,
|
||||
end_step_percent=control_info.end_step_percent,
|
||||
control_mode=control_info.control_mode,
|
||||
# any resizing needed should currently be happening in prepare_control_image(),
|
||||
# but adding resize_mode to ControlNetData in case needed in the future
|
||||
resize_mode=control_info.resize_mode,
|
||||
)
|
||||
control_data.append(control_item)
|
||||
# MultiControlNetModel has been refactored out, just need list[ControlNetData]
|
||||
@@ -493,7 +502,7 @@ class LatentsToImageInvocation(BaseInvocation):
|
||||
tiled: bool = Field(
|
||||
default=False,
|
||||
description="Decode latents by overlaping tiles(less memory consumption)")
|
||||
fp32: bool = Field(False, description="Decode in full precision")
|
||||
fp32: bool = Field(DEFAULT_PRECISION=='float32', description="Decode in full precision")
|
||||
metadata: Optional[CoreMetadata] = Field(default=None, description="Optional core metadata to be written to the image")
|
||||
|
||||
# Schema customisation
|
||||
@@ -598,7 +607,7 @@ class ResizeLatentsInvocation(BaseInvocation):
|
||||
antialias: bool = Field(
|
||||
default=False,
|
||||
description="Whether or not to antialias (applied in bilinear and bicubic modes only)")
|
||||
|
||||
|
||||
class Config(InvocationConfig):
|
||||
schema_extra = {
|
||||
"ui": {
|
||||
@@ -644,7 +653,7 @@ class ScaleLatentsInvocation(BaseInvocation):
|
||||
antialias: bool = Field(
|
||||
default=False,
|
||||
description="Whether or not to antialias (applied in bilinear and bicubic modes only)")
|
||||
|
||||
|
||||
class Config(InvocationConfig):
|
||||
schema_extra = {
|
||||
"ui": {
|
||||
@@ -687,7 +696,7 @@ class ImageToLatentsInvocation(BaseInvocation):
|
||||
tiled: bool = Field(
|
||||
default=False,
|
||||
description="Encode latents by overlaping tiles(less memory consumption)")
|
||||
fp32: bool = Field(False, description="Decode in full precision")
|
||||
fp32: bool = Field(DEFAULT_PRECISION=='float32', description="Decode in full precision")
|
||||
|
||||
|
||||
# Schema customisation
|
||||
@@ -755,7 +764,7 @@ class ImageToLatentsInvocation(BaseInvocation):
|
||||
dtype=vae.dtype
|
||||
) # FIXME: uses torch.randn. make reproducible!
|
||||
|
||||
latents = 0.18215 * latents
|
||||
latents = vae.config.scaling_factor * latents
|
||||
latents = latents.to(dtype=orig_dtype)
|
||||
|
||||
name = f"{context.graph_execution_state_id}__{self.id}"
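Swapping the hard-coded `0.18215` for `vae.config.scaling_factor` matters once SDXL is in play: the SD 1.x VAE uses 0.18215 while the SDXL VAE ships with 0.13025, so reading the factor from the model keeps encode and decode symmetric. A minimal round-trip sketch with diffusers (the checkpoint name is illustrative):

```python
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")  # illustrative checkpoint
image = torch.randn(1, 3, 512, 512)  # stand-in for a [-1, 1] normalized image batch

with torch.no_grad():
    latents = vae.encode(image).latent_dist.sample()
    latents = vae.config.scaling_factor * latents  # scale into UNet latent space
    decoded = vae.decode(latents / vae.config.scaling_factor).sample  # undo before decoding
```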
|
||||
|
||||
@@ -6,6 +6,7 @@ from typing import List, Literal, Optional, Union
|
||||
from pydantic import Field, validator
|
||||
|
||||
from ...backend.model_management import ModelType, SubModelType
|
||||
from invokeai.app.util.step_callback import stable_diffusion_xl_step_callback
|
||||
from .baseinvocation import (BaseInvocation, BaseInvocationOutput,
|
||||
InvocationConfig, InvocationContext)
|
||||
|
||||
@@ -243,10 +244,31 @@ class SDXLTextToLatentsInvocation(BaseInvocation):
|
||||
},
|
||||
}
|
||||
|
||||
def dispatch_progress(
|
||||
self,
|
||||
context: InvocationContext,
|
||||
source_node_id: str,
|
||||
sample,
|
||||
step,
|
||||
total_steps,
|
||||
) -> None:
|
||||
stable_diffusion_xl_step_callback(
|
||||
context=context,
|
||||
node=self.dict(),
|
||||
source_node_id=source_node_id,
|
||||
sample=sample,
|
||||
step=step,
|
||||
total_steps=total_steps,
|
||||
)
|
||||
|
||||
# based on
|
||||
# https://github.com/huggingface/diffusers/blob/3ebbaf7c96801271f9e6c21400033b6aa5ffcf29/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py#L375
|
||||
@torch.no_grad()
|
||||
def invoke(self, context: InvocationContext) -> LatentsOutput:
|
||||
graph_execution_state = context.services.graph_execution_manager.get(
|
||||
context.graph_execution_state_id
|
||||
)
|
||||
source_node_id = graph_execution_state.prepared_source_mapping[self.id]
|
||||
latents = context.services.latents.get(self.noise.latents_name)
|
||||
|
||||
positive_cond_data = context.services.latents.get(self.positive_conditioning.conditioning_name)
|
||||
@@ -341,6 +363,7 @@ class SDXLTextToLatentsInvocation(BaseInvocation):
|
||||
# call the callback, if provided
|
||||
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % scheduler.order == 0):
|
||||
progress_bar.update()
|
||||
self.dispatch_progress(context, source_node_id, latents, i, num_inference_steps)
|
||||
#if callback is not None and i % callback_steps == 0:
|
||||
# callback(i, t, latents)
|
||||
else:
|
||||
@@ -409,6 +432,7 @@ class SDXLTextToLatentsInvocation(BaseInvocation):
|
||||
# call the callback, if provided
|
||||
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % scheduler.order == 0):
|
||||
progress_bar.update()
|
||||
self.dispatch_progress(context, source_node_id, latents, i, num_inference_steps)
|
||||
#if callback is not None and i % callback_steps == 0:
|
||||
# callback(i, t, latents)
|
||||
|
||||
@@ -473,10 +497,31 @@ class SDXLLatentsToLatentsInvocation(BaseInvocation):
|
||||
},
|
||||
}
|
||||
|
||||
def dispatch_progress(
|
||||
self,
|
||||
context: InvocationContext,
|
||||
source_node_id: str,
|
||||
sample,
|
||||
step,
|
||||
total_steps,
|
||||
) -> None:
|
||||
stable_diffusion_xl_step_callback(
|
||||
context=context,
|
||||
node=self.dict(),
|
||||
source_node_id=source_node_id,
|
||||
sample=sample,
|
||||
step=step,
|
||||
total_steps=total_steps,
|
||||
)
|
||||
|
||||
# based on
|
||||
# https://github.com/huggingface/diffusers/blob/3ebbaf7c96801271f9e6c21400033b6aa5ffcf29/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py#L375
|
||||
@torch.no_grad()
|
||||
def invoke(self, context: InvocationContext) -> LatentsOutput:
|
||||
graph_execution_state = context.services.graph_execution_manager.get(
|
||||
context.graph_execution_state_id
|
||||
)
|
||||
source_node_id = graph_execution_state.prepared_source_mapping[self.id]
|
||||
latents = context.services.latents.get(self.latents.latents_name)
|
||||
|
||||
positive_cond_data = context.services.latents.get(self.positive_conditioning.conditioning_name)
|
||||
@@ -579,6 +624,7 @@ class SDXLLatentsToLatentsInvocation(BaseInvocation):
|
||||
# call the callback, if provided
|
||||
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % scheduler.order == 0):
|
||||
progress_bar.update()
|
||||
self.dispatch_progress(context, source_node_id, latents, i, num_inference_steps)
|
||||
#if callback is not None and i % callback_steps == 0:
|
||||
# callback(i, t, latents)
|
||||
else:
|
||||
@@ -647,6 +693,7 @@ class SDXLLatentsToLatentsInvocation(BaseInvocation):
|
||||
# call the callback, if provided
|
||||
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % scheduler.order == 0):
|
||||
progress_bar.update()
|
||||
self.dispatch_progress(context, source_node_id, latents, i, num_inference_steps)
|
||||
#if callback is not None and i % callback_steps == 0:
|
||||
# callback(i, t, latents)
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) & the InvokeAI Team
|
||||
from pathlib import Path, PosixPath
|
||||
from typing import Literal, Union, cast
|
||||
from pathlib import Path
|
||||
from typing import Literal, Union
|
||||
|
||||
import cv2 as cv
|
||||
import numpy as np
|
||||
@@ -16,19 +16,20 @@ from .image import ImageOutput
|
||||
|
||||
# TODO: Populate this from disk?
|
||||
# TODO: Use model manager to load?
|
||||
REALESRGAN_MODELS = Literal[
|
||||
ESRGAN_MODELS = Literal[
|
||||
"RealESRGAN_x4plus.pth",
|
||||
"RealESRGAN_x4plus_anime_6B.pth",
|
||||
"ESRGAN_SRx4_DF2KOST_official-ff704c30.pth",
|
||||
"RealESRGAN_x2plus.pth",
|
||||
]
|
||||
|
||||
|
||||
class RealESRGANInvocation(BaseInvocation):
|
||||
class ESRGANInvocation(BaseInvocation):
|
||||
"""Upscales an image using RealESRGAN."""
|
||||
|
||||
type: Literal["realesrgan"] = "realesrgan"
|
||||
type: Literal["esrgan"] = "esrgan"
|
||||
image: Union[ImageField, None] = Field(default=None, description="The input image")
|
||||
model_name: REALESRGAN_MODELS = Field(
|
||||
model_name: ESRGAN_MODELS = Field(
|
||||
default="RealESRGAN_x4plus.pth", description="The Real-ESRGAN model to use"
|
||||
)
|
||||
|
||||
@@ -73,19 +74,17 @@ class RealESRGANInvocation(BaseInvocation):
|
||||
scale=4,
|
||||
)
|
||||
netscale = 4
|
||||
# TODO: add x2 models handling?
|
||||
# elif self.model_name in ["RealESRGAN_x2plus"]:
|
||||
# # x2 RRDBNet model
|
||||
# model = RRDBNet(
|
||||
# num_in_ch=3,
|
||||
# num_out_ch=3,
|
||||
# num_feat=64,
|
||||
# num_block=23,
|
||||
# num_grow_ch=32,
|
||||
# scale=2,
|
||||
# )
|
||||
# model_path = Path()
|
||||
# netscale = 2
|
||||
elif self.model_name in ["RealESRGAN_x2plus.pth"]:
|
||||
# x2 RRDBNet model
|
||||
rrdbnet_model = RRDBNet(
|
||||
num_in_ch=3,
|
||||
num_out_ch=3,
|
||||
num_feat=64,
|
||||
num_block=23,
|
||||
num_grow_ch=32,
|
||||
scale=2,
|
||||
)
|
||||
netscale = 2
|
||||
else:
|
||||
msg = f"Invalid RealESRGAN model: {self.model_name}"
|
||||
context.services.logger.error(msg)
|
||||
|
||||
@@ -32,11 +32,11 @@ class BoardImageRecordStorageBase(ABC):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_images_for_board(
|
||||
def get_all_board_image_names_for_board(
|
||||
self,
|
||||
board_id: str,
|
||||
) -> OffsetPaginatedResults[ImageRecord]:
|
||||
"""Gets images for a board."""
|
||||
) -> list[str]:
|
||||
"""Gets all board images for a board, as a list of the image names."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
@@ -211,6 +211,26 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
|
||||
items=images, offset=offset, limit=limit, total=count
|
||||
)
|
||||
|
||||
def get_all_board_image_names_for_board(self, board_id: str) -> list[str]:
|
||||
try:
|
||||
self._lock.acquire()
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
SELECT image_name
|
||||
FROM board_images
|
||||
WHERE board_id = ?;
|
||||
""",
|
||||
(board_id,),
|
||||
)
|
||||
result = cast(list[sqlite3.Row], self._cursor.fetchall())
|
||||
image_names = list(map(lambda r: r[0], result))
|
||||
return image_names
|
||||
except sqlite3.Error as e:
|
||||
self._conn.rollback()
|
||||
raise e
|
||||
finally:
|
||||
self._lock.release()
|
||||
|
||||
def get_board_for_image(
|
||||
self,
|
||||
image_name: str,
|
||||
|
||||
@@ -38,11 +38,11 @@ class BoardImagesServiceABC(ABC):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_images_for_board(
|
||||
def get_all_board_image_names_for_board(
|
||||
self,
|
||||
board_id: str,
|
||||
) -> OffsetPaginatedResults[ImageDTO]:
|
||||
"""Gets images for a board."""
|
||||
) -> list[str]:
|
||||
"""Gets all board images for a board, as a list of the image names."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
@@ -98,30 +98,13 @@ class BoardImagesService(BoardImagesServiceABC):
|
||||
) -> None:
|
||||
self._services.board_image_records.remove_image_from_board(board_id, image_name)
|
||||
|
||||
def get_images_for_board(
|
||||
def get_all_board_image_names_for_board(
|
||||
self,
|
||||
board_id: str,
|
||||
) -> OffsetPaginatedResults[ImageDTO]:
|
||||
image_records = self._services.board_image_records.get_images_for_board(
|
||||
) -> list[str]:
|
||||
return self._services.board_image_records.get_all_board_image_names_for_board(
|
||||
board_id
|
||||
)
|
||||
image_dtos = list(
|
||||
map(
|
||||
lambda r: image_record_to_dto(
|
||||
r,
|
||||
self._services.urls.get_image_url(r.image_name),
|
||||
self._services.urls.get_image_url(r.image_name, True),
|
||||
board_id,
|
||||
),
|
||||
image_records.items,
|
||||
)
|
||||
)
|
||||
return OffsetPaginatedResults[ImageDTO](
|
||||
items=image_dtos,
|
||||
offset=image_records.offset,
|
||||
limit=image_records.limit,
|
||||
total=image_records.total,
|
||||
)
|
||||
|
||||
def get_board_for_image(
|
||||
self,
|
||||
@@ -136,7 +119,7 @@ def board_record_to_dto(
|
||||
) -> BoardDTO:
|
||||
"""Converts a board record to a board DTO."""
|
||||
return BoardDTO(
|
||||
**board_record.dict(exclude={'cover_image_name'}),
|
||||
**board_record.dict(exclude={"cover_image_name"}),
|
||||
cover_image_name=cover_image_name,
|
||||
image_count=image_count,
|
||||
)
|
||||
|
||||
@@ -277,7 +277,7 @@ class InvokeAISettings(BaseSettings):
|
||||
@classmethod
|
||||
def _excluded_from_yaml(self)->List[str]:
|
||||
# combination of deprecated parameters and internal ones that shouldn't be exposed as invokeai.yaml options
|
||||
return ['type','initconf', 'gpu_mem_reserved', 'max_loaded_models', 'version', 'from_file', 'model', 'restore']
|
||||
return ['type','initconf', 'gpu_mem_reserved', 'max_loaded_models', 'version', 'from_file', 'model', 'restore', 'root']
|
||||
|
||||
class Config:
|
||||
env_file_encoding = 'utf-8'
|
||||
@@ -374,16 +374,16 @@ setting environment variables INVOKEAI_<setting>.
|
||||
max_cache_size : float = Field(default=6.0, gt=0, description="Maximum memory amount used by model cache for rapid switching", category='Memory/Performance')
|
||||
max_vram_cache_size : float = Field(default=2.75, ge=0, description="Amount of VRAM reserved for model storage", category='Memory/Performance')
|
||||
gpu_mem_reserved : float = Field(default=2.75, ge=0, description="DEPRECATED: use max_vram_cache_size. Amount of VRAM reserved for model storage", category='DEPRECATED')
|
||||
precision : Literal[tuple(['auto','float16','float32','autocast'])] = Field(default='float16',description='Floating point precision', category='Memory/Performance')
|
||||
precision : Literal[tuple(['auto','float16','float32','autocast'])] = Field(default='auto',description='Floating point precision', category='Memory/Performance')
|
||||
sequential_guidance : bool = Field(default=False, description="Whether to calculate guidance in serial instead of in parallel, lowering memory requirements", category='Memory/Performance')
|
||||
xformers_enabled : bool = Field(default=True, description="Enable/disable memory-efficient attention", category='Memory/Performance')
|
||||
tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", category='Memory/Performance')
|
||||
|
||||
root : Path = Field(default=_find_root(), description='InvokeAI runtime root directory', category='Paths')
|
||||
autoimport_dir : Path = Field(default='autoimport/main', description='Path to a directory of models files to be imported on startup.', category='Paths')
|
||||
lora_dir : Path = Field(default='autoimport/lora', description='Path to a directory of LoRA/LyCORIS models to be imported on startup.', category='Paths')
|
||||
embedding_dir : Path = Field(default='autoimport/embedding', description='Path to a directory of Textual Inversion embeddings to be imported on startup.', category='Paths')
|
||||
controlnet_dir : Path = Field(default='autoimport/controlnet', description='Path to a directory of ControlNet embeddings to be imported on startup.', category='Paths')
|
||||
autoimport_dir : Path = Field(default='autoimport', description='Path to a directory of models files to be imported on startup.', category='Paths')
|
||||
lora_dir : Path = Field(default=None, description='Path to a directory of LoRA/LyCORIS models to be imported on startup.', category='Paths')
|
||||
embedding_dir : Path = Field(default=None, description='Path to a directory of Textual Inversion embeddings to be imported on startup.', category='Paths')
|
||||
controlnet_dir : Path = Field(default=None, description='Path to a directory of ControlNet embeddings to be imported on startup.', category='Paths')
|
||||
conf_path : Path = Field(default='configs/models.yaml', description='Path to models definition file', category='Paths')
|
||||
models_dir : Path = Field(default='models', description='Path to the models directory', category='Paths')
|
||||
legacy_conf_dir : Path = Field(default='configs/stable-diffusion', description='Path to directory of legacy checkpoint config files', category='Paths')
|
||||
@@ -397,7 +397,7 @@ setting environment variables INVOKEAI_<setting>.
|
||||
log_handlers : List[str] = Field(default=["console"], description='Log handler. Valid options are "console", "file=<path>", "syslog=path|address:host:port", "http=<url>"', category="Logging")
|
||||
# note - would be better to read the log_format values from logging.py, but this creates circular dependencies issues
|
||||
log_format : Literal[tuple(['plain','color','syslog','legacy'])] = Field(default="color", description='Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style', category="Logging")
|
||||
log_level : Literal[tuple(["debug","info","warning","error","critical"])] = Field(default="debug", description="Emit logging messages at this level or higher", category="Logging")
|
||||
log_level : Literal[tuple(["debug","info","warning","error","critical"])] = Field(default="info", description="Emit logging messages at this level or higher", category="Logging")
|
||||
|
||||
version : bool = Field(default=False, description="Show InvokeAI version and exit", category="Other")
|
||||
#fmt: on
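These settings can also be overridden per the `INVOKEAI_<setting>` environment-variable convention mentioned above. A hedged sketch (constructing the settings object directly is an assumption made for illustration; the app normally builds it during startup):

```python
import os

# Override defaults via environment variables, per the convention noted above.
os.environ["INVOKEAI_precision"] = "float16"
os.environ["INVOKEAI_log_level"] = "debug"

from invokeai.app.services.config import InvokeAIAppConfig

# The settings class is a pydantic BaseSettings, so a fresh instance picks the
# overrides up.
config = InvokeAIAppConfig()
print(config.precision, config.log_level)
```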
|
||||
@@ -446,7 +446,7 @@ setting environment variables INVOKEAI_<setting>.
|
||||
Path to the runtime root directory
|
||||
'''
|
||||
if self.root:
|
||||
return Path(self.root).expanduser()
|
||||
return Path(self.root).expanduser().absolute()
|
||||
else:
|
||||
return self.find_root()
|
||||
|
||||
|
||||
@@ -1,11 +1,9 @@
|
||||
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
|
||||
|
||||
from typing import Any, Optional
|
||||
from pathlib import Path
|
||||
|
||||
from invokeai.app.models.image import ProgressImage
|
||||
from invokeai.app.util.misc import get_timestamp
|
||||
from invokeai.app.services.model_manager_service import BaseModelType, ModelType, SubModelType, ModelInfo, AddModelResult
|
||||
from invokeai.app.services.model_manager_service import BaseModelType, ModelType, SubModelType, ModelInfo
|
||||
|
||||
class EventServiceBase:
|
||||
session_event: str = "session_event"
|
||||
@@ -113,7 +111,7 @@ class EventServiceBase:
|
||||
submodel: SubModelType,
|
||||
) -> None:
|
||||
"""Emitted when a model is requested"""
|
||||
self.dispatch(
|
||||
self.__emit_session_event(
|
||||
event_name="model_load_started",
|
||||
payload=dict(
|
||||
graph_execution_state_id=graph_execution_state_id,
|
||||
@@ -134,7 +132,7 @@ class EventServiceBase:
|
||||
model_info: ModelInfo,
|
||||
) -> None:
|
||||
"""Emitted when a model is correctly loaded (returns model info)"""
|
||||
self.dispatch(
|
||||
self.__emit_session_event(
|
||||
event_name="model_load_completed",
|
||||
payload=dict(
|
||||
graph_execution_state_id=graph_execution_state_id,
|
||||
@@ -143,96 +141,7 @@ class EventServiceBase:
|
||||
model_type=model_type,
|
||||
submodel=submodel,
|
||||
hash=model_info.hash,
|
||||
location=model_info.location,
|
||||
location=str(model_info.location),
|
||||
precision=str(model_info.precision),
|
||||
),
|
||||
)
|
||||
|
||||
def emit_model_import_started (
|
||||
self,
|
||||
import_path: str, # can be a local path, URL or repo_id
|
||||
)->None:
|
||||
"""Emitted when a model import commences"""
|
||||
self.dispatch(
|
||||
event_name="model_import_started",
|
||||
payload=dict(
|
||||
import_path = import_path,
|
||||
)
|
||||
)
|
||||
|
||||
def emit_model_import_completed (
|
||||
self,
|
||||
import_path: str, # can be a local path, URL or repo_id
|
||||
import_info: AddModelResult,
|
||||
success: bool= True,
|
||||
error: str = None,
|
||||
|
||||
)->None:
|
||||
"""Emitted when a model import completes"""
|
||||
self.dispatch(
|
||||
event_name="model_import_completed",
|
||||
payload=dict(
|
||||
import_path = import_path,
|
||||
import_info = import_info,
|
||||
success = success,
|
||||
error = error,
|
||||
)
|
||||
)
|
||||
|
||||
def emit_download_started (
|
||||
self,
|
||||
url: str,
|
||||
|
||||
)->None:
|
||||
"""Emitted when a download thread starts"""
|
||||
self.dispatch(
|
||||
event_name="download_started",
|
||||
payload=dict(
|
||||
url = url,
|
||||
)
|
||||
)
|
||||
|
||||
def emit_download_progress (
|
||||
self,
|
||||
url: str,
|
||||
downloaded_size: int,
|
||||
total_size: int,
|
||||
)->None:
|
||||
"""
|
||||
Emitted at intervals during a download process
|
||||
:param url: Requested URL
|
||||
:param downloaded_size: Bytes downloaded so far
|
||||
:param total_size: Total bytes to download
|
||||
"""
|
||||
self.dispatch(
|
||||
event_name="download_progress",
|
||||
payload=dict(
|
||||
url = url,
|
||||
downloaded_size = downloaded_size,
|
||||
total_size = total_size,
|
||||
)
|
||||
)
|
||||
|
||||
def emit_download_completed (
|
||||
self,
|
||||
url: str,
|
||||
status_code: int,
|
||||
download_path: Path,
|
||||
|
||||
)->None:
|
||||
"""
|
||||
Emitted when a download thread completes.
|
||||
:param url: Requested URL
|
||||
:param status_code: HTTP status code from request
|
||||
:param download_path: Path to downloaded file
|
||||
"""
|
||||
self.dispatch(
|
||||
event_name="download_completed",
|
||||
payload=dict(
|
||||
url = url,
|
||||
status_code = status_code,
|
||||
download_path = download_path,
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -10,7 +10,10 @@ from pydantic.generics import GenericModel
|
||||
|
||||
from invokeai.app.models.image import ImageCategory, ResourceOrigin
|
||||
from invokeai.app.services.models.image_record import (
|
||||
ImageRecord, ImageRecordChanges, deserialize_image_record)
|
||||
ImageRecord,
|
||||
ImageRecordChanges,
|
||||
deserialize_image_record,
|
||||
)
|
||||
|
||||
T = TypeVar("T", bound=BaseModel)
|
||||
|
||||
@@ -97,8 +100,8 @@ class ImageRecordStorageBase(ABC):
|
||||
@abstractmethod
|
||||
def get_many(
|
||||
self,
|
||||
offset: int = 0,
|
||||
limit: int = 10,
|
||||
offset: Optional[int] = None,
|
||||
limit: Optional[int] = None,
|
||||
image_origin: Optional[ResourceOrigin] = None,
|
||||
categories: Optional[list[ImageCategory]] = None,
|
||||
is_intermediate: Optional[bool] = None,
|
||||
@@ -119,6 +122,11 @@ class ImageRecordStorageBase(ABC):
|
||||
"""Deletes many image records."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def delete_intermediates(self) -> list[str]:
|
||||
"""Deletes all intermediate image records, returning a list of deleted image names."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def save(
|
||||
self,
|
||||
@@ -322,8 +330,8 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
|
||||
|
||||
def get_many(
|
||||
self,
|
||||
offset: int = 0,
|
||||
limit: int = 10,
|
||||
offset: Optional[int] = None,
|
||||
limit: Optional[int] = None,
|
||||
image_origin: Optional[ResourceOrigin] = None,
|
||||
categories: Optional[list[ImageCategory]] = None,
|
||||
is_intermediate: Optional[bool] = None,
|
||||
@@ -377,11 +385,15 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
|
||||
|
||||
query_params.append(is_intermediate)
|
||||
|
||||
if board_id is not None:
|
||||
# board_id of "none" is reserved for images without a board
|
||||
if board_id == "none":
|
||||
query_conditions += """--sql
|
||||
AND board_images.board_id IS NULL
|
||||
"""
|
||||
elif board_id is not None:
|
||||
query_conditions += """--sql
|
||||
AND board_images.board_id = ?
|
||||
"""
|
||||
|
||||
query_params.append(board_id)
|
||||
|
||||
query_pagination = """--sql
|
||||
@@ -392,8 +404,12 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
|
||||
images_query += query_conditions + query_pagination + ";"
|
||||
# Add all the parameters
|
||||
images_params = query_params.copy()
|
||||
images_params.append(limit)
|
||||
images_params.append(offset)
|
||||
|
||||
if limit is not None:
|
||||
images_params.append(limit)
|
||||
if offset is not None:
|
||||
images_params.append(offset)
|
||||
|
||||
# Build the list of images, deserializing each row
|
||||
self._cursor.execute(images_query, images_params)
|
||||
result = cast(list[sqlite3.Row], self._cursor.fetchall())
|
||||
@@ -450,6 +466,32 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
|
||||
finally:
|
||||
self._lock.release()
|
||||
|
||||
|
||||
def delete_intermediates(self) -> list[str]:
|
||||
try:
|
||||
self._lock.acquire()
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
SELECT image_name FROM images
|
||||
WHERE is_intermediate = TRUE;
|
||||
"""
|
||||
)
|
||||
result = cast(list[sqlite3.Row], self._cursor.fetchall())
|
||||
image_names = list(map(lambda r: r[0], result))
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
DELETE FROM images
|
||||
WHERE is_intermediate = TRUE;
|
||||
"""
|
||||
)
|
||||
self._conn.commit()
|
||||
return image_names
|
||||
except sqlite3.Error as e:
|
||||
self._conn.rollback()
|
||||
raise ImageRecordDeleteException from e
|
||||
finally:
|
||||
self._lock.release()
|
||||
|
||||
def save(
|
||||
self,
|
||||
image_name: str,
|
||||
|
||||
@@ -6,22 +6,33 @@ from typing import TYPE_CHECKING, Optional
|
||||
from PIL.Image import Image as PILImageType
|
||||
|
||||
from invokeai.app.invocations.metadata import ImageMetadata
|
||||
from invokeai.app.models.image import (ImageCategory,
|
||||
InvalidImageCategoryException,
|
||||
InvalidOriginException, ResourceOrigin)
|
||||
from invokeai.app.services.board_image_record_storage import \
|
||||
BoardImageRecordStorageBase
|
||||
from invokeai.app.services.graph import Graph
|
||||
from invokeai.app.models.image import (
|
||||
ImageCategory,
|
||||
InvalidImageCategoryException,
|
||||
InvalidOriginException,
|
||||
ResourceOrigin,
|
||||
)
|
||||
from invokeai.app.services.board_image_record_storage import BoardImageRecordStorageBase
|
||||
from invokeai.app.services.image_file_storage import (
|
||||
ImageFileDeleteException, ImageFileNotFoundException,
|
||||
ImageFileSaveException, ImageFileStorageBase)
|
||||
ImageFileDeleteException,
|
||||
ImageFileNotFoundException,
|
||||
ImageFileSaveException,
|
||||
ImageFileStorageBase,
|
||||
)
|
||||
from invokeai.app.services.image_record_storage import (
|
||||
ImageRecordDeleteException, ImageRecordNotFoundException,
|
||||
ImageRecordSaveException, ImageRecordStorageBase, OffsetPaginatedResults)
|
||||
ImageRecordDeleteException,
|
||||
ImageRecordNotFoundException,
|
||||
ImageRecordSaveException,
|
||||
ImageRecordStorageBase,
|
||||
OffsetPaginatedResults,
|
||||
)
|
||||
from invokeai.app.services.item_storage import ItemStorageABC
|
||||
from invokeai.app.services.models.image_record import (ImageDTO, ImageRecord,
|
||||
ImageRecordChanges,
|
||||
image_record_to_dto)
|
||||
from invokeai.app.services.models.image_record import (
|
||||
ImageDTO,
|
||||
ImageRecord,
|
||||
ImageRecordChanges,
|
||||
image_record_to_dto,
|
||||
)
|
||||
from invokeai.app.services.resource_name import NameServiceBase
|
||||
from invokeai.app.services.urls import UrlServiceBase
|
||||
from invokeai.app.util.metadata import get_metadata_graph_from_raw_session
|
||||
@@ -41,6 +52,7 @@ class ImageServiceABC(ABC):
|
||||
image_category: ImageCategory,
|
||||
node_id: Optional[str] = None,
|
||||
session_id: Optional[str] = None,
|
||||
board_id: Optional[str] = None,
|
||||
is_intermediate: bool = False,
|
||||
metadata: Optional[dict] = None,
|
||||
) -> ImageDTO:
|
||||
@@ -109,6 +121,11 @@ class ImageServiceABC(ABC):
|
||||
"""Deletes an image."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def delete_intermediates(self) -> int:
|
||||
"""Deletes all intermediate images."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def delete_images_on_board(self, board_id: str):
|
||||
"""Deletes all images on a board."""
|
||||
@@ -158,6 +175,7 @@ class ImageService(ImageServiceABC):
|
||||
image_category: ImageCategory,
|
||||
node_id: Optional[str] = None,
|
||||
session_id: Optional[str] = None,
|
||||
board_id: Optional[str] = None,
|
||||
is_intermediate: bool = False,
|
||||
metadata: Optional[dict] = None,
|
||||
) -> ImageDTO:
|
||||
@@ -199,6 +217,11 @@ class ImageService(ImageServiceABC):
|
||||
session_id=session_id,
|
||||
)
|
||||
|
||||
if board_id is not None:
|
||||
self._services.board_image_records.add_image_to_board(
|
||||
board_id=board_id, image_name=image_name
|
||||
)
|
||||
|
||||
self._services.image_files.save(
|
||||
image_name=image_name, image=image, metadata=metadata, graph=graph
|
||||
)
|
||||
@@ -378,16 +401,31 @@ class ImageService(ImageServiceABC):
|
||||
|
||||
def delete_images_on_board(self, board_id: str):
|
||||
try:
|
||||
images = self._services.board_image_records.get_images_for_board(board_id)
|
||||
image_name_list = list(
|
||||
map(
|
||||
lambda r: r.image_name,
|
||||
images.items,
|
||||
image_names = (
|
||||
self._services.board_image_records.get_all_board_image_names_for_board(
|
||||
board_id
|
||||
)
|
||||
)
|
||||
for image_name in image_name_list:
|
||||
for image_name in image_names:
|
||||
self._services.image_files.delete(image_name)
|
||||
self._services.image_records.delete_many(image_name_list)
|
||||
self._services.image_records.delete_many(image_names)
|
||||
except ImageRecordDeleteException:
|
||||
self._services.logger.error(f"Failed to delete image records")
|
||||
raise
|
||||
except ImageFileDeleteException:
|
||||
self._services.logger.error(f"Failed to delete image files")
|
||||
raise
|
||||
except Exception as e:
|
||||
self._services.logger.error("Problem deleting image records and files")
|
||||
raise e
|
||||
|
||||
def delete_intermediates(self) -> int:
|
||||
try:
|
||||
image_names = self._services.image_records.delete_intermediates()
|
||||
count = len(image_names)
|
||||
for image_name in image_names:
|
||||
self._services.image_files.delete(image_name)
|
||||
return count
|
||||
except ImageRecordDeleteException:
|
||||
self._services.logger.error(f"Failed to delete image records")
|
||||
raise
|
||||
|
||||
@@ -26,7 +26,6 @@ import torch
|
||||
from invokeai.app.models.exceptions import CanceledException
|
||||
from ...backend.util import choose_precision, choose_torch_device
|
||||
from .config import InvokeAIAppConfig
|
||||
from .events import EventServiceBase
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ..invocations.baseinvocation import BaseInvocation, InvocationContext
|
||||
@@ -300,10 +299,11 @@ class ModelManagerService(ModelManagerServiceBase):
|
||||
else:
|
||||
config_file = config.root_dir / "configs/models.yaml"
|
||||
|
||||
logger.debug(f'config file={config_file}')
|
||||
logger.debug(f'Config file={config_file}')
|
||||
|
||||
device = torch.device(choose_torch_device())
|
||||
logger.debug(f'GPU device = {device}')
|
||||
device_name = torch.cuda.get_device_name() if device==torch.device('cuda') else ''
|
||||
logger.info(f'GPU device = {device} {device_name}')
|
||||
|
||||
precision = config.precision
|
||||
if precision == "auto":
|
||||
@@ -543,7 +543,6 @@ class ModelManagerService(ModelManagerServiceBase):
|
||||
def heuristic_import(self,
|
||||
items_to_import: set[str],
|
||||
prediction_type_helper: Optional[Callable[[Path],SchedulerPredictionType]]=None,
|
||||
event_bus: Optional[EventServiceBase]=None,
|
||||
)->dict[str, AddModelResult]:
|
||||
'''Import a list of paths, repo_ids or URLs. Returns the set of
|
||||
successfully imported items.
|
||||
@@ -561,7 +560,7 @@ class ModelManagerService(ModelManagerServiceBase):
|
||||
of the set is a dict corresponding to the newly-created OmegaConf stanza for
|
||||
that model.
|
||||
'''
|
||||
return self.mgr.heuristic_import(items_to_import, prediction_type_helper, event_bus=event_bus)
|
||||
return self.mgr.heuristic_import(items_to_import, prediction_type_helper)
|
||||
|
||||
def merge_models(
|
||||
self,
|
||||
|
||||
invokeai/app/util/controlnet_utils.py (new file, 342 lines)
@@ -0,0 +1,342 @@
|
||||
import torch
|
||||
import numpy as np
|
||||
import cv2
|
||||
from PIL import Image
|
||||
from diffusers.utils import PIL_INTERPOLATION
|
||||
|
||||
from einops import rearrange
|
||||
from controlnet_aux.util import HWC3, resize_image
|
||||
|
||||
###################################################################
|
||||
# Copy of scripts/lvminthin.py from Mikubill/sd-webui-controlnet
|
||||
###################################################################
|
||||
# High Quality Edge Thinning using Pure Python
|
||||
# Written by Lvmin Zhang
|
||||
# 2023 April
|
||||
# Stanford University
|
||||
# If you use this, please Cite "High Quality Edge Thinning using Pure Python", Lvmin Zhang, In Mikubill/sd-webui-controlnet.
|
||||
|
||||
lvmin_kernels_raw = [
|
||||
np.array([
|
||||
[-1, -1, -1],
|
||||
[0, 1, 0],
|
||||
[1, 1, 1]
|
||||
], dtype=np.int32),
|
||||
np.array([
|
||||
[0, -1, -1],
|
||||
[1, 1, -1],
|
||||
[0, 1, 0]
|
||||
], dtype=np.int32)
|
||||
]
|
||||
|
||||
lvmin_kernels = []
|
||||
lvmin_kernels += [np.rot90(x, k=0, axes=(0, 1)) for x in lvmin_kernels_raw]
|
||||
lvmin_kernels += [np.rot90(x, k=1, axes=(0, 1)) for x in lvmin_kernels_raw]
|
||||
lvmin_kernels += [np.rot90(x, k=2, axes=(0, 1)) for x in lvmin_kernels_raw]
|
||||
lvmin_kernels += [np.rot90(x, k=3, axes=(0, 1)) for x in lvmin_kernels_raw]
|
||||
|
||||
lvmin_prunings_raw = [
|
||||
np.array([
|
||||
[-1, -1, -1],
|
||||
[-1, 1, -1],
|
||||
[0, 0, -1]
|
||||
], dtype=np.int32),
|
||||
np.array([
|
||||
[-1, -1, -1],
|
||||
[-1, 1, -1],
|
||||
[-1, 0, 0]
|
||||
], dtype=np.int32)
|
||||
]
|
||||
|
||||
lvmin_prunings = []
|
||||
lvmin_prunings += [np.rot90(x, k=0, axes=(0, 1)) for x in lvmin_prunings_raw]
|
||||
lvmin_prunings += [np.rot90(x, k=1, axes=(0, 1)) for x in lvmin_prunings_raw]
|
||||
lvmin_prunings += [np.rot90(x, k=2, axes=(0, 1)) for x in lvmin_prunings_raw]
|
||||
lvmin_prunings += [np.rot90(x, k=3, axes=(0, 1)) for x in lvmin_prunings_raw]
|
||||
|
||||
|
||||
def remove_pattern(x, kernel):
|
||||
objects = cv2.morphologyEx(x, cv2.MORPH_HITMISS, kernel)
|
||||
objects = np.where(objects > 127)
|
||||
x[objects] = 0
|
||||
return x, objects[0].shape[0] > 0
|
||||
|
||||
|
||||
def thin_one_time(x, kernels):
|
||||
y = x
|
||||
is_done = True
|
||||
for k in kernels:
|
||||
y, has_update = remove_pattern(y, k)
|
||||
if has_update:
|
||||
is_done = False
|
||||
return y, is_done
|
||||
|
||||
|
||||
def lvmin_thin(x, prunings=True):
|
||||
y = x
|
||||
for i in range(32):
|
||||
y, is_done = thin_one_time(y, lvmin_kernels)
|
||||
if is_done:
|
||||
break
|
||||
if prunings:
|
||||
y, _ = thin_one_time(y, lvmin_prunings)
|
||||
return y
|
||||
|
||||
|
||||
def nake_nms(x):
|
||||
f1 = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8)
|
||||
f2 = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8)
|
||||
f3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8)
|
||||
f4 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8)
|
||||
y = np.zeros_like(x)
|
||||
for f in [f1, f2, f3, f4]:
|
||||
np.putmask(y, cv2.dilate(x, kernel=f) == x, x)
|
||||
return y
|
||||
|
||||
|
||||
################################################################################
|
||||
# copied from Mikubill/sd-webui-controlnet external_code.py and modified for InvokeAI
|
||||
################################################################################
|
||||
# FIXME: not using yet, if used in the future will most likely require modification of preprocessors
|
||||
def pixel_perfect_resolution(
|
||||
image: np.ndarray,
|
||||
target_H: int,
|
||||
target_W: int,
|
||||
resize_mode: str,
|
||||
) -> int:
|
||||
"""
|
||||
Calculate the estimated resolution for resizing an image while preserving aspect ratio.
|
||||
|
||||
The function first calculates scaling factors for height and width of the image based on the target
|
||||
height and width. Then, based on the chosen resize mode, it either takes the smaller or the larger
|
||||
scaling factor to estimate the new resolution.
|
||||
|
||||
If the resize mode is OUTER_FIT, the function uses the smaller scaling factor, ensuring the whole image
|
||||
fits within the target dimensions, potentially leaving some empty space.
|
||||
|
||||
If the resize mode is not OUTER_FIT, the function uses the larger scaling factor, ensuring the target
|
||||
dimensions are fully filled, potentially cropping the image.
|
||||
|
||||
After calculating the estimated resolution, the function can print some debugging information (the print statements are commented out below).
|
||||
|
||||
Args:
|
||||
image (np.ndarray): A 3D numpy array representing an image. The dimensions represent [height, width, channels].
|
||||
target_H (int): The target height for the image.
|
||||
target_W (int): The target width for the image.
|
||||
resize_mode (ResizeMode): The mode for resizing.
|
||||
|
||||
Returns:
|
||||
int: The estimated resolution after resizing.
|
||||
"""
|
||||
raw_H, raw_W, _ = image.shape
|
||||
|
||||
k0 = float(target_H) / float(raw_H)
|
||||
k1 = float(target_W) / float(raw_W)
|
||||
|
||||
if resize_mode == "fill_resize":
|
||||
estimation = min(k0, k1) * float(min(raw_H, raw_W))
|
||||
else: # "crop_resize" or "just_resize" (or possibly "just_resize_simple"?)
|
||||
estimation = max(k0, k1) * float(min(raw_H, raw_W))
|
||||
|
||||
# print(f"Pixel Perfect Computation:")
|
||||
# print(f"resize_mode = {resize_mode}")
|
||||
# print(f"raw_H = {raw_H}")
|
||||
# print(f"raw_W = {raw_W}")
|
||||
# print(f"target_H = {target_H}")
|
||||
# print(f"target_W = {target_W}")
|
||||
# print(f"estimation = {estimation}")
|
||||
|
||||
return int(np.round(estimation))
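As a worked example of the estimate above (numbers are illustrative only): for a 768x512 (HxW) input and a 512x512 target, k0 = 512/768 ≈ 0.667 and k1 = 512/512 = 1.0, so "fill_resize" gives round(0.667 * 512) = 341 while the other modes give round(1.0 * 512) = 512. A quick check, assuming this module is importable:

# Illustrative check of pixel_perfect_resolution (input values are made up).
import numpy as np

img = np.zeros((768, 512, 3), dtype=np.uint8)   # raw_H=768, raw_W=512
print(pixel_perfect_resolution(img, target_H=512, target_W=512, resize_mode="fill_resize"))  # 341
print(pixel_perfect_resolution(img, target_H=512, target_W=512, resize_mode="crop_resize"))  # 512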
|
||||
|
||||
|
||||
###########################################################################
|
||||
# Copied from detectmap_proc method in scripts/detectmap_proc.py in Mikubill/sd-webui-controlnet
|
||||
# modified for InvokeAI
|
||||
###########################################################################
|
||||
# def detectmap_proc(detected_map, module, resize_mode, h, w):
|
||||
def np_img_resize(
|
||||
np_img: np.ndarray,
|
||||
resize_mode: str,
|
||||
h: int,
|
||||
w: int,
|
||||
device: torch.device = torch.device('cpu')
|
||||
):
|
||||
# if 'inpaint' in module:
|
||||
# np_img = np_img.astype(np.float32)
|
||||
# else:
|
||||
# np_img = HWC3(np_img)
|
||||
np_img = HWC3(np_img)
|
||||
|
||||
def safe_numpy(x):
|
||||
# A very safe method to make sure that Apple/Mac works
|
||||
y = x
|
||||
|
||||
# below is very boring but do not change these. If you change these Apple or Mac may fail.
|
||||
y = y.copy()
|
||||
y = np.ascontiguousarray(y)
|
||||
y = y.copy()
|
||||
return y
|
||||
|
||||
def get_pytorch_control(x):
|
||||
# A very safe method to make sure that Apple/Mac works
|
||||
y = x
|
||||
|
||||
# below is very boring but do not change these. If you change these Apple or Mac may fail.
|
||||
y = torch.from_numpy(y)
|
||||
y = y.float() / 255.0
|
||||
y = rearrange(y, 'h w c -> 1 c h w')
|
||||
y = y.clone()
|
||||
# y = y.to(devices.get_device_for("controlnet"))
|
||||
y = y.to(device)
|
||||
y = y.clone()
|
||||
return y
|
||||
|
||||
def high_quality_resize(x: np.ndarray,
|
||||
size):
|
||||
# Written by lvmin
|
||||
# Super high-quality control map up-scaling, considering binary, seg, and one-pixel edges
|
||||
inpaint_mask = None
|
||||
if x.ndim == 3 and x.shape[2] == 4:
|
||||
inpaint_mask = x[:, :, 3]
|
||||
x = x[:, :, 0:3]
|
||||
|
||||
new_size_is_smaller = (size[0] * size[1]) < (x.shape[0] * x.shape[1])
|
||||
new_size_is_bigger = (size[0] * size[1]) > (x.shape[0] * x.shape[1])
|
||||
unique_color_count = np.unique(x.reshape(-1, x.shape[2]), axis=0).shape[0]
|
||||
is_one_pixel_edge = False
|
||||
is_binary = False
|
||||
if unique_color_count == 2:
|
||||
is_binary = np.min(x) < 16 and np.max(x) > 240
|
||||
if is_binary:
|
||||
xc = x
|
||||
xc = cv2.erode(xc, np.ones(shape=(3, 3), dtype=np.uint8), iterations=1)
|
||||
xc = cv2.dilate(xc, np.ones(shape=(3, 3), dtype=np.uint8), iterations=1)
|
||||
one_pixel_edge_count = np.where(xc < x)[0].shape[0]
|
||||
all_edge_count = np.where(x > 127)[0].shape[0]
|
||||
is_one_pixel_edge = one_pixel_edge_count * 2 > all_edge_count
|
||||
|
||||
if 2 < unique_color_count < 200:
|
||||
interpolation = cv2.INTER_NEAREST
|
||||
elif new_size_is_smaller:
|
||||
interpolation = cv2.INTER_AREA
|
||||
else:
|
||||
interpolation = cv2.INTER_CUBIC # Must be CUBIC because we now use nms. NEVER CHANGE THIS
|
||||
|
||||
y = cv2.resize(x, size, interpolation=interpolation)
|
||||
if inpaint_mask is not None:
|
||||
inpaint_mask = cv2.resize(inpaint_mask, size, interpolation=interpolation)
|
||||
|
||||
if is_binary:
|
||||
y = np.mean(y.astype(np.float32), axis=2).clip(0, 255).astype(np.uint8)
|
||||
if is_one_pixel_edge:
|
||||
y = nake_nms(y)
|
||||
_, y = cv2.threshold(y, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
|
||||
y = lvmin_thin(y, prunings=new_size_is_bigger)
|
||||
else:
|
||||
_, y = cv2.threshold(y, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
|
||||
y = np.stack([y] * 3, axis=2)
|
||||
|
||||
if inpaint_mask is not None:
|
||||
inpaint_mask = (inpaint_mask > 127).astype(np.float32) * 255.0
|
||||
inpaint_mask = inpaint_mask[:, :, None].clip(0, 255).astype(np.uint8)
|
||||
y = np.concatenate([y, inpaint_mask], axis=2)
|
||||
|
||||
return y
|
||||
|
||||
# if resize_mode == external_code.ResizeMode.RESIZE:
|
||||
if resize_mode == "just_resize": # RESIZE
|
||||
np_img = high_quality_resize(np_img, (w, h))
|
||||
np_img = safe_numpy(np_img)
|
||||
return get_pytorch_control(np_img), np_img
|
||||
|
||||
old_h, old_w, _ = np_img.shape
|
||||
old_w = float(old_w)
|
||||
old_h = float(old_h)
|
||||
k0 = float(h) / old_h
|
||||
k1 = float(w) / old_w
|
||||
|
||||
safeint = lambda x: int(np.round(x))
|
||||
|
||||
# if resize_mode == external_code.ResizeMode.OUTER_FIT:
|
||||
if resize_mode == "fill_resize": # OUTER_FIT
|
||||
k = min(k0, k1)
|
||||
borders = np.concatenate([np_img[0, :, :], np_img[-1, :, :], np_img[:, 0, :], np_img[:, -1, :]], axis=0)
|
||||
high_quality_border_color = np.median(borders, axis=0).astype(np_img.dtype)
|
||||
if len(high_quality_border_color) == 4:
|
||||
# Inpaint hijack
|
||||
high_quality_border_color[3] = 255
|
||||
high_quality_background = np.tile(high_quality_border_color[None, None], [h, w, 1])
|
||||
np_img = high_quality_resize(np_img, (safeint(old_w * k), safeint(old_h * k)))
|
||||
new_h, new_w, _ = np_img.shape
|
||||
pad_h = max(0, (h - new_h) // 2)
|
||||
pad_w = max(0, (w - new_w) // 2)
|
||||
high_quality_background[pad_h:pad_h + new_h, pad_w:pad_w + new_w] = np_img
|
||||
np_img = high_quality_background
|
||||
np_img = safe_numpy(np_img)
|
||||
return get_pytorch_control(np_img), np_img
|
||||
else: # resize_mode == "crop_resize" (INNER_FIT)
|
||||
k = max(k0, k1)
|
||||
np_img = high_quality_resize(np_img, (safeint(old_w * k), safeint(old_h * k)))
|
||||
new_h, new_w, _ = np_img.shape
|
||||
pad_h = max(0, (new_h - h) // 2)
|
||||
pad_w = max(0, (new_w - w) // 2)
|
||||
np_img = np_img[pad_h:pad_h + h, pad_w:pad_w + w]
|
||||
np_img = safe_numpy(np_img)
|
||||
return get_pytorch_control(np_img), np_img
|
||||
|
||||
def prepare_control_image(
|
||||
# image used to be Union[PIL.Image.Image, List[PIL.Image.Image], torch.Tensor, List[torch.Tensor]]
|
||||
# but now should be able to assume that image is a single PIL.Image, which simplifies things
|
||||
image: Image,
|
||||
# FIXME: need to fix hardwiring of width and height, change to basing on latents dimensions?
|
||||
# latents_to_match_resolution, # TorchTensor of shape (batch_size, 3, height, width)
|
||||
width=512, # should be 8 * latent.shape[3]
|
||||
height=512, # should be 8 * latent.shape[2]
|
||||
# batch_size=1, # currently no batching
|
||||
# num_images_per_prompt=1, # currently only single image
|
||||
device="cuda",
|
||||
dtype=torch.float16,
|
||||
do_classifier_free_guidance=True,
|
||||
control_mode="balanced",
|
||||
resize_mode="just_resize_simple",
|
||||
):
|
||||
# FIXME: implement "crop_resize_simple" and "fill_resize_simple", or pull them out
|
||||
if (resize_mode == "just_resize_simple" or
|
||||
resize_mode == "crop_resize_simple" or
|
||||
resize_mode == "fill_resize_simple"):
|
||||
image = image.convert("RGB")
|
||||
if (resize_mode == "just_resize_simple"):
|
||||
image = image.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])
|
||||
elif (resize_mode == "crop_resize_simple"): # not yet implemented
|
||||
pass
|
||||
elif (resize_mode == "fill_resize_simple"): # not yet implemented
|
||||
pass
|
||||
nimage = np.array(image)
|
||||
nimage = nimage[None, :]
|
||||
nimage = np.concatenate([nimage], axis=0)
|
||||
# normalizing RGB values to [0,1] range (in PIL.Image they are [0-255])
|
||||
nimage = np.array(nimage).astype(np.float32) / 255.0
|
||||
nimage = nimage.transpose(0, 3, 1, 2)
|
||||
timage = torch.from_numpy(nimage)
|
||||
|
||||
# use fancy lvmin controlnet resizing
|
||||
elif (resize_mode == "just_resize" or resize_mode == "crop_resize" or resize_mode == "fill_resize"):
|
||||
nimage = np.array(image)
|
||||
timage, nimage = np_img_resize(
|
||||
np_img=nimage,
|
||||
resize_mode=resize_mode,
|
||||
h=height,
|
||||
w=width,
|
||||
# device=torch.device('cpu')
|
||||
device=device,
|
||||
)
|
||||
else:
|
||||
pass
|
||||
print("ERROR: invalid resize_mode ==> ", resize_mode)
|
||||
exit(1)
|
||||
|
||||
timage = timage.to(device=device, dtype=dtype)
|
||||
cfg_injection = (control_mode == "more_control" or control_mode == "unbalanced")
|
||||
if do_classifier_free_guidance and not cfg_injection:
|
||||
timage = torch.cat([timage] * 2)
|
||||
return timage
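A minimal sketch of how prepare_control_image might be called, assuming it is importable from invokeai.app.util.controlnet_utils; the image path and device are placeholders, not taken from this diff:

# Hypothetical call site for prepare_control_image (illustrative only).
import torch
from PIL import Image

control_image = Image.open("canny_edges.png")   # placeholder path
control = prepare_control_image(
    image=control_image,
    width=512,
    height=512,
    device="cpu",                               # "cuda" when a GPU is available
    dtype=torch.float32,
    do_classifier_free_guidance=True,
    control_mode="balanced",
    resize_mode="just_resize",
)
# With classifier-free guidance and a "balanced" control mode the tensor is
# duplicated along the batch dimension, giving shape (2, 3, 512, 512).
print(control.shape)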
|
||||
@@ -1,9 +1,30 @@
|
||||
import torch
|
||||
from PIL import Image
|
||||
from invokeai.app.models.exceptions import CanceledException
|
||||
from invokeai.app.models.image import ProgressImage
|
||||
from ..invocations.baseinvocation import InvocationContext
|
||||
from ...backend.util.util import image_to_dataURL
|
||||
from ...backend.generator.base import Generator
|
||||
from ...backend.stable_diffusion import PipelineIntermediateState
|
||||
from invokeai.app.services.config import InvokeAIAppConfig
|
||||
|
||||
|
||||
def sample_to_lowres_estimated_image(samples, latent_rgb_factors, smooth_matrix = None):
|
||||
latent_image = samples[0].permute(1, 2, 0) @ latent_rgb_factors
|
||||
|
||||
if smooth_matrix is not None:
|
||||
latent_image = latent_image.unsqueeze(0).permute(3, 0, 1, 2)
|
||||
latent_image = torch.nn.functional.conv2d(latent_image, smooth_matrix.reshape((1,1,3,3)), padding=1)
|
||||
latent_image = latent_image.permute(1, 2, 3, 0).squeeze(0)
|
||||
|
||||
latents_ubyte = (
|
||||
((latent_image + 1) / 2)
|
||||
.clamp(0, 1) # change scale from -1..1 to 0..1
|
||||
.mul(0xFF) # to 0..255
|
||||
.byte()
|
||||
).cpu()
|
||||
|
||||
return Image.fromarray(latents_ubyte.numpy())
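To make the projection above concrete: samples[0] is a latent of shape (4, H/8, W/8); permuting it to (H/8, W/8, 4) and multiplying by a (4, 3) factor matrix gives a rough (H/8, W/8, 3) RGB preview that is then rescaled from [-1, 1] to [0, 255]. When a smooth_matrix is supplied (the SDXL path further below), it is applied as a per-channel 3x3 blur by folding the channels into the batch dimension of conv2d. A small sketch using the v1.5 factors quoted later in this file:

# Illustrative sketch of the latent -> RGB preview projection (shapes are the point).
import torch

latents = torch.randn(1, 4, 64, 64)              # one 512x512 image's latent
v1_5_factors = torch.tensor([
    [0.3444, 0.1385, 0.0670],
    [0.1247, 0.4027, 0.1494],
    [-0.3192, 0.2513, 0.2103],
    [-0.1307, -0.1874, -0.7445],
])
preview = sample_to_lowres_estimated_image(latents, v1_5_factors)
print(preview.size)                              # (64, 64) PIL image, 1/8 scale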
|
||||
|
||||
|
||||
def stable_diffusion_step_callback(
|
||||
@@ -37,7 +58,24 @@ def stable_diffusion_step_callback(
|
||||
# step = intermediate_state.step
|
||||
|
||||
# TODO: only output a preview image when requested
|
||||
image = Generator.sample_to_lowres_estimated_image(sample)
|
||||
|
||||
# originally adapted from code by @erucipe and @keturn here:
|
||||
# https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/7
|
||||
|
||||
# these updated numbers for v1.5 are from @torridgristle
|
||||
v1_5_latent_rgb_factors = torch.tensor(
|
||||
[
|
||||
# R G B
|
||||
[0.3444, 0.1385, 0.0670], # L1
|
||||
[0.1247, 0.4027, 0.1494], # L2
|
||||
[-0.3192, 0.2513, 0.2103], # L3
|
||||
[-0.1307, -0.1874, -0.7445], # L4
|
||||
],
|
||||
dtype=sample.dtype,
|
||||
device=sample.device,
|
||||
)
|
||||
|
||||
image = sample_to_lowres_estimated_image(sample, v1_5_latent_rgb_factors)
|
||||
|
||||
(width, height) = image.size
|
||||
width *= 8
|
||||
@@ -53,3 +91,56 @@ def stable_diffusion_step_callback(
|
||||
step=intermediate_state.step,
|
||||
total_steps=node["steps"],
|
||||
)
|
||||
|
||||
def stable_diffusion_xl_step_callback(
|
||||
context: InvocationContext,
|
||||
node: dict,
|
||||
source_node_id: str,
|
||||
sample,
|
||||
step,
|
||||
total_steps,
|
||||
):
|
||||
if context.services.queue.is_canceled(context.graph_execution_state_id):
|
||||
raise CanceledException
|
||||
|
||||
sdxl_latent_rgb_factors = torch.tensor(
|
||||
[
|
||||
# R G B
|
||||
[ 0.3816, 0.4930, 0.5320],
|
||||
[-0.3753, 0.1631, 0.1739],
|
||||
[ 0.1770, 0.3588, -0.2048],
|
||||
[-0.4350, -0.2644, -0.4289],
|
||||
],
|
||||
dtype=sample.dtype,
|
||||
device=sample.device,
|
||||
)
|
||||
|
||||
sdxl_smooth_matrix = torch.tensor(
|
||||
[
|
||||
#[ 0.0478, 0.1285, 0.0478],
|
||||
#[ 0.1285, 0.2948, 0.1285],
|
||||
#[ 0.0478, 0.1285, 0.0478],
|
||||
[0.0358, 0.0964, 0.0358],
|
||||
[0.0964, 0.4711, 0.0964],
|
||||
[0.0358, 0.0964, 0.0358],
|
||||
],
|
||||
dtype=sample.dtype,
|
||||
device=sample.device,
|
||||
)
|
||||
|
||||
image = sample_to_lowres_estimated_image(sample, sdxl_latent_rgb_factors, sdxl_smooth_matrix)
|
||||
|
||||
(width, height) = image.size
|
||||
width *= 8
|
||||
height *= 8
|
||||
|
||||
dataURL = image_to_dataURL(image, image_format="JPEG")
|
||||
|
||||
context.services.events.emit_generator_progress(
|
||||
graph_execution_state_id=context.graph_execution_state_id,
|
||||
node=node,
|
||||
source_node_id=source_node_id,
|
||||
progress_image=ProgressImage(width=width, height=height, dataURL=dataURL),
|
||||
step=step,
|
||||
total_steps=total_steps,
|
||||
)
|
||||
@@ -23,6 +23,7 @@ from urllib import request
|
||||
|
||||
import npyscreen
|
||||
import transformers
|
||||
import omegaconf
|
||||
from diffusers import AutoencoderKL
|
||||
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
|
||||
from huggingface_hub import HfFolder
|
||||
@@ -44,6 +45,7 @@ from invokeai.backend.util.logging import InvokeAILogger
|
||||
from invokeai.frontend.install.model_install import addModelsForm, process_and_execute
|
||||
from invokeai.frontend.install.widgets import (
|
||||
CenteredButtonPress,
|
||||
FileBox,
|
||||
IntTitleSlider,
|
||||
set_min_terminal_size,
|
||||
CyclingForm,
|
||||
@@ -222,7 +224,7 @@ def download_conversion_models():
|
||||
|
||||
# ---------------------------------------------
|
||||
def download_realesrgan():
|
||||
logger.info("Installing RealESRGAN models...")
|
||||
logger.info("Installing ESRGAN Upscaling models...")
|
||||
URLs = [
|
||||
dict(
|
||||
url = "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
|
||||
@@ -239,6 +241,11 @@ def download_realesrgan():
|
||||
dest= "core/upscaling/realesrgan/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth",
|
||||
description = "ESRGAN_SRx4_DF2KOST_official.pth",
|
||||
),
|
||||
dict(
|
||||
url= "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
|
||||
dest= "core/upscaling/realesrgan/RealESRGAN_x2plus.pth",
|
||||
description = "RealESRGAN_x2plus.pth",
|
||||
),
|
||||
]
|
||||
for model in URLs:
|
||||
download_with_progress_bar(model['url'], config.models_path / model['dest'], model['description'])
|
||||
@@ -404,21 +411,21 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
|
||||
self.nextrely += 1
|
||||
self.add_widget_intelligent(
|
||||
npyscreen.FixedText,
|
||||
value="Directories containing textual inversion, controlnet and LoRA models (<tab> autocompletes, ctrl-N advances):",
|
||||
value="Folder to recursively scan for new checkpoints, ControlNets, LoRAs and TI models (<tab> autocompletes, ctrl-N advances):",
|
||||
editable=False,
|
||||
color="CONTROL",
|
||||
)
|
||||
self.autoimport_dirs = {}
|
||||
for description, config_name, path in autoimport_paths(old_opts):
|
||||
self.autoimport_dirs[config_name] = self.add_widget_intelligent(
|
||||
npyscreen.TitleFilename,
|
||||
name=description+':',
|
||||
value=str(path),
|
||||
self.autoimport_dirs['autoimport_dir'] = self.add_widget_intelligent(
|
||||
FileBox,
|
||||
name=f'Autoimport Folder',
|
||||
value=str(config.root_path / config.autoimport_dir),
|
||||
select_dir=True,
|
||||
must_exist=False,
|
||||
use_two_lines=False,
|
||||
labelColor="GOOD",
|
||||
begin_entry_at=32,
|
||||
max_height = 3,
|
||||
scroll_exit=True
|
||||
)
|
||||
self.nextrely += 1
|
||||
@@ -555,7 +562,6 @@ def edit_opts(program_opts: Namespace, invokeai_opts: Namespace) -> argparse.Nam
|
||||
editApp.run()
|
||||
return editApp.new_opts()
|
||||
|
||||
|
||||
def default_startup_options(init_file: Path) -> Namespace:
|
||||
opts = InvokeAIAppConfig.get_config()
|
||||
if not init_file.exists():
|
||||
@@ -563,7 +569,14 @@ def default_startup_options(init_file: Path) -> Namespace:
|
||||
return opts
|
||||
|
||||
def default_user_selections(program_opts: Namespace) -> InstallSelections:
|
||||
installer = ModelInstall(config)
|
||||
|
||||
try:
|
||||
installer = ModelInstall(config)
|
||||
except omegaconf.errors.ConfigKeyError:
|
||||
logger.warning('Your models.yaml file is corrupt or out of date. Reinitializing')
|
||||
initialize_rootdir(config.root_path, True)
|
||||
installer = ModelInstall(config)
|
||||
|
||||
models = installer.all_models()
|
||||
return InstallSelections(
|
||||
install_models=[models[installer.default_model()].path or models[installer.default_model()].repo_id]
|
||||
@@ -571,19 +584,8 @@ def default_user_selections(program_opts: Namespace) -> InstallSelections:
|
||||
else [models[x].path or models[x].repo_id for x in installer.recommended_models()]
|
||||
if program_opts.yes_to_all
|
||||
else list(),
|
||||
# scan_directory=None,
|
||||
# autoscan_on_startup=None,
|
||||
)
|
||||
|
||||
# -------------------------------------
|
||||
def autoimport_paths(config: InvokeAIAppConfig):
|
||||
return [
|
||||
('Checkpoints & diffusers models', 'autoimport_dir', config.root_path / config.autoimport_dir),
|
||||
('LoRA/LyCORIS models', 'lora_dir', config.root_path / config.lora_dir),
|
||||
('Controlnet models', 'controlnet_dir', config.root_path / config.controlnet_dir),
|
||||
('Textual Inversion Embeddings', 'embedding_dir', config.root_path / config.embedding_dir),
|
||||
]
|
||||
|
||||
# -------------------------------------
|
||||
def initialize_rootdir(root: Path, yes_to_all: bool = False):
|
||||
logger.info("** INITIALIZING INVOKEAI RUNTIME DIRECTORY **")
|
||||
@@ -659,6 +661,9 @@ def write_opts(opts: Namespace, init_file: Path):
|
||||
with open(init_file,'w', encoding='utf-8') as file:
|
||||
file.write(new_config.to_yaml())
|
||||
|
||||
if hasattr(opts,'hf_token') and opts.hf_token:
|
||||
HfLogin(opts.hf_token)
|
||||
|
||||
# -------------------------------------
|
||||
def default_output_dir() -> Path:
|
||||
return config.root_path / "outputs"
|
||||
|
||||
@@ -10,7 +10,7 @@ from tempfile import TemporaryDirectory
|
||||
from typing import List, Dict, Callable, Union, Set
|
||||
|
||||
import requests
|
||||
from diffusers import StableDiffusionPipeline
|
||||
from diffusers import DiffusionPipeline
|
||||
from diffusers import logging as dlogging
|
||||
from huggingface_hub import hf_hub_url, HfFolder, HfApi
|
||||
from omegaconf import OmegaConf
|
||||
@@ -89,16 +89,13 @@ class ModelInstall(object):
|
||||
config:InvokeAIAppConfig,
|
||||
prediction_type_helper: Callable[[Path],SchedulerPredictionType]=None,
|
||||
model_manager: ModelManager = None,
|
||||
access_token:str = None,
|
||||
event_bus = None, # EventServicesBase - getting circular import errors
|
||||
):
|
||||
access_token:str = None):
|
||||
self.config = config
|
||||
self.mgr = model_manager or ModelManager(config.model_conf_path)
|
||||
self.datasets = OmegaConf.load(Dataset_path)
|
||||
self.prediction_helper = prediction_type_helper
|
||||
self.access_token = access_token or HfFolder.get_token()
|
||||
self.reverse_paths = self._reverse_paths(self.datasets)
|
||||
self.event_bus = event_bus
|
||||
|
||||
def all_models(self)->Dict[str,ModelLoadInfo]:
|
||||
'''
|
||||
@@ -200,63 +197,39 @@ class ModelInstall(object):
|
||||
Returns a set of dict objects corresponding to newly-created stanzas in models.yaml.
|
||||
'''
|
||||
|
||||
if self.event_bus:
|
||||
self.event_bus.emit_model_import_started(str(model_path_id_or_url))
|
||||
|
||||
if not models_installed:
|
||||
models_installed = dict()
|
||||
|
||||
# A little hack to allow nested routines to retrieve info on the requested ID
|
||||
self.current_id = model_path_id_or_url
|
||||
path = Path(model_path_id_or_url)
|
||||
# checkpoint file, or similar
|
||||
if path.is_file():
|
||||
models_installed.update({str(path):self._install_path(path)})
|
||||
|
||||
try:
|
||||
# checkpoint file, or similar
|
||||
if path.is_file():
|
||||
models_installed.update({str(path):self._install_path(path)})
|
||||
# folders style or similar
|
||||
elif path.is_dir() and any([(path/x).exists() for x in \
|
||||
{'config.json','model_index.json','learned_embeds.bin','pytorch_lora_weights.bin'}
|
||||
]
|
||||
):
|
||||
models_installed.update({str(model_path_id_or_url): self._install_path(path)})
|
||||
|
||||
# folders style or similar
|
||||
elif path.is_dir() and any([(path/x).exists() for x in \
|
||||
{'config.json','model_index.json','learned_embeds.bin','pytorch_lora_weights.bin'}
|
||||
]
|
||||
):
|
||||
models_installed.update({str(model_path_id_or_url): self._install_path(path)})
|
||||
# recursive scan
|
||||
elif path.is_dir():
|
||||
for child in path.iterdir():
|
||||
self.heuristic_import(child, models_installed=models_installed)
|
||||
|
||||
# recursive scan
|
||||
elif path.is_dir():
|
||||
for child in path.iterdir():
|
||||
self.heuristic_import(child, models_installed=models_installed)
|
||||
# huggingface repo
|
||||
elif len(str(model_path_id_or_url).split('/')) == 2:
|
||||
models_installed.update({str(model_path_id_or_url): self._install_repo(str(model_path_id_or_url))})
|
||||
|
||||
# huggingface repo
|
||||
elif len(str(model_path_id_or_url).split('/')) == 2:
|
||||
models_installed.update({str(model_path_id_or_url): self._install_repo(str(model_path_id_or_url))})
|
||||
# a URL
|
||||
elif str(model_path_id_or_url).startswith(("http:", "https:", "ftp:")):
|
||||
models_installed.update({str(model_path_id_or_url): self._install_url(model_path_id_or_url)})
|
||||
|
||||
# a URL
|
||||
elif str(model_path_id_or_url).startswith(("http:", "https:", "ftp:")):
|
||||
models_installed.update({str(model_path_id_or_url): self._install_url(model_path_id_or_url)})
|
||||
else:
|
||||
raise KeyError(f'{str(model_path_id_or_url)} is not recognized as a local path, repo ID or URL. Skipping')
|
||||
|
||||
else:
|
||||
errmsg = f'{str(model_path_id_or_url)} is not recognized as a local path, repo ID or URL. Skipping'
|
||||
raise KeyError(errmsg)
|
||||
|
||||
if self.event_bus:
|
||||
for path, add_model_result in models_installed.items():
|
||||
self.event_bus.emit_model_import_completed(
|
||||
str(path),
|
||||
import_info = add_model_result,
|
||||
)
|
||||
except Exception as e:
|
||||
if self.event_bus:
|
||||
self.event_bus.emit_model_import_completed(
|
||||
str(path),
|
||||
import_info = None,
|
||||
success = False,
|
||||
error = str(e),
|
||||
)
|
||||
return models_installed
|
||||
else:
|
||||
raise
|
||||
|
||||
return models_installed
|
||||
|
||||
# install a model from a local path. The optional info parameter is there to prevent
|
||||
@@ -265,14 +238,10 @@ class ModelInstall(object):
|
||||
info = info or ModelProbe().heuristic_probe(path,self.prediction_helper)
|
||||
if not info:
|
||||
logger.warning(f'Unable to parse format of {path}')
|
||||
raise ValueError(f'Unable to parse format of {path}')
|
||||
|
||||
return None
|
||||
model_name = path.stem if path.is_file() else path.name
|
||||
|
||||
if self.mgr.model_exists(model_name, info.base_type, info.model_type):
|
||||
errmsg = f'A model named "{model_name}" is already installed.'
|
||||
raise ValueError(errmsg)
|
||||
|
||||
raise ValueError(f'A model named "{model_name}" is already installed.')
|
||||
attributes = self._make_attributes(path,info)
|
||||
return self.mgr.add_model(model_name = model_name,
|
||||
base_model = info.base_type,
|
||||
@@ -282,7 +251,7 @@ class ModelInstall(object):
|
||||
|
||||
def _install_url(self, url: str)->AddModelResult:
|
||||
with TemporaryDirectory(dir=self.config.models_path) as staging:
|
||||
location = download_with_resume(url,Path(staging),event_bus=self.event_bus)
|
||||
location = download_with_resume(url,Path(staging))
|
||||
if not location:
|
||||
logger.error(f'Unable to download {url}. Skipping.')
|
||||
info = ModelProbe().heuristic_probe(location)
|
||||
@@ -341,6 +310,8 @@ class ModelInstall(object):
|
||||
if key := self.reverse_paths.get(path_name):
|
||||
(name, base, mtype) = ModelManager.parse_key(key)
|
||||
return name
|
||||
elif location.is_dir():
|
||||
return location.name
|
||||
else:
|
||||
return location.stem
|
||||
|
||||
@@ -396,7 +367,7 @@ class ModelInstall(object):
|
||||
model = None
|
||||
for revision in revisions:
|
||||
try:
|
||||
model = StableDiffusionPipeline.from_pretrained(repo_id,revision=revision,safety_checker=None)
|
||||
model = DiffusionPipeline.from_pretrained(repo_id,revision=revision,safety_checker=None)
|
||||
except: # most errors are due to fp16 not being present. Fix this to catch other errors
|
||||
pass
|
||||
if model:
|
||||
@@ -415,8 +386,7 @@ class ModelInstall(object):
|
||||
p = hf_download_with_resume(repo_id,
|
||||
model_dir=location,
|
||||
model_name=filename,
|
||||
access_token = self.access_token,
|
||||
event_bus = self.event_bus,
|
||||
access_token = self.access_token
|
||||
)
|
||||
if p:
|
||||
paths.append(p)
|
||||
@@ -457,15 +427,12 @@ def hf_download_from_pretrained(
|
||||
return destination
|
||||
|
||||
# ---------------------------------------------
|
||||
# TODO: This function is almost identical to invokeai.backend.util.download_with_resume
|
||||
# and should be merged
|
||||
def hf_download_with_resume(
|
||||
repo_id: str,
|
||||
model_dir: str,
|
||||
model_name: str,
|
||||
model_dest: Path = None,
|
||||
access_token: str = None,
|
||||
event_bus = None,
|
||||
) -> Path:
|
||||
model_dest = model_dest or Path(os.path.join(model_dir, model_name))
|
||||
os.makedirs(model_dir, exist_ok=True)
|
||||
@@ -482,22 +449,15 @@ def hf_download_with_resume(
|
||||
open_mode = "ab"
|
||||
|
||||
resp = requests.get(url, headers=header, stream=True)
|
||||
content_length = int(resp.headers.get("content-length", 0))
|
||||
|
||||
if event_bus:
|
||||
event_bus.emit_download_started(url)
|
||||
total = int(resp.headers.get("content-length", 0))
|
||||
|
||||
if (
|
||||
resp.status_code == 416
|
||||
): # "range not satisfiable", which means nothing to return
|
||||
logger.info(f"{model_name}: complete file found. Skipping.")
|
||||
if event_bus:
|
||||
event_bus.emit_download_completed(url,resp.status_code,model_dest)
|
||||
return model_dest
|
||||
elif resp.status_code == 404:
|
||||
logger.warning("File not found")
|
||||
if event_bus:
|
||||
event_bus.emit_download_completed(url,resp.status_code,None)
|
||||
return None
|
||||
elif resp.status_code != 200:
|
||||
logger.warning(f"{model_name}: {resp.reason}")
|
||||
@@ -506,15 +466,11 @@ def hf_download_with_resume(
|
||||
else:
|
||||
logger.info(f"{model_name}: Downloading...")
|
||||
|
||||
MB10 = 10 * 1048576
|
||||
downloaded = exist_size
|
||||
previous_interval = 0
|
||||
|
||||
try:
|
||||
with open(model_dest, open_mode) as file, tqdm(
|
||||
desc=model_name,
|
||||
initial=exist_size,
|
||||
total=content_length + exist_size,
|
||||
total=total + exist_size,
|
||||
unit="iB",
|
||||
unit_scale=True,
|
||||
unit_divisor=1000,
|
||||
@@ -522,20 +478,9 @@ def hf_download_with_resume(
|
||||
for data in resp.iter_content(chunk_size=1024):
|
||||
size = file.write(data)
|
||||
bar.update(size)
|
||||
downloaded += size
|
||||
if event_bus and downloaded // MB10 > previous_interval:
|
||||
previous_interval = downloaded // MB10
|
||||
event_bus.emit_download_progress(url, downloaded, content_length)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"An error occurred while downloading {model_name}: {str(e)}")
|
||||
if event_bus:
|
||||
event_bus.emit_download_completed(url,500,None)
|
||||
return None
|
||||
|
||||
if event_bus:
|
||||
event_bus.emit_download_completed(url,resp.status_code,model_dest)
|
||||
|
||||
return model_dest
|
||||
|
||||
|
||||
|
||||
@@ -3,6 +3,6 @@ Initialization file for invokeai.backend.model_management
|
||||
"""
|
||||
from .model_manager import ModelManager, ModelInfo, AddModelResult, SchedulerPredictionType
|
||||
from .model_cache import ModelCache
|
||||
from .models import BaseModelType, ModelType, SubModelType, ModelVariantType, ModelNotFoundException
|
||||
from .models import BaseModelType, ModelType, SubModelType, ModelVariantType, ModelNotFoundException, DuplicateModelException
|
||||
from .model_merge import ModelMerger, MergeInterpolationMethod
|
||||
|
||||
|
||||
@@ -21,6 +21,7 @@ import re
|
||||
import warnings
|
||||
from pathlib import Path
|
||||
from typing import Union
|
||||
from packaging import version
|
||||
|
||||
import torch
|
||||
from safetensors.torch import load_file
|
||||
@@ -63,6 +64,7 @@ from diffusers.pipelines.stable_diffusion.safety_checker import (
|
||||
StableDiffusionSafetyChecker,
|
||||
)
|
||||
from diffusers.utils import is_safetensors_available
|
||||
import transformers
|
||||
from transformers import (
|
||||
AutoFeatureExtractor,
|
||||
BertTokenizerFast,
|
||||
@@ -841,7 +843,16 @@ def convert_ldm_clip_checkpoint(checkpoint):
|
||||
key
|
||||
]
|
||||
|
||||
text_model.load_state_dict(text_model_dict)
|
||||
# transformers 4.31.0 and higher - this key no longer in state dict
|
||||
if version.parse(transformers.__version__) >= version.parse("4.31.0"):
|
||||
position_ids = text_model_dict.pop("text_model.embeddings.position_ids", None)
|
||||
text_model.load_state_dict(text_model_dict)
|
||||
if position_ids is not None:
|
||||
text_model.text_model.embeddings.position_ids.copy_(position_ids)
|
||||
|
||||
# transformers 4.30.2 and lower - position_ids is part of state_dict
|
||||
else:
|
||||
text_model.load_state_dict(text_model_dict)
|
||||
|
||||
return text_model
|
||||
|
||||
@@ -947,7 +958,16 @@ def convert_open_clip_checkpoint(checkpoint):
|
||||
|
||||
text_model_dict[new_key] = checkpoint[key]
|
||||
|
||||
text_model.load_state_dict(text_model_dict)
|
||||
# transformers 4.31.0 and higher - this key no longer in state dict
|
||||
if version.parse(transformers.__version__) >= version.parse("4.31.0"):
|
||||
position_ids = text_model_dict.pop("text_model.embeddings.position_ids", None)
|
||||
text_model.load_state_dict(text_model_dict)
|
||||
if position_ids is not None:
|
||||
text_model.text_model.embeddings.position_ids.copy_(position_ids)
|
||||
|
||||
# transformers 4.30.2 and lower - position_ids is part of state_dict
|
||||
else:
|
||||
text_model.load_state_dict(text_model_dict)
|
||||
|
||||
return text_model
|
||||
|
||||
|
||||
@@ -328,6 +328,25 @@ class ModelCache(object):
|
||||
|
||||
refs = sys.getrefcount(cache_entry.model)
|
||||
|
||||
# manually clear local variable references of just-finished function calls
|
||||
# for some reason Python won't collect them immediately, even with an explicit gc.collect()
|
||||
if refs > 2:
|
||||
while True:
|
||||
cleared = False
|
||||
for referrer in gc.get_referrers(cache_entry.model):
|
||||
if type(referrer).__name__ == "frame":
|
||||
# RuntimeError: cannot clear an executing frame
|
||||
with suppress(RuntimeError):
|
||||
referrer.clear()
|
||||
cleared = True
|
||||
#break
|
||||
|
||||
# repeat if the referrers changed (due to the frame clear), else exit the loop
|
||||
if cleared:
|
||||
gc.collect()
|
||||
else:
|
||||
break
|
||||
|
||||
device = cache_entry.model.device if hasattr(cache_entry.model, "device") else None
|
||||
self.logger.debug(f"Model: {model_key}, locks: {cache_entry._locks}, device: {device}, loaded: {cache_entry.loaded}, refs: {refs}")
|
||||
|
||||
@@ -363,6 +382,9 @@ class ModelCache(object):
|
||||
self.logger.debug(f'GPU VRAM freed: {(mem.vram_used/GIG):.2f} GB')
|
||||
vram_in_use += mem.vram_used # note vram_used is negative
|
||||
self.logger.debug(f'{(vram_in_use/GIG):.2f}GB VRAM used for models; max allowed={(reserved/GIG):.2f}GB')
|
||||
|
||||
gc.collect()
|
||||
torch.cuda.empty_cache()
|
||||
|
||||
def _local_model_hash(self, model_path: Union[str, Path]) -> str:
|
||||
sha = hashlib.sha256()
|
||||
|
||||
@@ -106,16 +106,16 @@ providing information about a model defined in models.yaml. For example:
|
||||
|
||||
>>> models = mgr.list_models()
|
||||
>>> json.dumps(models[0])
|
||||
{"path": "/home/lstein/invokeai-main/models/sd-1/controlnet/canny",
|
||||
"model_format": "diffusers",
|
||||
"name": "canny",
|
||||
"base_model": "sd-1",
|
||||
{"path": "/home/lstein/invokeai-main/models/sd-1/controlnet/canny",
|
||||
"model_format": "diffusers",
|
||||
"name": "canny",
|
||||
"base_model": "sd-1",
|
||||
"type": "controlnet"
|
||||
}
|
||||
|
||||
You can filter by model type and base model as shown here:
|
||||
|
||||
|
||||
|
||||
controlnets = mgr.list_models(model_type=ModelType.ControlNet,
|
||||
base_model=BaseModelType.StableDiffusion1)
|
||||
for c in controlnets:
|
||||
@@ -140,14 +140,14 @@ Layout of the `models` directory:
|
||||
|
||||
models
|
||||
├── sd-1
|
||||
│ ├── controlnet
|
||||
│ ├── lora
|
||||
│ ├── main
|
||||
│ └── embedding
|
||||
│ ├── controlnet
|
||||
│ ├── lora
|
||||
│ ├── main
|
||||
│ └── embedding
|
||||
├── sd-2
|
||||
│ ├── controlnet
|
||||
│ ├── lora
|
||||
│ ├── main
|
||||
│ ├── controlnet
|
||||
│ ├── lora
|
||||
│ ├── main
|
||||
│ └── embedding
|
||||
└── core
|
||||
├── face_reconstruction
|
||||
@@ -195,7 +195,7 @@ name, base model, type and a dict of model attributes. See
|
||||
`invokeai/backend/model_management/models` for the attributes required
|
||||
by each model type.
|
||||
|
||||
A model can be deleted using `del_model()`, providing the same
|
||||
A model can be deleted using `del_model()`, providing the same
|
||||
identifying information as `get_model()`
|
||||
|
||||
The `heuristic_import()` method will take a set of strings
|
||||
@@ -251,7 +251,9 @@ from .model_search import ModelSearch
|
||||
from .models import (
|
||||
BaseModelType, ModelType, SubModelType,
|
||||
ModelError, SchedulerPredictionType, MODEL_CLASSES,
|
||||
ModelConfigBase, ModelNotFoundException, InvalidModelException,
|
||||
ModelConfigBase,
|
||||
ModelNotFoundException, InvalidModelException,
|
||||
DuplicateModelException,
|
||||
)
|
||||
|
||||
# We are only starting to number the config file with release 3.
|
||||
@@ -304,7 +306,7 @@ class ModelManager(object):
|
||||
logger: types.ModuleType = logger,
|
||||
):
|
||||
"""
|
||||
Initialize with the path to the models.yaml config file.
|
||||
Initialize with the path to the models.yaml config file.
|
||||
Optional parameters are the torch device type, precision, max_models,
|
||||
and sequential_offload boolean. Note that the default device
|
||||
type and precision are set up for a CUDA system running at half precision.
|
||||
@@ -323,7 +325,7 @@ class ModelManager(object):
|
||||
self.config_meta = ConfigMeta(**config.pop("__metadata__"))
|
||||
# TODO: metadata not found
|
||||
# TODO: version check
|
||||
|
||||
|
||||
self.app_config = InvokeAIAppConfig.get_config()
|
||||
self.logger = logger
|
||||
self.cache = ModelCache(
|
||||
@@ -431,7 +433,7 @@ class ModelManager(object):
|
||||
:param model_name: symbolic name of the model in models.yaml
|
||||
:param model_type: ModelType enum indicating the type of model to return
|
||||
:param base_model: BaseModelType enum indicating the base model used by this model
|
||||
:param submode_typel: an ModelType enum indicating the portion of
|
||||
:param submode_typel: an ModelType enum indicating the portion of
|
||||
the model to retrieve (e.g. ModelType.Vae)
|
||||
"""
|
||||
model_class = MODEL_CLASSES[base_model][model_type]
|
||||
@@ -456,7 +458,7 @@ class ModelManager(object):
|
||||
raise ModelNotFoundException(f"Model not found - {model_key}")
|
||||
|
||||
# vae/movq override
|
||||
# TODO:
|
||||
# TODO:
|
||||
if submodel_type is not None and hasattr(model_config, submodel_type):
|
||||
override_path = getattr(model_config, submodel_type)
|
||||
if override_path:
|
||||
@@ -489,7 +491,7 @@ class ModelManager(object):
|
||||
self.cache_keys[model_key].add(model_context.key)
|
||||
|
||||
model_hash = "<NO_HASH>" # TODO:
|
||||
|
||||
|
||||
return ModelInfo(
|
||||
context = model_context,
|
||||
name = model_name,
|
||||
@@ -518,7 +520,7 @@ class ModelManager(object):
|
||||
|
||||
def model_names(self) -> List[Tuple[str, BaseModelType, ModelType]]:
|
||||
"""
|
||||
Return a list of (str, BaseModelType, ModelType) corresponding to all models
|
||||
Return a list of (str, BaseModelType, ModelType) corresponding to all models
|
||||
known to the configuration.
|
||||
"""
|
||||
return [(self.parse_key(x)) for x in self.models.keys()]
|
||||
@@ -692,12 +694,12 @@ class ModelManager(object):
|
||||
if new_name is None and new_base is None:
|
||||
self.logger.error("rename_model() called with neither a new_name nor a new_base. {model_name} unchanged.")
|
||||
return
|
||||
|
||||
|
||||
model_key = self.create_key(model_name, base_model, model_type)
|
||||
model_cfg = self.models.get(model_key, None)
|
||||
if not model_cfg:
|
||||
raise ModelNotFoundException(f"Unknown model: {model_key}")
|
||||
|
||||
|
||||
old_path = self.app_config.root_path / model_cfg.path
|
||||
new_name = new_name or model_name
|
||||
new_base = new_base or base_model
|
||||
@@ -726,7 +728,7 @@ class ModelManager(object):
|
||||
self.models.pop(model_key, None) # delete
|
||||
self.models[new_key] = model_cfg
|
||||
self.commit()
|
||||
|
||||
|
||||
def convert_model (
|
||||
self,
|
||||
model_name: str,
|
||||
@@ -776,12 +778,12 @@ class ModelManager(object):
|
||||
# something went wrong, so don't leave dangling diffusers model in directory or it will cause a duplicate model error!
|
||||
rmtree(new_diffusers_path)
|
||||
raise
|
||||
|
||||
|
||||
if checkpoint_path.exists() and checkpoint_path.is_relative_to(self.app_config.models_path):
|
||||
checkpoint_path.unlink()
|
||||
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def search_models(self, search_folder):
|
||||
self.logger.info(f"Finding Models In: {search_folder}")
|
||||
models_folder_ckpt = Path(search_folder).glob("**/*.ckpt")
|
||||
@@ -824,10 +826,14 @@ class ModelManager(object):
|
||||
assert config_file_path is not None,'no config file path to write to'
|
||||
config_file_path = self.app_config.root_path / config_file_path
|
||||
tmpfile = os.path.join(os.path.dirname(config_file_path), "new_config.tmp")
|
||||
with open(tmpfile, "w", encoding="utf-8") as outfile:
|
||||
outfile.write(self.preamble())
|
||||
outfile.write(yaml_str)
|
||||
os.replace(tmpfile, config_file_path)
|
||||
try:
|
||||
with open(tmpfile, "w", encoding="utf-8") as outfile:
|
||||
outfile.write(self.preamble())
|
||||
outfile.write(yaml_str)
|
||||
os.replace(tmpfile, config_file_path)
|
||||
except OSError as err:
|
||||
self.logger.warning(f"Could not modify the config file at {config_file_path}")
|
||||
self.logger.warning(err)
|
||||
|
||||
def preamble(self) -> str:
|
||||
"""
|
||||
@@ -854,7 +860,7 @@ class ModelManager(object):
|
||||
loaded_files = set()
|
||||
new_models_found = False
|
||||
|
||||
self.logger.info(f'scanning {self.app_config.models_path} for new models')
|
||||
self.logger.info(f'Scanning {self.app_config.models_path} for new models')
|
||||
with Chdir(self.app_config.root_path):
|
||||
for model_key, model_config in list(self.models.items()):
|
||||
model_name, cur_base_model, cur_model_type = self.parse_key(model_key)
|
||||
@@ -887,15 +893,18 @@ class ModelManager(object):
|
||||
model_name = model_path.name if model_path.is_dir() else model_path.stem
|
||||
model_key = self.create_key(model_name, cur_base_model, cur_model_type)
|
||||
|
||||
if model_key in self.models:
|
||||
raise Exception(f"Model with key {model_key} added twice")
|
||||
|
||||
if model_path.is_relative_to(self.app_config.root_path):
|
||||
model_path = model_path.relative_to(self.app_config.root_path)
|
||||
try:
|
||||
if model_key in self.models:
|
||||
raise DuplicateModelException(f"Model with key {model_key} added twice")
|
||||
|
||||
if model_path.is_relative_to(self.app_config.root_path):
|
||||
model_path = model_path.relative_to(self.app_config.root_path)
|
||||
|
||||
model_config: ModelConfigBase = model_class.probe_config(str(model_path))
|
||||
self.models[model_key] = model_config
|
||||
new_models_found = True
|
||||
except DuplicateModelException as e:
|
||||
self.logger.warning(e)
|
||||
except InvalidModelException:
|
||||
self.logger.warning(f"Not a valid model: {model_path}")
|
||||
except NotImplementedError as e:
|
||||
@@ -934,26 +943,34 @@ class ModelManager(object):
|
||||
def models_found(self):
|
||||
return self.new_models_found
|
||||
|
||||
config = self.app_config
|
||||
|
||||
# LS: hacky
|
||||
# Patch in the SD VAE from core so that it is available for use by the UI
|
||||
try:
|
||||
self.heuristic_import({config.root_path / 'models/core/convert/sd-vae-ft-mse'})
|
||||
except:
|
||||
pass
|
||||
|
||||
installer = ModelInstall(config = self.app_config,
|
||||
model_manager = self,
|
||||
prediction_type_helper = ask_user_for_prediction_type,
|
||||
)
|
||||
config = self.app_config
|
||||
known_paths = {config.root_path / x['path'] for x in self.list_models()}
|
||||
directories = {config.root_path / x for x in [config.autoimport_dir,
|
||||
config.lora_dir,
|
||||
config.embedding_dir,
|
||||
config.controlnet_dir]
|
||||
config.controlnet_dir,
|
||||
] if x
|
||||
}
|
||||
scanner = ScanAndImport(directories, self.logger, ignore=known_paths, installer=installer)
|
||||
scanner.search()
|
||||
|
||||
return scanner.models_found()
|
||||
|
||||
def heuristic_import(self,
|
||||
items_to_import: Set[str],
|
||||
prediction_type_helper: Callable[[Path],SchedulerPredictionType]=None,
|
||||
event_bus = None, # EventServiceBase, with circular dependency issues
|
||||
)->Dict[str, AddModelResult]:
|
||||
'''Import a list of paths, repo_ids or URLs. Returns the set of
|
||||
successfully imported items.
|
||||
@@ -978,16 +995,12 @@ class ModelManager(object):
|
||||
# avoid circular import here
|
||||
from invokeai.backend.install.model_install_backend import ModelInstall
|
||||
successfully_installed = dict()
|
||||
|
||||
|
||||
installer = ModelInstall(config = self.app_config,
|
||||
prediction_type_helper = prediction_type_helper,
|
||||
model_manager = self,
|
||||
event_bus = event_bus,
|
||||
)
|
||||
model_manager = self)
|
||||
for thing in items_to_import:
|
||||
installed = installer.heuristic_import(thing)
|
||||
successfully_installed.update(installed)
|
||||
|
||||
self.commit()
|
||||
self.commit()
|
||||
return successfully_installed
|
||||
|
||||
|
||||
@@ -39,6 +39,7 @@ class ModelProbe(object):
|
||||
|
||||
CLASS2TYPE = {
|
||||
'StableDiffusionPipeline' : ModelType.Main,
|
||||
'StableDiffusionInpaintPipeline' : ModelType.Main,
|
||||
'StableDiffusionXLPipeline' : ModelType.Main,
|
||||
'StableDiffusionXLImg2ImgPipeline' : ModelType.Main,
|
||||
'AutoencoderKL' : ModelType.Vae,
|
||||
@@ -401,7 +402,7 @@ class PipelineFolderProbe(FolderProbeBase):
|
||||
|
||||
in_channels = conf['in_channels']
|
||||
if in_channels == 9:
|
||||
return ModelVariantType.Inpainting
|
||||
return ModelVariantType.Inpaint
|
||||
elif in_channels == 5:
|
||||
return ModelVariantType.Depth
|
||||
elif in_channels == 4:
|
||||
|
||||
@@ -2,7 +2,11 @@ import inspect
|
||||
from enum import Enum
|
||||
from pydantic import BaseModel
|
||||
from typing import Literal, get_origin
|
||||
from .base import BaseModelType, ModelType, SubModelType, ModelBase, ModelConfigBase, ModelVariantType, SchedulerPredictionType, ModelError, SilenceWarnings, ModelNotFoundException, InvalidModelException
|
||||
from .base import (
|
||||
BaseModelType, ModelType, SubModelType, ModelBase, ModelConfigBase,
|
||||
ModelVariantType, SchedulerPredictionType, ModelError, SilenceWarnings,
|
||||
ModelNotFoundException, InvalidModelException, DuplicateModelException
|
||||
)
|
||||
from .stable_diffusion import StableDiffusion1Model, StableDiffusion2Model
|
||||
from .sdxl import StableDiffusionXLModel
|
||||
from .vae import VaeModel
|
||||
|
||||
@@ -15,6 +15,9 @@ from contextlib import suppress
|
||||
from pydantic import BaseModel, Field
|
||||
from typing import List, Dict, Optional, Type, Literal, TypeVar, Generic, Callable, Any, Union
|
||||
|
||||
class DuplicateModelException(Exception):
|
||||
pass
|
||||
|
||||
class InvalidModelException(Exception):
|
||||
pass
|
||||
|
||||
|
||||
@@ -219,6 +219,7 @@ class ControlNetData:
|
||||
begin_step_percent: float = Field(default=0.0)
|
||||
end_step_percent: float = Field(default=1.0)
|
||||
control_mode: str = Field(default="balanced")
|
||||
resize_mode: str = Field(default="just_resize")
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -653,7 +654,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
|
||||
if cfg_injection:
|
||||
# Inferred ControlNet only for the conditional batch.
|
||||
# To apply the output of ControlNet to both the unconditional and conditional batches,
|
||||
# add 0 to the unconditional batch to keep it unchanged.
|
||||
# prepend zeros for unconditional batch
|
||||
down_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_samples]
|
||||
mid_sample = torch.cat([torch.zeros_like(mid_sample), mid_sample])
|
||||
|
||||
@@ -954,53 +955,3 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
|
||||
debug_image(
|
||||
img, f"latents {msg} {i+1}/{len(decoded)}", debug_status=True
|
||||
)
|
||||
|
||||
# Copied from diffusers pipeline_stable_diffusion_controlnet.py
|
||||
# Returns torch.Tensor of shape (batch_size, 3, height, width)
|
||||
@staticmethod
|
||||
def prepare_control_image(
|
||||
image,
|
||||
# FIXME: need to fix hardwiring of width and height, change to basing on latents dimensions?
|
||||
# latents,
|
||||
width=512, # should be 8 * latent.shape[3]
|
||||
height=512, # should be 8 * latent height[2]
|
||||
batch_size=1,
|
||||
num_images_per_prompt=1,
|
||||
device="cuda",
|
||||
dtype=torch.float16,
|
||||
do_classifier_free_guidance=True,
|
||||
control_mode="balanced"
|
||||
):
|
||||
|
||||
if not isinstance(image, torch.Tensor):
|
||||
if isinstance(image, PIL.Image.Image):
|
||||
image = [image]
|
||||
|
||||
if isinstance(image[0], PIL.Image.Image):
|
||||
images = []
|
||||
for image_ in image:
|
||||
image_ = image_.convert("RGB")
|
||||
image_ = image_.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])
|
||||
image_ = np.array(image_)
|
||||
image_ = image_[None, :]
|
||||
images.append(image_)
|
||||
image = images
|
||||
image = np.concatenate(image, axis=0)
|
||||
image = np.array(image).astype(np.float32) / 255.0
|
||||
image = image.transpose(0, 3, 1, 2)
|
||||
image = torch.from_numpy(image)
|
||||
elif isinstance(image[0], torch.Tensor):
|
||||
image = torch.cat(image, dim=0)
|
||||
|
||||
image_batch_size = image.shape[0]
|
||||
if image_batch_size == 1:
|
||||
repeat_by = batch_size
|
||||
else:
|
||||
# image batch size is the same as prompt batch size
|
||||
repeat_by = num_images_per_prompt
|
||||
image = image.repeat_interleave(repeat_by, dim=0)
|
||||
image = image.to(device=device, dtype=dtype)
|
||||
cfg_injection = (control_mode == "more_control" or control_mode == "unbalanced")
|
||||
if do_classifier_free_guidance and not cfg_injection:
|
||||
image = torch.cat([image] * 2)
|
||||
return image
|
||||
|
||||
@@ -1,4 +1,6 @@
import math
import torch
import diffusers


if torch.backends.mps.is_available():
@@ -61,3 +63,150 @@ def new_torch_interpolate(input, size=None, scale_factor=None, mode='nearest', a
    return _torch_interpolate(input, size, scale_factor, mode, align_corners, recompute_scale_factor, antialias)

torch.nn.functional.interpolate = new_torch_interpolate


# TODO: refactor it
_SlicedAttnProcessor = diffusers.models.attention_processor.SlicedAttnProcessor


class ChunkedSlicedAttnProcessor:
    r"""
    Processor for implementing sliced attention.

    Args:
        slice_size (`int`, *optional*):
            The number of steps to compute attention. Uses as many slices as `attention_head_dim // slice_size`, and
            `attention_head_dim` must be a multiple of the `slice_size`.
    """

    def __init__(self, slice_size):
        assert isinstance(slice_size, int)
        slice_size = 1  # TODO: maybe implement chunking in batches too when enough memory
        self.slice_size = slice_size
        self._sliced_attn_processor = _SlicedAttnProcessor(slice_size)

    def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None):
        if self.slice_size != 1 or attn.upcast_attention:
            return self._sliced_attn_processor(attn, hidden_states, encoder_hidden_states, attention_mask)

        residual = hidden_states

        input_ndim = hidden_states.ndim

        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)

        batch_size, sequence_length, _ = (
            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
        )
        attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)

        if attn.group_norm is not None:
            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        query = attn.to_q(hidden_states)
        dim = query.shape[-1]
        query = attn.head_to_batch_dim(query)

        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states
        elif attn.norm_cross:
            encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)
        key = attn.head_to_batch_dim(key)
        value = attn.head_to_batch_dim(value)

        batch_size_attention, query_tokens, _ = query.shape
        hidden_states = torch.zeros(
            (batch_size_attention, query_tokens, dim // attn.heads), device=query.device, dtype=query.dtype
        )

        chunk_tmp_tensor = torch.empty(
            self.slice_size, query.shape[1], key.shape[1], dtype=query.dtype, device=query.device
        )

        for i in range(batch_size_attention // self.slice_size):
            start_idx = i * self.slice_size
            end_idx = (i + 1) * self.slice_size

            query_slice = query[start_idx:end_idx]
            key_slice = key[start_idx:end_idx]
            attn_mask_slice = attention_mask[start_idx:end_idx] if attention_mask is not None else None

            self.get_attention_scores_chunked(
                attn,
                query_slice,
                key_slice,
                attn_mask_slice,
                hidden_states[start_idx:end_idx],
                value[start_idx:end_idx],
                chunk_tmp_tensor,
            )

        hidden_states = attn.batch_to_head_dim(hidden_states)

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor

        return hidden_states

    def get_attention_scores_chunked(self, attn, query, key, attention_mask, hidden_states, value, chunk):
        # batch size = 1
        assert query.shape[0] == 1
        assert key.shape[0] == 1
        assert value.shape[0] == 1
        assert hidden_states.shape[0] == 1

        dtype = query.dtype
        if attn.upcast_attention:
            query = query.float()
            key = key.float()

        # out_item_size = query.dtype.itemsize
        # if attn.upcast_attention:
        #     out_item_size = torch.float32.itemsize
        out_item_size = query.element_size()
        if attn.upcast_attention:
            out_item_size = 4

        chunk_size = 2 ** 29

        out_size = query.shape[1] * key.shape[1] * out_item_size
        chunks_count = min(query.shape[1], math.ceil((out_size - 1) / chunk_size))
        chunk_step = max(1, int(query.shape[1] / chunks_count))

        key = key.transpose(-1, -2)

        def _get_chunk_view(tensor, start, length):
            if start + length > tensor.shape[1]:
                length = tensor.shape[1] - start
            # print(f"view: [{tensor.shape[0]},{tensor.shape[1]},{tensor.shape[2]}] - start: {start}, length: {length}")
            return tensor[:, start:start + length]

        for chunk_pos in range(0, query.shape[1], chunk_step):
            if attention_mask is not None:
                torch.baddbmm(
                    _get_chunk_view(attention_mask, chunk_pos, chunk_step),
                    _get_chunk_view(query, chunk_pos, chunk_step),
                    key,
                    beta=1,
                    alpha=attn.scale,
                    out=chunk,
                )
            else:
                torch.baddbmm(
                    torch.zeros((1, 1, 1), device=query.device, dtype=query.dtype),
                    _get_chunk_view(query, chunk_pos, chunk_step),
                    key,
                    beta=0,
                    alpha=attn.scale,
                    out=chunk,
                )
            chunk = chunk.softmax(dim=-1)
            torch.bmm(chunk, value, out=_get_chunk_view(hidden_states, chunk_pos, chunk_step))

            # del chunk


diffusers.models.attention_processor.SlicedAttnProcessor = ChunkedSlicedAttnProcessor
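The chunk sizing in `get_attention_scores_chunked` caps each temporary score matrix written by `torch.baddbmm` at about 2^29 bytes (512 MiB): `out_size` is the size in bytes of the full query-key score matrix, `chunks_count` is how many pieces it has to be split into, and `chunk_step` is how many query tokens each `baddbmm` call covers. Below is a small, self-contained sketch of that arithmetic using the same formulas; the example token counts are illustrative, not taken from the diff.

```python
import math

def chunk_plan(q_len: int, k_len: int, item_size: int, chunk_size: int = 2 ** 29):
    """Mirror of the chunk sizing used by ChunkedSlicedAttnProcessor above."""
    out_size = q_len * k_len * item_size              # bytes of the full score matrix
    chunks_count = min(q_len, math.ceil((out_size - 1) / chunk_size))
    chunk_step = max(1, int(q_len / chunks_count))    # query tokens per baddbmm call
    return out_size, chunks_count, chunk_step

# fp16 self-attention over 36864 tokens (e.g. a 192x192 latent grid):
# the full score matrix would be 36864 * 36864 * 2 bytes, about 2.53 GiB,
# so it is split into 6 chunks of 6144 query tokens each.
print(chunk_plan(36864, 36864, item_size=2))   # (2717908992, 6, 6144)

# fp16 over 8192 tokens fits in a single chunk (~128 MiB), so no splitting happens.
print(chunk_plan(8192, 8192, item_size=2))     # (134217728, 1, 8192)
```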
@@ -21,6 +21,7 @@ from tqdm import tqdm
import invokeai.backend.util.logging as logger
from .devices import torch_dtype


def log_txt_as_img(wh, xc, size=10):
    # wh a tuple of (width, height)
    # xc a list of captions to plot
@@ -284,11 +285,7 @@ def ask_user(question: str, answers: list):


# -------------------------------------
def download_with_resume(url: str,
                         dest: Path,
                         access_token: str = None,
                         event_bus = None,  # EventServiceBase (circular import issues)
                         ) -> Path:
def download_with_resume(url: str, dest: Path, access_token: str = None) -> Path:
    """
    Download a model file.
    :param url: https, http or ftp URL
@@ -326,13 +323,8 @@ def download_with_resume(url: str,
            os.remove(dest)
            exist_size = 0

    if event_bus:
        event_bus.emit_download_started(url)

    if resp.status_code == 416 or (content_length > 0 and exist_size == content_length):
        logger.warning(f"{dest}: complete file found. Skipping.")
        if event_bus:
            event_bus.emit_download_completed(url,resp.status_code,dest)
        return dest
    elif resp.status_code == 206 or exist_size > 0:
        logger.warning(f"{dest}: partial file found. Resuming...")
@@ -341,19 +333,10 @@ def download_with_resume(url: str,
    else:
        logger.info(f"{dest}: Downloading...")

    # If less than 2K, it's not a model - usually an HTML document of some sort
    if content_length < 2000:
        logger.error(f"ERROR DOWNLOADING {url}: {resp.text}")
        if event_bus:
            event_bus.emit_download_completed(url, 500, None)
        return None

    # these variables are used in progress reporting events
    MB10 = 10 * 1048576
    downloaded = exist_size
    previous_interval = 0

    try:
        if content_length < 2000:
            logger.error(f"ERROR DOWNLOADING {url}: {resp.text}")
            return None

        with open(dest, open_mode) as file, tqdm(
            desc=str(dest),
@@ -366,20 +349,10 @@ def download_with_resume(url: str,
            for data in resp.iter_content(chunk_size=1024):
                size = file.write(data)
                bar.update(size)
                downloaded += size
                if event_bus and downloaded // MB10 > previous_interval:
                    previous_interval = downloaded // MB10
                    event_bus.emit_download_progress(url, downloaded, content_length)

    except Exception as e:
        logger.error(f"An error occurred while downloading {dest}: {str(e)}")
        if event_bus:
            event_bus.emit_download_completed(url,500,None)
        return None

    if event_bus:
        event_bus.emit_download_completed(url,resp.status_code,dest)

    return dest
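The `event_bus` argument in the longer `download_with_resume` signature above is duck-typed: the function only calls `emit_download_started`, `emit_download_progress`, and `emit_download_completed` on it, and the inline comment notes the real `EventServiceBase` cannot be imported here because of circular imports. A minimal sketch of a compatible stand-in; the `PrintingEventBus` class and the commented-out example call are assumptions for illustration.

```python
from pathlib import Path

class PrintingEventBus:
    """Duck-typed stand-in for EventServiceBase: just logs download lifecycle events."""

    def emit_download_started(self, url):
        print(f"download started: {url}")

    def emit_download_progress(self, url, downloaded, total):
        # Called at most once per 10 MB interval (MB10 = 10 * 1048576 in the function above).
        print(f"{url}: {downloaded}/{total} bytes")

    def emit_download_completed(self, url, status_code, dest):
        print(f"download finished: {url} -> {dest} (HTTP {status_code})")

# Hypothetical call site; download_with_resume is the function from the diff above.
# dest = download_with_resume(
#     "https://example.com/model.safetensors",
#     Path("/tmp/model.safetensors"),
#     event_bus=PrintingEventBus(),
# )
```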
@@ -1,10 +1,4 @@
# This file predefines a few models that the user may want to install.
sd-1/main/stable-diffusion-xdl-base:
  description: Stable Diffusion XL base model - NOT YET RELEASED!! (70 GB)
  repo_id: stabilityai/stable-diffusion-xl-base
sd-1/main/stable-diffusion-xdl-refiner:
  description: Stable Diffusion XL refiner model - NOT YET RELEASED!! (60 GB)
  repo_id: stabilityai/stable-diffusion-xl-refiner
sd-1/main/stable-diffusion-v1-5:
  description: Stable Diffusion version 1.5 diffusers model (4.27 GB)
  repo_id: runwayml/stable-diffusion-v1-5
@@ -22,6 +16,14 @@ sd-2/main/stable-diffusion-2-inpainting:
  description: Stable Diffusion version 2.0 inpainting model (5.21 GB)
  repo_id: stabilityai/stable-diffusion-2-inpainting
  recommended: False
sdxl/main/stable-diffusion-xl-base-0-9:
  description: Stable Diffusion XL base model (12 GB; access token required)
  repo_id: stabilityai/stable-diffusion-xl-base-0.9
  recommended: False
sdxl-refiner/main/stable-diffusion-xl-refiner-0-9:
  description: Stable Diffusion XL refiner model (12 GB; access token required)
  repo_id: stabilityai/stable-diffusion-xl-refiner-0.9
  recommended: False
sd-1/main/Analog-Diffusion:
  description: An SD-1.5 model trained on diverse analog photographs (2.13 GB)
  repo_id: wavymulder/Analog-Diffusion
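Each entry in this file maps a `<base>/<type>/<name>` key to a `description`, a `repo_id`, and an optional `recommended` flag. A hedged sketch of reading such a file with PyYAML; the file path and the use of PyYAML are assumptions, and the actual installer may load it differently.

```python
import yaml  # PyYAML; assumed available

with open("invokeai/configs/INITIAL_MODELS.yaml") as f:   # path is an assumption
    initial_models = yaml.safe_load(f)

for key, entry in initial_models.items():
    base, model_type, name = key.split("/")               # e.g. "sdxl", "main", "stable-diffusion-xl-base-0-9"
    recommended = entry.get("recommended", False)
    print(f"{name} ({entry['repo_id']}): recommended={recommended}")
```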
Changed vendored frontend build artifacts (lines changed, path, flags):

- 169  invokeai/frontend/web/dist/assets/App-06ea4e5e.js (vendored, Normal file)
- 199  invokeai/frontend/web/dist/assets/App-196ba8f8.js (vendored)
- 169  invokeai/frontend/web/dist/assets/App-3986879c.js (vendored)
- 1    invokeai/frontend/web/dist/assets/MantineProvider-b5842fc1.js (vendored, Normal file)
- 9    invokeai/frontend/web/dist/assets/ThemeLocaleProvider-5b992bc7.css (vendored, Normal file)
- 302  invokeai/frontend/web/dist/assets/ThemeLocaleProvider-a6269375.js (vendored, Normal file)
- 125  invokeai/frontend/web/dist/assets/index-15b43c6c.js (vendored)
- 125  invokeai/frontend/web/dist/assets/index-e2437518.js (vendored, Normal file)
- 125  invokeai/frontend/web/dist/assets/index-f1a5f9cf.js (vendored)
- 2    invokeai/frontend/web/dist/index.html (vendored)
@@ -12,7 +12,7 @@
        margin: 0;
      }
    </style>
    <script type="module" crossorigin src="./assets/index-f1a5f9cf.js"></script>
    <script type="module" crossorigin src="./assets/index-e2437518.js"></script>
  </head>

  <body dir="ltr">
||||
33
invokeai/frontend/web/dist/locales/en.json
vendored
@@ -399,6 +399,8 @@
|
||||
"deleteModel": "Delete Model",
|
||||
"deleteConfig": "Delete Config",
|
||||
"deleteMsg1": "Are you sure you want to delete this model from InvokeAI?",
|
||||
"modelDeleted": "Model Deleted",
|
||||
"modelDeleteFailed": "Failed to delete model",
|
||||
"deleteMsg2": "This WILL delete the model from disk if it is in the InvokeAI root folder. If you are using a custom location, then the model WILL NOT be deleted from disk.",
|
||||
"formMessageDiffusersModelLocation": "Diffusers Model Location",
|
||||
"formMessageDiffusersModelLocationDesc": "Please enter at least one.",
|
||||
@@ -408,11 +410,13 @@
|
||||
"convertToDiffusers": "Convert To Diffusers",
|
||||
"convertToDiffusersHelpText1": "This model will be converted to the 🧨 Diffusers format.",
|
||||
"convertToDiffusersHelpText2": "This process will replace your Model Manager entry with the Diffusers version of the same model.",
|
||||
"convertToDiffusersHelpText3": "Your checkpoint file on the disk will NOT be deleted or modified in anyway. You can add your checkpoint to the Model Manager again if you want to.",
|
||||
"convertToDiffusersHelpText3": "Your checkpoint file on disk WILL be deleted if it is in InvokeAI root folder. If it is in a custom location, then it WILL NOT be deleted.",
|
||||
"convertToDiffusersHelpText4": "This is a one time process only. It might take around 30s-60s depending on the specifications of your computer.",
|
||||
"convertToDiffusersHelpText5": "Please make sure you have enough disk space. Models generally vary between 2GB-7GB in size.",
|
||||
"convertToDiffusersHelpText6": "Do you wish to convert this model?",
|
||||
"convertToDiffusersSaveLocation": "Save Location",
|
||||
"noCustomLocationProvided": "No Custom Location Provided",
|
||||
"convertingModelBegin": "Converting Model. Please wait.",
|
||||
"v1": "v1",
|
||||
"v2_base": "v2 (512px)",
|
||||
"v2_768": "v2 (768px)",
|
||||
@@ -450,7 +454,13 @@
|
||||
"none": "none",
|
||||
"addDifference": "Add Difference",
|
||||
"pickModelType": "Pick Model Type",
|
||||
"selectModel": "Select Model"
|
||||
"selectModel": "Select Model",
|
||||
"importModels": "Import Models",
|
||||
"settings": "Settings",
|
||||
"syncModels": "Sync Models",
|
||||
"syncModelsDesc": "If your models are out of sync with the backend, you can refresh them up using this option. This is generally handy in cases where you manually update your models.yaml file or add models to the InvokeAI root folder after the application has booted.",
|
||||
"modelsSynced": "Models Synced",
|
||||
"modelSyncFailed": "Model Sync Failed"
|
||||
},
|
||||
"parameters": {
|
||||
"general": "General",
|
||||
@@ -542,7 +552,8 @@
|
||||
"saveSteps": "Save images every n steps",
|
||||
"confirmOnDelete": "Confirm On Delete",
|
||||
"displayHelpIcons": "Display Help Icons",
|
||||
"useCanvasBeta": "Use Canvas Beta Layout",
|
||||
"alternateCanvasLayout": "Alternate Canvas Layout",
|
||||
"enableNodesEditor": "Enable Nodes Editor",
|
||||
"enableImageDebugging": "Enable Image Debugging",
|
||||
"useSlidersForAll": "Use Sliders For All Options",
|
||||
"showProgressInViewer": "Show Progress Images in Viewer",
|
||||
@@ -559,7 +570,9 @@
|
||||
"ui": "User Interface",
|
||||
"favoriteSchedulers": "Favorite Schedulers",
|
||||
"favoriteSchedulersPlaceholder": "No schedulers favorited",
|
||||
"showAdvancedOptions": "Show Advanced Options"
|
||||
"showAdvancedOptions": "Show Advanced Options",
|
||||
"experimental": "Experimental",
|
||||
"beta": "Beta"
|
||||
},
|
||||
"toast": {
|
||||
"serverError": "Server Error",
|
||||
@@ -572,6 +585,7 @@
|
||||
"uploadFailedInvalidUploadDesc": "Must be single PNG or JPEG image",
|
||||
"downloadImageStarted": "Image Download Started",
|
||||
"imageCopied": "Image Copied",
|
||||
"problemCopyingImage": "Unable to Copy Image",
|
||||
"imageLinkCopied": "Image Link Copied",
|
||||
"problemCopyingImageLink": "Unable to Copy Image Link",
|
||||
"imageNotLoaded": "No Image Loaded",
|
||||
@@ -688,6 +702,15 @@
|
||||
"reloadSchema": "Reload Schema",
|
||||
"saveNodes": "Save Nodes",
|
||||
"loadNodes": "Load Nodes",
|
||||
"clearNodes": "Clear Nodes"
|
||||
"clearNodes": "Clear Nodes",
|
||||
"zoomInNodes": "Zoom In",
|
||||
"zoomOutNodes": "Zoom Out",
|
||||
"fitViewportNodes": "Fit View",
|
||||
"hideGraphNodes": "Hide Graph Overlay",
|
||||
"showGraphNodes": "Show Graph Overlay",
|
||||
"hideLegendNodes": "Hide Field Type Legend",
|
||||
"showLegendNodes": "Show Field Type Legend",
|
||||
"hideMinimapnodes": "Hide MiniMap",
|
||||
"showMinimapnodes": "Show MiniMap"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -455,7 +455,12 @@
|
||||
"addDifference": "Add Difference",
|
||||
"pickModelType": "Pick Model Type",
|
||||
"selectModel": "Select Model",
|
||||
"importModels": "Import Models"
|
||||
"importModels": "Import Models",
|
||||
"settings": "Settings",
|
||||
"syncModels": "Sync Models",
|
||||
"syncModelsDesc": "If your models are out of sync with the backend, you can refresh them up using this option. This is generally handy in cases where you manually update your models.yaml file or add models to the InvokeAI root folder after the application has booted.",
|
||||
"modelsSynced": "Models Synced",
|
||||
"modelSyncFailed": "Model Sync Failed"
|
||||
},
|
||||
"parameters": {
|
||||
"general": "General",
|
||||
@@ -547,7 +552,8 @@
|
||||
"saveSteps": "Save images every n steps",
|
||||
"confirmOnDelete": "Confirm On Delete",
|
||||
"displayHelpIcons": "Display Help Icons",
|
||||
"useCanvasBeta": "Use Canvas Beta Layout",
|
||||
"alternateCanvasLayout": "Alternate Canvas Layout",
|
||||
"enableNodesEditor": "Enable Nodes Editor",
|
||||
"enableImageDebugging": "Enable Image Debugging",
|
||||
"useSlidersForAll": "Use Sliders For All Options",
|
||||
"showProgressInViewer": "Show Progress Images in Viewer",
|
||||
@@ -564,7 +570,9 @@
|
||||
"ui": "User Interface",
|
||||
"favoriteSchedulers": "Favorite Schedulers",
|
||||
"favoriteSchedulersPlaceholder": "No schedulers favorited",
|
||||
"showAdvancedOptions": "Show Advanced Options"
|
||||
"showAdvancedOptions": "Show Advanced Options",
|
||||
"experimental": "Experimental",
|
||||
"beta": "Beta"
|
||||
},
|
||||
"toast": {
|
||||
"serverError": "Server Error",
|
||||
|
||||
@@ -15,7 +15,6 @@ import InvokeTabs from 'features/ui/components/InvokeTabs';
|
||||
import ParametersDrawer from 'features/ui/components/ParametersDrawer';
|
||||
import i18n from 'i18n';
|
||||
import { ReactNode, memo, useEffect } from 'react';
|
||||
import DeleteBoardImagesModal from '../../features/gallery/components/Boards/DeleteBoardImagesModal';
|
||||
import UpdateImageBoardModal from '../../features/gallery/components/Boards/UpdateImageBoardModal';
|
||||
import GlobalHotkeys from './GlobalHotkeys';
|
||||
import Toaster from './Toaster';
|
||||
@@ -84,7 +83,6 @@ const App = ({ config = DEFAULT_CONFIG, headerComponent }: Props) => {
|
||||
</Grid>
|
||||
<DeleteImageModal />
|
||||
<UpdateImageBoardModal />
|
||||
<DeleteBoardImagesModal />
|
||||
<Toaster />
|
||||
<GlobalHotkeys />
|
||||
</>
|
||||
|
||||
@@ -15,10 +15,7 @@ const STYLES: ChakraProps['sx'] = {
|
||||
maxH: BOX_SIZE,
|
||||
shadow: 'dark-lg',
|
||||
borderRadius: 'lg',
|
||||
borderWidth: 2,
|
||||
borderStyle: 'dashed',
|
||||
borderColor: 'base.100',
|
||||
opacity: 0.5,
|
||||
opacity: 0.3,
|
||||
bg: 'base.800',
|
||||
color: 'base.50',
|
||||
_dark: {
|
||||
|
||||
@@ -28,6 +28,7 @@ const ImageDndContext = (props: ImageDndContextProps) => {
|
||||
const dispatch = useAppDispatch();
|
||||
|
||||
const handleDragStart = useCallback((event: DragStartEvent) => {
|
||||
console.log('dragStart', event.active.data.current);
|
||||
const activeData = event.active.data.current;
|
||||
if (!activeData) {
|
||||
return;
|
||||
@@ -37,15 +38,16 @@ const ImageDndContext = (props: ImageDndContextProps) => {
|
||||
|
||||
const handleDragEnd = useCallback(
|
||||
(event: DragEndEvent) => {
|
||||
console.log('dragEnd', event.active.data.current);
|
||||
const activeData = event.active.data.current;
|
||||
const overData = event.over?.data.current;
|
||||
if (!activeData || !overData) {
|
||||
if (!activeDragData || !overData) {
|
||||
return;
|
||||
}
|
||||
dispatch(dndDropped({ overData, activeData }));
|
||||
dispatch(dndDropped({ overData, activeData: activeDragData }));
|
||||
setActiveDragData(null);
|
||||
},
|
||||
[dispatch]
|
||||
[activeDragData, dispatch]
|
||||
);
|
||||
|
||||
const mouseSensor = useSensor(MouseSensor, {
|
||||
|
||||
@@ -11,6 +11,7 @@ import {
|
||||
useDraggable as useOriginalDraggable,
|
||||
useDroppable as useOriginalDroppable,
|
||||
} from '@dnd-kit/core';
|
||||
import { BoardId } from 'features/gallery/store/gallerySlice';
|
||||
import { ImageDTO } from 'services/api/types';
|
||||
|
||||
type BaseDropData = {
|
||||
@@ -55,7 +56,7 @@ export type AddToBatchDropData = BaseDropData & {
|
||||
|
||||
export type MoveBoardDropData = BaseDropData & {
|
||||
actionType: 'MOVE_BOARD';
|
||||
context: { boardId: string | null };
|
||||
context: { boardId: BoardId };
|
||||
};
|
||||
|
||||
export type TypesafeDroppableData =
|
||||
@@ -158,8 +159,34 @@ export const isValidDrop = (
|
||||
return payloadType === 'IMAGE_DTO' || 'IMAGE_NAMES';
|
||||
case 'ADD_TO_BATCH':
|
||||
return payloadType === 'IMAGE_DTO' || 'IMAGE_NAMES';
|
||||
case 'MOVE_BOARD':
|
||||
return payloadType === 'IMAGE_DTO' || 'IMAGE_NAMES';
|
||||
case 'MOVE_BOARD': {
|
||||
// If the board is the same, don't allow the drop
|
||||
|
||||
// Check the payload types
|
||||
const isPayloadValid = payloadType === 'IMAGE_DTO' || 'IMAGE_NAMES';
|
||||
if (!isPayloadValid) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Check if the image's board is the board we are dragging onto
|
||||
if (payloadType === 'IMAGE_DTO') {
|
||||
const { imageDTO } = active.data.current.payload;
|
||||
const currentBoard = imageDTO.board_id;
|
||||
const destinationBoard = overData.context.boardId;
|
||||
|
||||
const isSameBoard = currentBoard === destinationBoard;
|
||||
const isDestinationValid = !currentBoard ? destinationBoard : true;
|
||||
|
||||
return !isSameBoard && isDestinationValid;
|
||||
}
|
||||
|
||||
if (payloadType === 'IMAGE_NAMES') {
|
||||
// TODO (multi-select)
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -18,7 +18,6 @@ import { Middleware } from '@reduxjs/toolkit';
|
||||
import ImageDndContext from './ImageDnd/ImageDndContext';
|
||||
import { AddImageToBoardContextProvider } from '../contexts/AddImageToBoardContext';
|
||||
import { $authToken, $baseUrl } from 'services/api/client';
|
||||
import { DeleteBoardImagesContextProvider } from '../contexts/DeleteBoardImagesContext';
|
||||
|
||||
const App = lazy(() => import('./App'));
|
||||
const ThemeLocaleProvider = lazy(() => import('./ThemeLocaleProvider'));
|
||||
@@ -78,9 +77,7 @@ const InvokeAIUI = ({
|
||||
<ThemeLocaleProvider>
|
||||
<ImageDndContext>
|
||||
<AddImageToBoardContextProvider>
|
||||
<DeleteBoardImagesContextProvider>
|
||||
<App config={config} headerComponent={headerComponent} />
|
||||
</DeleteBoardImagesContextProvider>
|
||||
<App config={config} headerComponent={headerComponent} />
|
||||
</AddImageToBoardContextProvider>
|
||||
</ImageDndContext>
|
||||
</ThemeLocaleProvider>
|
||||
|
||||
@@ -59,15 +59,8 @@ export const SCHEDULER_LABEL_MAP: Record<SchedulerParam, string> = {
|
||||
|
||||
export type Scheduler = (typeof SCHEDULER_NAMES)[number];
|
||||
|
||||
// Valid upscaling levels
|
||||
export const UPSCALING_LEVELS: Array<{ label: string; value: string }> = [
|
||||
{ label: '2x', value: '2' },
|
||||
{ label: '4x', value: '4' },
|
||||
];
|
||||
export const NUMPY_RAND_MIN = 0;
|
||||
|
||||
export const NUMPY_RAND_MAX = 2147483647;
|
||||
|
||||
export const FACETOOL_TYPES = ['gfpgan', 'codeformer'] as const;
|
||||
|
||||
export const NODE_MIN_WIDTH = 250;
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
import { useDisclosure } from '@chakra-ui/react';
|
||||
import { PropsWithChildren, createContext, useCallback, useState } from 'react';
|
||||
import { ImageDTO } from 'services/api/types';
|
||||
import { useAddImageToBoardMutation } from 'services/api/endpoints/boardImages';
|
||||
import { imagesApi } from 'services/api/endpoints/images';
|
||||
import { useAppDispatch } from '../store/storeHooks';
|
||||
|
||||
export type ImageUsage = {
|
||||
isInitialImage: boolean;
|
||||
@@ -40,8 +41,7 @@ type Props = PropsWithChildren;
|
||||
export const AddImageToBoardContextProvider = (props: Props) => {
|
||||
const [imageToMove, setImageToMove] = useState<ImageDTO>();
|
||||
const { isOpen, onOpen, onClose } = useDisclosure();
|
||||
|
||||
const [addImageToBoard, result] = useAddImageToBoardMutation();
|
||||
const dispatch = useAppDispatch();
|
||||
|
||||
// Clean up after deleting or dismissing the modal
|
||||
const closeAndClearImageToDelete = useCallback(() => {
|
||||
@@ -63,14 +63,16 @@ export const AddImageToBoardContextProvider = (props: Props) => {
|
||||
const handleAddToBoard = useCallback(
|
||||
(boardId: string) => {
|
||||
if (imageToMove) {
|
||||
addImageToBoard({
|
||||
board_id: boardId,
|
||||
image_name: imageToMove.image_name,
|
||||
});
|
||||
dispatch(
|
||||
imagesApi.endpoints.addImageToBoard.initiate({
|
||||
imageDTO: imageToMove,
|
||||
board_id: boardId,
|
||||
})
|
||||
);
|
||||
closeAndClearImageToDelete();
|
||||
}
|
||||
},
|
||||
[addImageToBoard, closeAndClearImageToDelete, imageToMove]
|
||||
[dispatch, closeAndClearImageToDelete, imageToMove]
|
||||
);
|
||||
|
||||
return (
|
||||
|
||||
@@ -1,170 +0,0 @@
|
||||
import { useDisclosure } from '@chakra-ui/react';
|
||||
import { PropsWithChildren, createContext, useCallback, useState } from 'react';
|
||||
import { BoardDTO } from 'services/api/types';
|
||||
import { useDeleteBoardMutation } from '../../services/api/endpoints/boards';
|
||||
import { defaultSelectorOptions } from '../store/util/defaultMemoizeOptions';
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { some } from 'lodash-es';
|
||||
import { canvasSelector } from 'features/canvas/store/canvasSelectors';
|
||||
import { controlNetSelector } from 'features/controlNet/store/controlNetSlice';
|
||||
import { selectImagesById } from 'features/gallery/store/gallerySlice';
|
||||
import { nodesSelector } from 'features/nodes/store/nodesSlice';
|
||||
import { generationSelector } from 'features/parameters/store/generationSelectors';
|
||||
import { RootState } from '../store/store';
|
||||
import { useAppDispatch, useAppSelector } from '../store/storeHooks';
|
||||
import { ImageUsage } from './DeleteImageContext';
|
||||
import { requestedBoardImagesDeletion } from 'features/gallery/store/actions';
|
||||
|
||||
export const selectBoardImagesUsage = createSelector(
|
||||
[
|
||||
(state: RootState) => state,
|
||||
generationSelector,
|
||||
canvasSelector,
|
||||
nodesSelector,
|
||||
controlNetSelector,
|
||||
(state: RootState, board_id?: string) => board_id,
|
||||
],
|
||||
(state, generation, canvas, nodes, controlNet, board_id) => {
|
||||
const initialImage = generation.initialImage
|
||||
? selectImagesById(state, generation.initialImage.imageName)
|
||||
: undefined;
|
||||
const isInitialImage = initialImage?.board_id === board_id;
|
||||
|
||||
const isCanvasImage = canvas.layerState.objects.some((obj) => {
|
||||
if (obj.kind === 'image') {
|
||||
const image = selectImagesById(state, obj.imageName);
|
||||
return image?.board_id === board_id;
|
||||
}
|
||||
return false;
|
||||
});
|
||||
|
||||
const isNodesImage = nodes.nodes.some((node) => {
|
||||
return some(node.data.inputs, (input) => {
|
||||
if (input.type === 'image' && input.value) {
|
||||
const image = selectImagesById(state, input.value.image_name);
|
||||
return image?.board_id === board_id;
|
||||
}
|
||||
return false;
|
||||
});
|
||||
});
|
||||
|
||||
const isControlNetImage = some(controlNet.controlNets, (c) => {
|
||||
const controlImage = c.controlImage
|
||||
? selectImagesById(state, c.controlImage)
|
||||
: undefined;
|
||||
const processedControlImage = c.processedControlImage
|
||||
? selectImagesById(state, c.processedControlImage)
|
||||
: undefined;
|
||||
return (
|
||||
controlImage?.board_id === board_id ||
|
||||
processedControlImage?.board_id === board_id
|
||||
);
|
||||
});
|
||||
|
||||
const imageUsage: ImageUsage = {
|
||||
isInitialImage,
|
||||
isCanvasImage,
|
||||
isNodesImage,
|
||||
isControlNetImage,
|
||||
};
|
||||
|
||||
return imageUsage;
|
||||
},
|
||||
defaultSelectorOptions
|
||||
);
|
||||
|
||||
type DeleteBoardImagesContextValue = {
|
||||
/**
|
||||
* Whether the move image dialog is open.
|
||||
*/
|
||||
isOpen: boolean;
|
||||
/**
|
||||
* Closes the move image dialog.
|
||||
*/
|
||||
onClose: () => void;
|
||||
imagesUsage?: ImageUsage;
|
||||
board?: BoardDTO;
|
||||
onClickDeleteBoardImages: (board: BoardDTO) => void;
|
||||
handleDeleteBoardImages: (boardId: string) => void;
|
||||
handleDeleteBoardOnly: (boardId: string) => void;
|
||||
};
|
||||
|
||||
export const DeleteBoardImagesContext =
|
||||
createContext<DeleteBoardImagesContextValue>({
|
||||
isOpen: false,
|
||||
onClose: () => undefined,
|
||||
onClickDeleteBoardImages: () => undefined,
|
||||
handleDeleteBoardImages: () => undefined,
|
||||
handleDeleteBoardOnly: () => undefined,
|
||||
});
|
||||
|
||||
type Props = PropsWithChildren;
|
||||
|
||||
export const DeleteBoardImagesContextProvider = (props: Props) => {
|
||||
const [boardToDelete, setBoardToDelete] = useState<BoardDTO>();
|
||||
const { isOpen, onOpen, onClose } = useDisclosure();
|
||||
const dispatch = useAppDispatch();
|
||||
|
||||
// Check where the board images to be deleted are used (eg init image, controlnet, etc.)
|
||||
const imagesUsage = useAppSelector((state) =>
|
||||
selectBoardImagesUsage(state, boardToDelete?.board_id)
|
||||
);
|
||||
|
||||
const [deleteBoard] = useDeleteBoardMutation();
|
||||
|
||||
// Clean up after deleting or dismissing the modal
|
||||
const closeAndClearBoardToDelete = useCallback(() => {
|
||||
setBoardToDelete(undefined);
|
||||
onClose();
|
||||
}, [onClose]);
|
||||
|
||||
const onClickDeleteBoardImages = useCallback(
|
||||
(board?: BoardDTO) => {
|
||||
console.log({ board });
|
||||
if (!board) {
|
||||
return;
|
||||
}
|
||||
setBoardToDelete(board);
|
||||
onOpen();
|
||||
},
|
||||
[setBoardToDelete, onOpen]
|
||||
);
|
||||
|
||||
const handleDeleteBoardImages = useCallback(
|
||||
(boardId: string) => {
|
||||
if (boardToDelete) {
|
||||
dispatch(
|
||||
requestedBoardImagesDeletion({ board: boardToDelete, imagesUsage })
|
||||
);
|
||||
closeAndClearBoardToDelete();
|
||||
}
|
||||
},
|
||||
[dispatch, closeAndClearBoardToDelete, boardToDelete, imagesUsage]
|
||||
);
|
||||
|
||||
const handleDeleteBoardOnly = useCallback(
|
||||
(boardId: string) => {
|
||||
if (boardToDelete) {
|
||||
deleteBoard(boardId);
|
||||
closeAndClearBoardToDelete();
|
||||
}
|
||||
},
|
||||
[deleteBoard, closeAndClearBoardToDelete, boardToDelete]
|
||||
);
|
||||
|
||||
return (
|
||||
<DeleteBoardImagesContext.Provider
|
||||
value={{
|
||||
isOpen,
|
||||
board: boardToDelete,
|
||||
onClose: closeAndClearBoardToDelete,
|
||||
onClickDeleteBoardImages,
|
||||
handleDeleteBoardImages,
|
||||
handleDeleteBoardOnly,
|
||||
imagesUsage,
|
||||
}}
|
||||
>
|
||||
{props.children}
|
||||
</DeleteBoardImagesContext.Provider>
|
||||
);
|
||||
};
|
||||
@@ -11,7 +11,7 @@ import { addCommitStagingAreaImageListener } from './listeners/addCommitStagingA
|
||||
import { addAppConfigReceivedListener } from './listeners/appConfigReceived';
|
||||
import { addAppStartedListener } from './listeners/appStarted';
|
||||
import { addBoardIdSelectedListener } from './listeners/boardIdSelected';
|
||||
import { addRequestedBoardImageDeletionListener } from './listeners/boardImagesDeleted';
|
||||
import { addDeleteBoardAndImagesFulfilledListener } from './listeners/boardAndImagesDeleted';
|
||||
import { addCanvasCopiedToClipboardListener } from './listeners/canvasCopiedToClipboard';
|
||||
import { addCanvasDownloadedAsImageListener } from './listeners/canvasDownloadedAsImage';
|
||||
import { addCanvasMergedListener } from './listeners/canvasMerged';
|
||||
@@ -29,10 +29,6 @@ import {
|
||||
addRequestedImageDeletionListener,
|
||||
} from './listeners/imageDeleted';
|
||||
import { addImageDroppedListener } from './listeners/imageDropped';
|
||||
import {
|
||||
addImageMetadataReceivedFulfilledListener,
|
||||
addImageMetadataReceivedRejectedListener,
|
||||
} from './listeners/imageMetadataReceived';
|
||||
import {
|
||||
addImageRemovedFromBoardFulfilledListener,
|
||||
addImageRemovedFromBoardRejectedListener,
|
||||
@@ -46,18 +42,10 @@ import {
|
||||
addImageUploadedFulfilledListener,
|
||||
addImageUploadedRejectedListener,
|
||||
} from './listeners/imageUploaded';
|
||||
import {
|
||||
addImageUrlsReceivedFulfilledListener,
|
||||
addImageUrlsReceivedRejectedListener,
|
||||
} from './listeners/imageUrlsReceived';
|
||||
import { addInitialImageSelectedListener } from './listeners/initialImageSelected';
|
||||
import { addModelSelectedListener } from './listeners/modelSelected';
|
||||
import { addModelsLoadedListener } from './listeners/modelsLoaded';
|
||||
import { addReceivedOpenAPISchemaListener } from './listeners/receivedOpenAPISchema';
|
||||
import {
|
||||
addReceivedPageOfImagesFulfilledListener,
|
||||
addReceivedPageOfImagesRejectedListener,
|
||||
} from './listeners/receivedPageOfImages';
|
||||
import {
|
||||
addSessionCanceledFulfilledListener,
|
||||
addSessionCanceledPendingListener,
|
||||
@@ -90,6 +78,8 @@ import { addUserInvokedNodesListener } from './listeners/userInvokedNodes';
|
||||
import { addUserInvokedTextToImageListener } from './listeners/userInvokedTextToImage';
|
||||
import { addModelLoadStartedEventListener } from './listeners/socketio/socketModelLoadStarted';
|
||||
import { addModelLoadCompletedEventListener } from './listeners/socketio/socketModelLoadCompleted';
|
||||
import { addUpscaleRequestedListener } from './listeners/upscaleRequested';
|
||||
import { addFirstListImagesListener } from './listeners/addFirstListImagesListener.ts';
|
||||
|
||||
export const listenerMiddleware = createListenerMiddleware();
|
||||
|
||||
@@ -131,17 +121,9 @@ addRequestedImageDeletionListener();
|
||||
addImageDeletedPendingListener();
|
||||
addImageDeletedFulfilledListener();
|
||||
addImageDeletedRejectedListener();
|
||||
addRequestedBoardImageDeletionListener();
|
||||
addDeleteBoardAndImagesFulfilledListener();
|
||||
addImageToDeleteSelectedListener();
|
||||
|
||||
// Image metadata
|
||||
addImageMetadataReceivedFulfilledListener();
|
||||
addImageMetadataReceivedRejectedListener();
|
||||
|
||||
// Image URLs
|
||||
addImageUrlsReceivedFulfilledListener();
|
||||
addImageUrlsReceivedRejectedListener();
|
||||
|
||||
// User Invoked
|
||||
addUserInvokedCanvasListener();
|
||||
addUserInvokedNodesListener();
|
||||
@@ -197,17 +179,10 @@ addSessionCanceledPendingListener();
|
||||
addSessionCanceledFulfilledListener();
|
||||
addSessionCanceledRejectedListener();
|
||||
|
||||
// Fetching images
|
||||
addReceivedPageOfImagesFulfilledListener();
|
||||
addReceivedPageOfImagesRejectedListener();
|
||||
|
||||
// ControlNet
|
||||
addControlNetImageProcessedListener();
|
||||
addControlNetAutoProcessListener();
|
||||
|
||||
// Update image URLs on connect
|
||||
// addUpdateImageUrlsOnConnectListener();
|
||||
|
||||
// Boards
|
||||
addImageAddedToBoardFulfilledListener();
|
||||
addImageAddedToBoardRejectedListener();
|
||||
@@ -228,3 +203,7 @@ addModelSelectedListener();
|
||||
addAppStartedListener();
|
||||
addModelsLoadedListener();
|
||||
addAppConfigReceivedListener();
|
||||
addFirstListImagesListener();
|
||||
|
||||
# Ad-hoc upscale workflow
|
||||
addUpscaleRequestedListener();
|
||||
|
||||
@@ -0,0 +1,43 @@
|
||||
import { createAction } from '@reduxjs/toolkit';
|
||||
import {
|
||||
IMAGE_CATEGORIES,
|
||||
imageSelected,
|
||||
} from 'features/gallery/store/gallerySlice';
|
||||
import {
|
||||
ImageCache,
|
||||
getListImagesUrl,
|
||||
imagesApi,
|
||||
} from 'services/api/endpoints/images';
|
||||
import { startAppListening } from '..';
|
||||
|
||||
export const appStarted = createAction('app/appStarted');
|
||||
|
||||
export const addFirstListImagesListener = () => {
|
||||
startAppListening({
|
||||
matcher: imagesApi.endpoints.listImages.matchFulfilled,
|
||||
effect: async (
|
||||
action,
|
||||
{ getState, dispatch, unsubscribe, cancelActiveListeners }
|
||||
) => {
|
||||
// Only run this listener on the first listImages request for no-board images
|
||||
if (
|
||||
action.meta.arg.queryCacheKey !==
|
||||
getListImagesUrl({ board_id: 'none', categories: IMAGE_CATEGORIES })
|
||||
) {
|
||||
return;
|
||||
}
|
||||
|
||||
// this should only run once
|
||||
cancelActiveListeners();
|
||||
unsubscribe();
|
||||
|
||||
// TODO: figure out how to type the predicate
|
||||
const data = action.payload as ImageCache;
|
||||
|
||||
if (data.ids.length > 0) {
|
||||
// Select the first image
|
||||
dispatch(imageSelected(data.ids[0] as string));
|
||||
}
|
||||
},
|
||||
});
|
||||
};
|
||||