Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-21 03:28:25 -05:00)

# Compare commits

Comparing `v5.6.1rc1...ryan/model` (57 commits)
| SHA1 |
|---|
| f01e41ceaf |
| 609ed06265 |
| f9e899a6ba |
| 9262c0ec53 |
| 7fddb06dc4 |
| 239297caf6 |
| 20f0b2f4fa |
| cfb8815355 |
| c866b5a799 |
| 3b76812d43 |
| a8f3471fc7 |
| 6d8dee05a9 |
| e684e49299 |
| 4ce2042d65 |
| 05a50b557a |
| 85e1e9587e |
| 8e763e87bb |
| 4a4360a40c |
| 612d6b00e3 |
| 7a5dd084ad |
| 79a4d0890f |
| e0c899104b |
| c37bb6375c |
| 4716170988 |
| 463196d781 |
| e1e756800d |
| ab337594b8 |
| 699e4e5995 |
| 33f17520ca |
| 46d061212c |
| 829dddefc8 |
| b6c159cfdb |
| 5a31c467a3 |
| 13dbde2429 |
| a8ee72d7fb |
| 7a002e1b05 |
| b50dd8502f |
| f4c13b057d |
| cb884ee567 |
| 050d4465e6 |
| e48bb844b9 |
| 57eb05983b |
| dc3be08653 |
| ae1041286f |
| 6e270cc5bf |
| 6dc447aba8 |
| a4c0fcb6c8 |
| 1f3580716c |
| 405e53f80a |
| be120ff587 |
| f8a3002d34 |
| c785282c94 |
| f4fd3e0cc9 |
| ae04fa5e60 |
| 838e1e1438 |
| e3e8e95da6 |
| 030832f30b |
.github/workflows/typegen-checks.yml (vendored, 85 lines)

@@ -1,85 +0,0 @@
# Runs typegen schema quality checks.
# Frontend types should match the server.
#
# Checks for changes to files before running the checks.
# If always_run is true, always runs the checks.

name: 'typegen checks'

on:
  push:
    branches:
      - 'main'
  pull_request:
    types:
      - 'ready_for_review'
      - 'opened'
      - 'synchronize'
  merge_group:
  workflow_dispatch:
    inputs:
      always_run:
        description: 'Always run the checks'
        required: true
        type: boolean
        default: true
  workflow_call:
    inputs:
      always_run:
        description: 'Always run the checks'
        required: true
        type: boolean
        default: true

jobs:
  typegen-checks:
    runs-on: ubuntu-22.04
    timeout-minutes: 15 # expected run time: <5 min
    steps:
      - name: checkout
        uses: actions/checkout@v4

      - name: check for changed files
        if: ${{ inputs.always_run != true }}
        id: changed-files
        uses: tj-actions/changed-files@v42
        with:
          files_yaml: |
            src:
              - 'pyproject.toml'
              - 'invokeai/**'

      - name: setup python
        if: ${{ steps.changed-files.outputs.src_any_changed == 'true' || inputs.always_run == true }}
        uses: actions/setup-python@v5
        with:
          python-version: '3.10'
          cache: pip
          cache-dependency-path: pyproject.toml

      - name: install python dependencies
        if: ${{ steps.changed-files.outputs.src_any_changed == 'true' || inputs.always_run == true }}
        run: pip3 install --use-pep517 --editable="."

      - name: install frontend dependencies
        if: ${{ steps.changed-files.outputs.src_any_changed == 'true' || inputs.always_run == true }}
        uses: ./.github/actions/install-frontend-deps

      - name: copy schema
        if: ${{ steps.changed-files.outputs.src_any_changed == 'true' || inputs.always_run == true }}
        run: cp invokeai/frontend/web/src/services/api/schema.ts invokeai/frontend/web/src/services/api/schema_orig.ts
        shell: bash

      - name: generate schema
        if: ${{ steps.changed-files.outputs.src_any_changed == 'true' || inputs.always_run == true }}
        run: make frontend-typegen
        shell: bash

      - name: compare files
        if: ${{ steps.changed-files.outputs.src_any_changed == 'true' || inputs.always_run == true }}
        run: |
          if ! diff invokeai/frontend/web/src/services/api/schema.ts invokeai/frontend/web/src/services/api/schema_orig.ts; then
            echo "Files are different!";
            exit 1;
          fi
        shell: bash
README.md (45 lines)

@@ -30,12 +30,51 @@ Invoke is available in two editions:
|----------------------------------------------------------------------------------------------------------------------------|
| [Installation and Updates][installation docs] - [Documentation and Tutorials][docs home] - [Bug Reports][github issues] - [Contributing][contributing docs] |

# Installation
</div>

To get started with Invoke, [Download the Installer](https://www.invoke.com/downloads).
## Quick Start

For detailed step-by-step instructions, or for instructions on manual/docker installations, visit our documentation on [Installation and Updates][installation docs].
1. Download and unzip the installer from the bottom of the [latest release][latest release link].
2. Run the installer script.

    - **Windows**: Double-click on the `install.bat` script.
    - **macOS**: Open a Terminal window, drag the file `install.sh` from Finder into the Terminal, and press enter.
    - **Linux**: Run `install.sh`.

3. When prompted, enter a location for the install and select your GPU type.
4. Once the install finishes, find the directory you selected during install. The default location is `C:\Users\Username\invokeai` for Windows or `~/invokeai` for Linux/macOS.
5. Run the launcher script (`invoke.bat` for Windows, `invoke.sh` for macOS and Linux) the same way you ran the installer script in step 2.
6. Select option 1 to start the application. Once it starts up, open your browser and go to <http://localhost:9090>.
7. Open the model manager tab to install a starter model, and then you'll be ready to generate.

More detail, including hardware requirements and manual install instructions, is available in the [installation documentation][installation docs].

## Docker Container

We publish official container images in GitHub Container Registry: https://github.com/invoke-ai/InvokeAI/pkgs/container/invokeai. Both CUDA and ROCm images are available. Check the above link for relevant tags.

> [!IMPORTANT]
> Ensure that Docker is set up to use the GPU. Refer to [NVIDIA][nvidia docker docs] or [AMD][amd docker docs] documentation.

### Generate!

Run the container, modifying the command as necessary:

```bash
docker run --runtime=nvidia --gpus=all --publish 9090:9090 ghcr.io/invoke-ai/invokeai
```

Then open `http://localhost:9090` and install some models using the Model Manager tab to begin generating.

For ROCm, add `--device /dev/kfd --device /dev/dri` to the `docker run` command.

### Persist your data

You will likely want to persist your workspace outside of the container. Use the `--volume /home/myuser/invokeai:/invokeai` flag to mount a local directory (using its **absolute** path) to the `/invokeai` path inside the container. Your generated images and models will reside there. You can use this directory with other InvokeAI installations, or switch between runtime directories as needed.
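For example, combining the `docker run` command above with a mounted data directory (substitute your own absolute path):

```bash
docker run --runtime=nvidia --gpus=all \
  --publish 9090:9090 \
  --volume /home/myuser/invokeai:/invokeai \
  ghcr.io/invoke-ai/invokeai
```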
### DIY

Build your own image and customize the environment to match your needs using our `docker-compose` stack. See [README.md](./docker/README.md) in the [docker](./docker) directory.

## Troubleshooting, FAQ and Support
@@ -39,7 +39,7 @@ It has two sections - one for internal use and one for user settings:

```yaml
# Internal metadata - do not edit:
schema_version: 4.0.2
schema_version: 4

# Put user settings here - see https://invoke-ai.github.io/InvokeAI/features/CONFIGURATION/:
host: 0.0.0.0 # serve the app on your local network
```

@@ -83,10 +83,6 @@ A subset of settings may be specified using CLI args:

- `--root`: specify the root directory
- `--config`: override the default `invokeai.yaml` file location
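For example, a sketch using the `invokeai-web` entry point described in the manual install guide later in this diff (paths are illustrative):

```bash
invokeai-web --root ~/invokeai --config ~/invokeai/invokeai.yaml
```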
### Low-VRAM Mode

See the [Low-VRAM mode docs][low-vram] for details on enabling this feature.

### All Settings

Following the table are additional explanations for certain settings.

@@ -118,10 +114,6 @@ remote_api_tokens:

The provided token will be added as a `Bearer` token to the network requests to download the model files. As far as we know, this works for all model marketplaces that require authorization.

!!! tip "HuggingFace Models"

    If you get an error when installing a HF model using a URL instead of a repo ID, you may need to [set up a HF API token](https://huggingface.co/settings/tokens) and add an entry for it under `remote_api_tokens`. Use `huggingface.co` for `url_regex`.
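A minimal sketch of such an entry (the `token` field name and the value are illustrative, not confirmed by this diff; check the configuration docs for the exact schema):

```yaml
remote_api_tokens:
  - url_regex: huggingface.co
    token: my_hf_token # hypothetical placeholder - use your own token
```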
#### Model Hashing

Models are hashed during installation, providing a stable identifier for models across all platforms. Hashing is a one-time operation.
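As a sketch, the hashing behavior can be tuned with a config setting along these lines (the `hashing_algorithm` name and value are assumptions, not shown in this diff; consult the settings table for the exact option):

```yaml
# Assumed setting name - verify against the settings table above.
hashing_algorithm: blake3_single
```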
@@ -189,4 +181,3 @@ The `log_format` option provides several alternative formats:

[basic guide to yaml files]: https://circleci.com/blog/what-is-yaml-a-beginner-s-guide/
[Model Marketplace API Keys]: #model-marketplace-api-keys
[low-vram]: ./features/low-vram.md
@@ -1,10 +1,12 @@

# Dev Environment

To make changes to Invoke's backend, frontend or documentation, you'll need to set up a dev environment.
To make changes to Invoke's backend, frontend, or documentation, you'll need to set up a dev environment.

If you only want to make changes to the docs site, you can skip the frontend dev environment setup as described in the below guide.
If you just want to use Invoke, you should use the [installer][installer link].

If you just want to use Invoke, you should use the [launcher][launcher link].
!!! info "Why do I need the frontend toolchain?"

    The repo doesn't contain a build of the frontend. You'll be responsible for rebuilding it every time you pull in new changes, or running it in dev mode (which incurs a substantial performance penalty).

!!! warning

@@ -15,66 +17,84 @@ If you just want to use Invoke, you should use the [launcher][launcher link].
## Setup

1. Run through the [requirements][requirements link].

2. [Fork and clone][forking link] the [InvokeAI repo][repo link].

3. Create a directory for user data (images, models, db, etc). This is typically at `~/invokeai`, but if you already have a non-dev install, you may want to create a separate directory for the dev install.

4. Follow the [manual install][manual install link] guide, with some modifications to the install command:

    - Use `.` instead of `invokeai` to install from the current directory. You don't need to specify the version.

    - Add `-e` after the `install` operation to make this an [editable install][editable install link]. That means your changes to the python code will be reflected when you restart the Invoke server.

    - When installing the `invokeai` package, add the `dev`, `test` and `docs` package options to the package specifier. You may or may not need the `xformers` option - follow the manual install guide to figure that out. So, your package specifier will be either `".[dev,test,docs]"` or `".[dev,test,docs,xformers]"`. Note the quotes!

    With the modifications made, the install command should look something like this:
4. Create a python virtual environment inside the directory you just created:

    ```sh
    uv pip install -e ".[dev,test,docs,xformers]" --python 3.11 --python-preference only-managed --index=https://download.pytorch.org/whl/cu124 --reinstall
    python3 -m venv .venv --prompt InvokeAI-Dev
    ```

5. At this point, you should have Invoke installed, a venv set up and activated, and the server running. But you will see a warning in the terminal that no UI was found. If you go to the URL for the server, you won't get a UI.

    This is because the UI build is not distributed with the source code. You need to build it manually. End the running server instance.

    If you only want to edit the docs, you can stop here and skip to the **Documentation** section below.

6. Install the frontend dev toolchain:

    - [`nodejs`](https://nodejs.org/) (v20+)

    - [`pnpm`](https://pnpm.io/8.x/installation) (must be v8 - not v9!)

7. Do a production build of the frontend:
5. Activate the venv (you'll need to do this every time you want to run the app):

    ```sh
    cd <PATH_TO_INVOKEAI_REPO>/invokeai/frontend/web
    source .venv/bin/activate
    ```

6. Install the repo as an [editable install][editable install link]:

    ```sh
    pip install -e ".[dev,test,xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu121
    ```

    Refer to the [manual installation][manual install link] instructions for help determining the correct install options. `xformers` is optional, but `dev` and `test` are not.

7. Install the frontend dev toolchain:

    - [`nodejs`](https://nodejs.org/) (recommend v20 LTS)
    - [`pnpm`](https://pnpm.io/8.x/installation) (must be v8 - not v9!)

8. Do a production build of the frontend:

    ```sh
    cd PATH_TO_INVOKEAI_REPO/invokeai/frontend/web
    pnpm i
    pnpm build
    ```

8. Restart the server and navigate to the URL. You should get a UI. After making changes to the python code, restart the server to see those changes.
9. Start the application:

    ```sh
    cd PATH_TO_INVOKEAI_REPO
    python scripts/invokeai-web.py
    ```

10. Access the UI at `localhost:9090`.

## Updating the UI

You'll need to run `pnpm build` every time you pull in new changes.

Another option is to skip the build and instead run the UI in dev mode:
You'll need to run `pnpm build` every time you pull in new changes. Another option is to skip the build and instead run the app in dev mode:

```sh
pnpm dev
```

This starts a vite dev server for the UI at `127.0.0.1:5173`, which you will use instead of `127.0.0.1:9090`.
This starts a dev server at `localhost:5173`, which you will use instead of `localhost:9090`.

The dev mode is substantially slower than the production build but may be more convenient if you just need to test things out. It will hot-reload the UI as you make changes to the frontend code. Sometimes the hot-reload doesn't work, and you need to manually refresh the browser tab.
The dev mode is substantially slower than the production build but may be more convenient if you just need to test things out.

## Documentation

The documentation is built with `mkdocs`. It provides a hot-reload dev server for the docs. Start it with `mkdocs serve`.
The documentation is built with `mkdocs`. To preview it locally, you need an additional set of packages installed.

[launcher link]: ../installation/quick_start.md

```sh
# after activating the venv
pip install -e ".[docs]"
```

Then, you can start a live docs dev server, which will auto-refresh when you edit the docs:

```sh
mkdocs serve
```

On macOS and Linux, there is a `make` target for this:

```sh
make docs
```

[installer link]: ../installation/installer.md
[forking link]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/fork-a-repo
[requirements link]: ../installation/requirements.md
[repo link]: https://github.com/invoke-ai/InvokeAI
Binary file not shown (image removed; before: 72 KiB).
@@ -1,163 +0,0 @@

---
title: Low-VRAM mode
---

As of v5.6.0, Invoke has a low-VRAM mode. It works on systems with dedicated GPUs (Nvidia GPUs on Windows/Linux and AMD GPUs on Linux).

This allows you to generate even if your GPU doesn't have enough VRAM to hold full models. Most users should be able to run even the beefiest models - like the ~24GB unquantised FLUX dev model.

## Enabling Low-VRAM mode

To enable Low-VRAM mode, add this line to your `invokeai.yaml` configuration file, then restart Invoke:

```yaml
enable_partial_loading: true
```

**Windows users should also [disable the Nvidia sysmem fallback](#disabling-nvidia-sysmem-fallback-windows-only)**.

It is possible to fine-tune the settings for best performance or if you still get out-of-memory errors (OOMs).

!!! tip "How to find `invokeai.yaml`"

    The `invokeai.yaml` configuration file lives in your install directory. To access it, run the **Invoke Community Edition** launcher and click the install location. This will open your install directory in a file explorer window.

    You'll see `invokeai.yaml` there and can edit it with any text editor. After making changes, restart Invoke.

    If you don't see `invokeai.yaml`, launch Invoke once. It will create the file on its first startup.

## Details and fine-tuning

Low-VRAM mode involves 4 features, each of which can be configured or fine-tuned:

- Partial model loading (`enable_partial_loading`)
- Dynamic RAM and VRAM cache sizes (`max_cache_ram_gb`, `max_cache_vram_gb`)
- Working memory (`device_working_mem_gb`)
- Keeping a RAM weight copy (`keep_ram_copy_of_weights`)

Read on to learn about these features and understand how to fine-tune them for your system and use-cases.

### Partial model loading

Invoke's partial model loading works by streaming model "layers" between RAM and VRAM as they are needed.

When an operation needs layers that are not in VRAM, but there isn't enough room to load them, inactive layers are offloaded to RAM to make room.

#### Enabling partial model loading

As described above, you can enable partial model loading by adding this line to `invokeai.yaml`:

```yaml
enable_partial_loading: true
```

### Dynamic RAM and VRAM cache sizes

Loading models from disk is slow and can be a major bottleneck for performance. Invoke uses two model caches - RAM and VRAM - to reduce loading from disk to a minimum.

By default, Invoke manages these caches' sizes dynamically for best performance.

#### Fine-tuning cache sizes

Prior to v5.6.0, the cache sizes were static, and for best performance, many users needed to manually fine-tune the `ram` and `vram` settings in `invokeai.yaml`.

As of v5.6.0, the caches are dynamically sized. The `ram` and `vram` settings are no longer used, and new settings are added to configure the cache.

**Most users will not need to fine-tune the cache sizes.**

But, if your GPU has enough VRAM to hold models fully, you might get a perf boost by manually setting the cache sizes in `invokeai.yaml`:

```yaml
# The default max cache RAM size is logged on InvokeAI startup. It is determined based on your system RAM / VRAM.
# You can override the default value by setting `max_cache_ram_gb`.
# Increasing `max_cache_ram_gb` will increase the amount of RAM used to cache inactive models, resulting in faster model
# reloads for the cached models.
# As an example, if your system has 32GB of RAM and no other heavy processes, setting the `max_cache_ram_gb` to 28GB
# might be a good value to achieve aggressive model caching.
max_cache_ram_gb: 28
# The default max cache VRAM size is adjusted dynamically based on the amount of available VRAM (taking into
# consideration the VRAM used by other processes).
# You can override the default value by setting `max_cache_vram_gb`. Note that this value takes precedence over the
# `device_working_mem_gb`.
# It is recommended to set the VRAM cache size to be as large as possible while leaving enough room for the working
# memory of the tasks you will be doing. For example, on a 24GB GPU that will be running unquantized FLUX without any
# auxiliary models, 18GB might be a good value.
max_cache_vram_gb: 18
```

!!! tip "Max safe value for `max_cache_vram_gb`"

    To determine the max safe value for `max_cache_vram_gb`, subtract `device_working_mem_gb` from your GPU's VRAM. As described below, the default for `device_working_mem_gb` is 3GB.

    For example, if you have a 12GB GPU, the max safe value for `max_cache_vram_gb` is `12GB - 3GB = 9GB`.

    If you had increased `device_working_mem_gb` to 4GB, then the max safe value for `max_cache_vram_gb` is `12GB - 4GB = 8GB`.
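Putting the tip's arithmetic into config form, a 12GB GPU with the default working memory might use:

```yaml
# Example for a 12GB GPU: 12GB total - 3GB working memory = 9GB cache.
device_working_mem_gb: 3
max_cache_vram_gb: 9
```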
### Working memory

Invoke cannot use _all_ of your VRAM for model caching and loading. It requires some VRAM to use as working memory for various operations.

Invoke reserves 3GB VRAM as working memory by default, which is enough for most use-cases. However, it is possible to fine-tune this setting if you still get OOMs.

#### Fine-tuning working memory

You can increase the working memory size in `invokeai.yaml` to prevent OOMs:

```yaml
# The default is 3GB - bump it up to 4GB to prevent OOMs.
device_working_mem_gb: 4
```

!!! tip "Operations may request more working memory"

    For some operations, we can determine VRAM requirements in advance and allocate additional working memory to prevent OOMs.

    VAE decoding is one such operation. This operation converts the generation process's output into an image. For large image outputs, this might use more than the default working memory size of 3GB.

    During this decoding step, Invoke calculates how much VRAM will be required to decode and requests that much VRAM from the model manager. If the amount exceeds the working memory size, the model manager will offload cached model layers from VRAM until there's enough VRAM to decode.

    Once decoding completes, the model manager "reclaims" the extra VRAM allocated as working memory for future model loading operations.

### Keeping a RAM weight copy

Invoke has the option of keeping a RAM copy of all model weights, even when they are loaded onto the GPU. This optimization is _on_ by default, and enables faster model switching and LoRA patching. Disabling this feature will reduce the average RAM load while running Invoke (peak RAM likely won't change), at the cost of slower model switching and LoRA patching. If you have limited RAM, you can disable this optimization:

```yaml
# Set to false to reduce the average RAM usage at the cost of slower model switching and LoRA patching.
keep_ram_copy_of_weights: false
```

### Disabling Nvidia sysmem fallback (Windows only)

On Windows, Nvidia GPUs are able to use system RAM when their VRAM fills up via **sysmem fallback**. While it sounds like a good idea on the surface, in practice it causes massive slowdowns during generation.

It is strongly suggested to disable this feature:

- Open the **NVIDIA Control Panel** app.
- Expand **3D Settings** on the left panel.
- Click **Manage 3D Settings** in the left panel.
- Find **CUDA - Sysmem Fallback Policy** in the right panel and set it to **Prefer No Sysmem Fallback**.



!!! tip "Invoke does the same thing, but better"

    If the sysmem fallback feature sounds familiar, that's because Invoke's partial model loading strategy is conceptually very similar - use VRAM when there's room, else fall back to RAM.

    Unfortunately, the Nvidia implementation is not optimized for applications like Invoke and does more harm than good.

## Troubleshooting

### Windows page file

Invoke has high virtual memory (a.k.a. 'committed memory') requirements. This can cause issues on Windows if the page file size limits are hit. (See this issue for the technical details on why this happens: https://github.com/invoke-ai/InvokeAI/issues/7563.)

If you run out of page file space, InvokeAI may crash. Often, these crashes will happen with one of the following errors:

- InvokeAI exits with Windows error code `3221225477`
- InvokeAI crashes without an error, but `eventvwr.msc` reveals an error with code `0xc0000005` (the hex equivalent of `3221225477`)

If you are running out of page file space, try the following solutions:

- Make sure that you have sufficient disk space for the page file to grow. Watch your disk usage as Invoke runs. If it climbs near 100% leading up to the crash, then this is very likely the source of the issue. Clear out some disk space to resolve the issue.
- Make sure that your page file is set to "System managed size" (this is the default) rather than a custom size. Under the "System managed size" policy, the page file will grow dynamically as needed. A quick way to inspect current page file usage is shown below.
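A hedged sketch of checking the page file from PowerShell (not from the original docs; sizes are reported in MB):

```ps
# Show current page file allocation and usage (sizes in MB).
Get-CimInstance Win32_PageFileUsage | Format-List Name, AllocatedBaseSize, CurrentUsage, PeakUsage
```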
@@ -50,9 +50,11 @@ title: Invoke

## Installation

The [Invoke Launcher](installation/quick_start.md) is the easiest way to install, update and run Invoke on Windows, macOS and Linux.
The [installer script](installation/installer.md) is the easiest way to install and update the application.

You can also install Invoke as a [python package](installation/manual.md) or with [docker](installation/docker.md).
You can also install Invoke as a python package [via PyPI](installation/manual.md) or with [docker](installation/docker.md).

See the [installation section](./installation/index.md) for more information.

## Help
@@ -4,7 +4,7 @@ title: Docker

!!! warning "macOS users"

    Docker cannot access the GPU on macOS, so your generation speeds will be slow. Use the [launcher](./quick_start.md) instead.
    Docker cannot access the GPU on macOS, so your generation speeds will be slow. Use the [installer](./installer.md) instead.

!!! tip "Linux and Windows Users"
docs/installation/index.md (new file, 36 lines)

@@ -0,0 +1,36 @@
# Installation and Updating Overview

Before installing, review the [installation requirements](./requirements.md) to ensure your system is set up properly.

See the [FAQ](../faq.md) for frequently-encountered installation issues.

If you need more help, join our [discord](https://discord.gg/ZmtBAhwWhy) or [create a GitHub issue](https://github.com/invoke-ai/InvokeAI/issues).

## Automated Installer & Updates

✅ The automated [installer](./installer.md) is the best way to install Invoke.

⬆️ The same installer is also the best way to update Invoke - simply rerun it for the same folder you installed to.

The installation process simply manages installation for the core libraries & application dependencies that run Invoke.

Models, images, and other assets in the Invoke root folder won't be affected by the installation process.

## Manual Install

If you are familiar with python and want more control over the packages that are installed, you can [install Invoke manually via PyPI](./manual.md).

Updates are managed by reinstalling the latest version through PyPI.

## Developer Install

If you want to contribute to InvokeAI, you'll need to set up a [dev environment](../contributing/dev-environment.md).

## Docker

Invoke publishes docker images. See the [docker installation guide](./docker.md) for details.

## Other Installation Guides

- [PyPatchMatch](./patchmatch.md)
- [Installing Models](./models.md)
@@ -1,10 +1,4 @@

# Legacy Scripts

!!! warning "Legacy Scripts"

    We recommend using the Invoke Launcher to install and update Invoke. It's a desktop application for Windows, macOS and Linux. It takes care of a lot of nitty-gritty details for you.

    Follow the [quick start guide](./quick_start.md) to get started.

# Automatic Install & Updates

!!! tip "Use the installer to update"
@@ -4,11 +4,11 @@

**Python experience is mandatory.**

If you want to use Invoke locally, you should probably use the [launcher](./quick_start.md).
If you want to use Invoke locally, you should probably use the [installer](./installer.md).

If you want to contribute to Invoke or run the app on the latest dev branch, instead follow the [dev environment](../contributing/dev-environment.md) guide.
If you want to contribute to Invoke, instead follow the [dev environment](../contributing/dev-environment.md) guide.

InvokeAI is distributed as a python package on PyPI, installable with `pip`. There are a few things that are handled by the launcher that you'll need to manage manually, described in this guide.
InvokeAI is distributed as a python package on PyPI, installable with `pip`. There are a few things that are handled by the installer and launcher that you'll need to manage manually, described in this guide.

## Requirements

@@ -16,39 +16,43 @@ Before you start, go through the [installation requirements](./requirements.md).
## Walkthrough

We'll use [`uv`](https://github.com/astral-sh/uv) to install python and create a virtual environment, then install the `invokeai` package. `uv` is a modern, very fast alternative to `pip`.

The following commands vary depending on the version of Invoke being installed and the system onto which it is being installed.

1. Install `uv` as described in its [docs](https://docs.astral.sh/uv/getting-started/installation/#standalone-installer). We suggest using the standalone installer method.

    Run `uv --version` to confirm that `uv` is installed and working. After installation, you may need to restart your terminal to get access to `uv`.

2. Create a directory for your installation, typically in your home directory (e.g. `~/invokeai` or `$Home/invokeai`):
1. Create a directory to contain your InvokeAI library, configuration files, and models. This is known as the "runtime" or "root" directory, and typically lives in your home directory under the name `invokeai`.

    === "Linux/macOS"

        ```bash
        mkdir ~/invokeai
        cd ~/invokeai
        ```

    === "Windows (PowerShell)"

        ```bash
        mkdir $Home/invokeai
        cd $Home/invokeai
        ```

3. Create a virtual environment in that directory:
1. Enter the root directory and create a virtual Python environment within it named `.venv`.

    ```sh
    uv venv --relocatable --prompt invoke --python 3.11 --python-preference only-managed .venv
    ```

    !!! warning "Virtual Environment Location"

    This command creates a portable virtual environment at `.venv` complete with a portable python 3.11. It doesn't matter if your system has no python installed, or has a different version - `uv` will handle everything.

        While you may create the virtual environment anywhere in the file system, we recommend that you create it within the root directory as shown here. This allows the application to automatically detect its data directories.

4. Activate the virtual environment:

        If you choose a different location for the venv, then you _must_ set the `INVOKEAI_ROOT` environment variable or specify the root directory using the `--root` CLI arg.

    === "Linux/macOS"

        ```bash
        cd ~/invokeai
        python3 -m venv .venv --prompt InvokeAI
        ```

    === "Windows (PowerShell)"

        ```bash
        cd $Home/invokeai
        python3 -m venv .venv --prompt InvokeAI
        ```

1. Activate the new environment:

    === "Linux/macOS"

@@ -56,48 +60,41 @@ The following commands vary depending on the version of Invoke being installed and the system onto which it is being installed.

        ```bash
        source .venv/bin/activate
        ```

    === "Windows (PowerShell)"
    === "Windows"

        ```ps
        .venv\Scripts\activate
        ```

5. Choose a version to install. Review the [GitHub releases page](https://github.com/invoke-ai/InvokeAI/releases).

    !!! info "Permissions Error (Windows)"

6. Determine the package specifier to use when installing. This is a performance optimization.

        If you get a permissions error at this point, run this command and try again.

    - If you have an Nvidia 20xx series GPU or older, use `invokeai[xformers]`.
    - If you have an Nvidia 30xx series GPU or newer, or do not have an Nvidia GPU, use `invokeai`.

        `Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser`

7. Determine the `PyPI` index URL to use for installation, if any. This is necessary to get the right version of torch installed.

    The command-line prompt should change to show `(InvokeAI)`, indicating the venv is active.

    === "Invoke v5 or later"

1. Make sure that pip is installed in your virtual environment and up to date:

        - If you are on Windows with an Nvidia GPU, use `https://download.pytorch.org/whl/cu124`.
        - If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
        - If you are on Linux with an AMD GPU, use `https://download.pytorch.org/whl/rocm6.1`.
        - **In all other cases, do not use an index.**

    === "Invoke v4"

        - If you are on Windows with an Nvidia GPU, use `https://download.pytorch.org/whl/cu124`.
        - If you are on Linux with no GPU, use `https://download.pytorch.org/whl/cpu`.
        - If you are on Linux with an AMD GPU, use `https://download.pytorch.org/whl/rocm5.2`.
        - **In all other cases, do not use an index.**

8. Install the `invokeai` package. Substitute the package specifier and version.

    ```sh
    uv pip install <PACKAGE_SPECIFIER>==<VERSION> --python 3.11 --python-preference only-managed --force-reinstall
    ```

    ```bash
    python3 -m pip install --upgrade pip
    ```

    If you determined you needed to use a `PyPI` index URL in the previous step, you'll need to add `--index=<INDEX_URL>` like this:
1. Install the InvokeAI package. The base command is `pip install InvokeAI --use-pep517`, but you may need to change this depending on your system and the desired features.

    ```sh
    uv pip install <PACKAGE_SPECIFIER>==<VERSION> --python 3.11 --python-preference only-managed --index=<INDEX_URL> --force-reinstall
    ```

    - You may need to provide an [extra index URL](https://pip.pypa.io/en/stable/cli/pip_install/#cmdoption-extra-index-url). Select your platform configuration using [this tool on the PyTorch website](https://pytorch.org/get-started/locally/). Copy the `--extra-index-url` string from this and append it to your install command.

9. Deactivate and reactivate your venv so that the invokeai-specific commands become available in the environment:

    ```bash
    pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu121
    ```

    - If you have a CUDA GPU and want to install with `xformers`, you need to add an option to the package name. Note that `xformers` is not strictly necessary. PyTorch includes an implementation of the SDP attention algorithm with similar performance for most GPUs.

    ```bash
    pip install "InvokeAI[xformers]" --use-pep517
    ```

1. Deactivate and reactivate your venv so that the invokeai-specific commands become available in the environment:

    === "Linux/macOS"

@@ -105,31 +102,17 @@ The following commands vary depending on the version of Invoke being installed and the system onto which it is being installed.

        ```bash
        deactivate && source .venv/bin/activate
        ```

    === "Windows (PowerShell)"
    === "Windows"

        ```ps
        deactivate
        .venv\Scripts\activate
        ```

10. Run the application, specifying the directory you created earlier as the root directory:
1. Run the application:

    === "Linux/macOS"

    Run `invokeai-web` to start the UI. You must activate the virtual environment before running the app.

        ```bash
        invokeai-web --root ~/invokeai
        ```

    !!! warning

        If the virtual environment is _not_ inside the root directory, then you _must_ specify the path to the root directory with `--root \path\to\invokeai` or the `INVOKEAI_ROOT` environment variable.

    === "Windows (PowerShell)"

        ```bash
        invokeai-web --root $Home/invokeai
        ```

## Headless Install and Launch Scripts

If you run Invoke on a headless server, you might want to install and run Invoke on the command line.

We do not plan to maintain scripts to do this moving forward, instead focusing our dev resources on the GUI [launcher](../installation/quick_start.md).

You can create your own scripts for this by copying the handful of commands in this guide. `uv`'s [`pip` interface docs](https://docs.astral.sh/uv/reference/cli/#uv-pip-install) may be useful. A minimal sketch of such a script is shown below.
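A minimal sketch, stitched together from the `uv` commands in this guide (the version placeholder and any index URL must be filled in for your system; this is not an officially maintained script):

```sh
#!/usr/bin/env bash
set -euo pipefail

# Create the root directory and a portable venv, then install and run Invoke.
mkdir -p ~/invokeai && cd ~/invokeai
uv venv --relocatable --prompt invoke --python 3.11 --python-preference only-managed .venv
source .venv/bin/activate

# Substitute the package specifier, version, and (if needed) --index=<INDEX_URL> as described above.
uv pip install "invokeai==<VERSION>" --python 3.11 --python-preference only-managed --force-reinstall

invokeai-web --root ~/invokeai
```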
@@ -1,114 +0,0 @@

# Invoke Community Edition Quick Start

Welcome to Invoke! Follow these steps to install, update, and get started creating.

## Step 1: System Requirements

Invoke runs on Windows 10+, macOS 14+ and Linux (Ubuntu 20.04+ is well-tested).

Hardware requirements vary significantly depending on model and image output size. The requirements below are rough guidelines.

- All Apple Silicon (M1, M2, etc) Macs work, but 16GB+ memory is recommended.
- AMD GPUs are supported on Linux only. The VRAM requirements are the same as for Nvidia GPUs.

!!! info "Hardware Requirements (Windows/Linux)"

    === "SD1.5 - 512×512"

        - GPU: Nvidia 10xx series or later, 4GB+ VRAM.
        - Memory: At least 8GB RAM.
        - Disk: 10GB for base installation plus 30GB for models.

    === "SDXL - 1024×1024"

        - GPU: Nvidia 20xx series or later, 8GB+ VRAM.
        - Memory: At least 16GB RAM.
        - Disk: 10GB for base installation plus 100GB for models.

    === "FLUX - 1024×1024"

        - GPU: Nvidia 20xx series or later, 10GB+ VRAM.
        - Memory: At least 32GB RAM.
        - Disk: 10GB for base installation plus 200GB for models.

More detail on system requirements can be found [here](./requirements.md).

## Step 2: Download

Download the launcher for your operating system:

- [Download for Windows](https://download.invoke.ai/Invoke%20Community%20Edition.exe)
- [Download for macOS](https://download.invoke.ai/Invoke%20Community%20Edition.dmg)
- [Download for Linux](https://download.invoke.ai/Invoke%20Community%20Edition.AppImage)

## Step 3: Install or Update

Run the launcher you just downloaded, click **Install** and follow the instructions to get set up.

If you have an existing Invoke installation, you can select it and let the launcher manage the install. You'll be able to update or launch the installation.

!!! warning "Problem running the launcher on macOS"

    macOS may not allow you to run the launcher. We are working to resolve this by signing the launcher executable. Until that is done, you can either use the [legacy scripts](./legacy_scripts.md) to install, or manually flag the launcher as safe:

    - Open the **Invoke-Installer-mac-arm64.dmg** file.
    - Drag the launcher to **Applications**.
    - Open a terminal.
    - Run `xattr -d 'com.apple.quarantine' /Applications/Invoke\ Community\ Edition.app`.

    You should now be able to run the launcher.

## Step 4: Launch

Once installed, click **Finish**, then **Launch** to start Invoke.

The very first run after an installation or update will take a few extra moments to get ready.

!!! tip "Server Mode"

    The launcher runs Invoke as a desktop application. You can enable **Server Mode** in the launcher's settings to disable this and instead access the UI through your web browser.

## Step 5: Install Models

With Invoke started up, you'll need to install some models.

The quickest way to get started is to install a **Starter Model** bundle. If you already have a model collection, Invoke can use it.

!!! info "Install Models"

    === "Install a Starter Model bundle"

        1. Go to the **Models** tab.
        2. Click **Starter Models** on the right.
        3. Click one of the bundles to install its models. Refer to the [system requirements](#step-1-system-requirements) if you're unsure which model architecture will work for your system.

    === "Use my model collection"

        1. Go to the **Models** tab.
        2. Click **Scan Folder** on the right.
        3. Paste the path to your models collection and click **Scan Folder**.
        4. With **In-place install** enabled, Invoke will leave the model files where they are. If you disable this, **Invoke will move the models into its own folders**.

You're now ready to start creating!

## Step 6: Learn the Basics

We recommend watching our [Getting Started Playlist](https://www.youtube.com/playlist?list=PLvWK1Kc8iXGrQy8r9TYg6QdUuJ5MMx-ZO). It covers essential features and workflows, including:

- Generating your first image.
- Using control layers and reference guides.
- Refining images with advanced workflows.

## Other Installation Methods

- You can install the Invoke application as a python package. See our [manual install](./manual.md) docs.
- You can run Invoke with docker. See our [docker install](./docker.md) docs.
- You can still use our legacy scripts to install and run Invoke. See the [legacy scripts](./legacy_scripts.md) docs.

## Need Help?

- Visit our [Support Portal](https://support.invoke.ai).
- Watch the [Getting Started Playlist](https://www.youtube.com/playlist?list=PLvWK1Kc8iXGrQy8r9TYg6QdUuJ5MMx-ZO).
- Join the conversation on [Discord][discord link].

[discord link]: https://discord.gg/ZmtBAhwWhy
@@ -1,33 +1,90 @@

# Requirements

Invoke runs on Windows 10+, macOS 14+ and Linux (Ubuntu 20.04+ is well-tested).
## GPU

## Hardware

!!! warning "Problematic Nvidia GPUs"

Hardware requirements vary significantly depending on model and image output size. The requirements below are rough guidelines.

    We do not recommend these GPUs. They cannot operate with half precision, but have insufficient VRAM to generate 512x512 images at full precision.

- All Apple Silicon (M1, M2, etc) Macs work, but 16GB+ memory is recommended.
- AMD GPUs are supported on Linux only. The VRAM requirements are the same as for Nvidia GPUs.

    - NVIDIA 10xx series cards such as the 1080 Ti
    - GTX 1650 series cards
    - GTX 1660 series cards

!!! info "Hardware Requirements (Windows/Linux)"
Invoke runs best with a dedicated GPU, but will fall back to running on CPU, albeit much slower. You'll need a beefier GPU for SDXL.

    === "SD1.5 - 512×512"
!!! example "Stable Diffusion 1.5"

        - GPU: Nvidia 10xx series or later, 4GB+ VRAM.
        - Memory: At least 8GB RAM.
        - Disk: 10GB for base installation plus 30GB for models.

    === "Nvidia"

    === "SDXL - 1024×1024"

        ```
        Any GPU with at least 4GB VRAM.
        ```

        - GPU: Nvidia 20xx series or later, 8GB+ VRAM.
        - Memory: At least 16GB RAM.
        - Disk: 10GB for base installation plus 100GB for models.

    === "AMD"

    === "FLUX - 1024×1024"

        ```
        Any GPU with at least 4GB VRAM. Linux only.
        ```

        - GPU: Nvidia 20xx series or later, 10GB+ VRAM.
        - Memory: At least 32GB RAM.
        - Disk: 10GB for base installation plus 200GB for models.

    === "Mac"

        ```
        Any Apple Silicon Mac with at least 8GB memory.
        ```

!!! example "Stable Diffusion XL"

    === "Nvidia"

        ```
        Any GPU with at least 8GB VRAM.
        ```

    === "AMD"

        ```
        Any GPU with at least 16GB VRAM. Linux only.
        ```

    === "Mac"

        ```
        Any Apple Silicon Mac with at least 16GB memory.
        ```

## RAM

At least 12GB of RAM.

## Disk

SSDs will, of course, offer the best performance.

The base application disk usage depends on the torch backend.

!!! example "Disk"

    === "Nvidia (CUDA)"

        ```
        ~6.5GB
        ```

    === "AMD (ROCm)"

        ```
        ~12GB
        ```

    === "Mac (MPS)"

        ```
        ~3.5GB
        ```

You'll need to set aside some space for images, depending on how much you generate. A couple GB is enough to get started.

You'll need a good chunk of space for models. Even if you only install the most popular models and the usual support models (ControlNet, IP Adapter, etc.), you will quickly hit 50GB of models.

!!! info "`tmpfs` on Linux"
@@ -35,32 +92,26 @@ Hardware requirements vary significantly depending on image output size

## Python

!!! tip "The launcher installs python for you"

    You don't need to do this if you are installing with the [Invoke Launcher](./quick_start.md).

Invoke requires python 3.10 or 3.11. If you don't already have one of these versions installed, we suggest installing 3.11, as it will be supported for longer.

Check that your system has an up-to-date Python installed by running `python3 --version` in the terminal (Linux, macOS) or cmd/powershell (Windows).
Check that your system has an up-to-date Python installed by running `python --version` in the terminal (Linux, macOS) or cmd/powershell (Windows).

!!! info "Installing Python"
<h3>Installing Python (Windows)</h3>

    === "Windows"

- Install python 3.11 with [an official installer].
- The installer includes an option to add python to your PATH. Be sure to enable this. If you missed it, re-run the installer, choose to modify an existing installation, and tick that checkbox.
- You may need to install [Microsoft Visual C++ Redistributable].

        - Install python 3.11 with [an official installer].
        - The installer includes an option to add python to your PATH. Be sure to enable this. If you missed it, re-run the installer, choose to modify an existing installation, and tick that checkbox.
        - You may need to install [Microsoft Visual C++ Redistributable].

<h3>Installing Python (macOS)</h3>

    === "macOS"

- Install python 3.11 with [an official installer].
- If model installs fail with a certificate error, you may need to run this command (changing the python version to match what you have installed): `/Applications/Python\ 3.10/Install\ Certificates.command`
- If you haven't already, you will need to install the XCode CLI Tools by running `xcode-select --install` in a terminal.

        - Install python 3.11 with [an official installer].
        - If model installs fail with a certificate error, you may need to run this command (changing the python version to match what you have installed): `/Applications/Python\ 3.10/Install\ Certificates.command`
        - If you haven't already, you will need to install the XCode CLI Tools by running `xcode-select --install` in a terminal.

<h3>Installing Python (Linux)</h3>

    === "Linux"

- Installing python varies depending on your system. On Ubuntu, you can use the [deadsnakes PPA](https://launchpad.net/~deadsnakes/+archive/ubuntu/ppa).
- You'll need to install `libglib2.0-0` and `libgl1-mesa-glx` for OpenCV to work. For example, on a Debian system: `sudo apt update && sudo apt install -y libglib2.0-0 libgl1-mesa-glx`

        - Follow the [linux install instructions], being sure to install python 3.11.
        - You'll need to install `libglib2.0-0` and `libgl1-mesa-glx` for OpenCV to work. For example, on a Debian system: `sudo apt update && sudo apt install -y libglib2.0-0 libgl1-mesa-glx`

## Drivers

@@ -124,4 +175,7 @@ An alternative to installing ROCm locally is to use a [ROCm docker container] to

[ROCm Documentation]: https://rocm.docs.amd.com/projects/install-on-linux/en/latest/tutorial/quick-start.html
[cuDNN support matrix]: https://docs.nvidia.com/deeplearning/cudnn/support-matrix/index.html
[Nvidia Container Runtime]: https://developer.nvidia.com/container-runtime
[linux install instructions]: https://docs.python-guide.org/starting/install3/linux/
[Microsoft Visual C++ Redistributable]: https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170
[an official installer]: https://www.python.org/downloads/
[CUDA Toolkit Downloads]: https://developer.nvidia.com/cuda-downloads
@@ -49,7 +49,6 @@ To use a community workflow, download the `.json` node graph file and load it in

+ [BriaAI Background Remove](#briaai-remove-background)
+ [Remove Background](#remove-background)
+ [Retroize](#retroize)
+ [Stereogram](#stereogram-nodes)
+ [Size Stepper Nodes](#size-stepper-nodes)
+ [Simple Skin Detection](#simple-skin-detection)
+ [Text font to Image](#text-font-to-image)

@@ -527,16 +526,6 @@ View:

<img src="https://github.com/Ar7ific1al/InvokeAI_nodes_retroize/assets/2306586/de8b4fa6-324c-4c2d-b36c-297600c73974" width="500" />

--------------------------------
### Stereogram Nodes

**Description:** A set of custom nodes for InvokeAI to create cross-view or parallel-view stereograms. Stereograms are 2D images that, when viewed properly, reveal a 3D scene. Check out [r/crossview](https://www.reddit.com/r/CrossView/) for tutorials.

**Node Link:** https://github.com/simonfuhrmann/invokeai-stereo

**Example Workflow and Output**
</br><img src="https://raw.githubusercontent.com/simonfuhrmann/invokeai-stereo/refs/heads/main/docs/example_promo_03.jpg" width="600" />

--------------------------------
### Simple Skin Detection
@@ -31,7 +31,7 @@ class DeleteBoardResult(BaseModel):

    response_model=BoardDTO,
)
async def create_board(
    board_name: str = Query(description="The name of the board to create", max_length=300),
    board_name: str = Query(description="The name of the board to create"),
    is_private: bool = Query(default=False, description="Whether the board is private"),
) -> BoardDTO:
    """Creates a board"""
@@ -858,18 +858,6 @@ async def get_stats() -> Optional[CacheStats]:

    return ApiDependencies.invoker.services.model_manager.load.ram_cache.stats


@model_manager_router.post(
    "/empty_model_cache",
    operation_id="empty_model_cache",
    status_code=200,
)
async def empty_model_cache() -> None:
    """Drop all models from the model cache to free RAM/VRAM. 'Locked' models that are in active use will not be dropped."""
    # Request 1000GB of room in order to force the cache to drop all models.
    ApiDependencies.invoker.services.logger.info("Emptying model cache.")
    ApiDependencies.invoker.services.model_manager.load.ram_cache.make_room(1000 * 2**30)
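A hedged usage sketch for this endpoint, assuming the server runs on the default port and the model manager routes are mounted under `/api/v2/models` (verify the prefix in your build):

```bash
curl -X POST http://localhost:9090/api/v2/models/empty_model_cache
```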
class HFTokenStatus(str, Enum):
    VALID = "valid"
    INVALID = "invalid"
@@ -10,7 +10,6 @@ from invokeai.app.services.session_queue.session_queue_common import (

    QUEUE_ITEM_STATUS,
    Batch,
    BatchStatus,
    CancelAllExceptCurrentResult,
    CancelByBatchIDsResult,
    CancelByDestinationResult,
    ClearResult,

@@ -95,18 +94,6 @@ async def Pause(

    return ApiDependencies.invoker.services.session_processor.pause()


@session_queue_router.put(
    "/{queue_id}/cancel_all_except_current",
    operation_id="cancel_all_except_current",
    responses={200: {"model": CancelAllExceptCurrentResult}},
)
async def cancel_all_except_current(
    queue_id: str = Path(description="The queue id to perform this operation on"),
) -> CancelAllExceptCurrentResult:
    """Immediately cancels all queue items except in-processing items"""
    return ApiDependencies.invoker.services.session_queue.cancel_all_except_current(queue_id=queue_id)
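A hedged usage sketch for this endpoint, assuming the queue routes are mounted under `/api/v1/queue` and the default queue id is `default` (both are assumptions; verify against your build):

```bash
curl -X PUT http://localhost:9090/api/v1/queue/default/cancel_all_except_current
```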
@session_queue_router.put(
    "/{queue_id}/cancel_by_batch_ids",
    operation_id="cancel_by_batch_ids",
@@ -25,7 +25,6 @@ async def parse_dynamicprompts(

    prompt: str = Body(description="The prompt to parse with dynamicprompts"),
    max_prompts: int = Body(ge=1, le=10000, default=1000, description="The max number of prompts to generate"),
    combinatorial: bool = Body(default=True, description="Whether to use the combinatorial generator"),
    seed: int | None = Body(None, description="The seed to use for random generation. Only used if not combinatorial"),
) -> DynamicPromptsResponse:
    """Creates a batch process"""
    max_prompts = min(max_prompts, 10000)

@@ -36,7 +35,7 @@ async def parse_dynamicprompts(

        generator = CombinatorialPromptGenerator()
        prompts = generator.generate(prompt, max_prompts=max_prompts)
    else:
        generator = RandomPromptGenerator(seed=seed)
        generator = RandomPromptGenerator()
        prompts = generator.generate(prompt, num_images=max_prompts)
    except ParseException as e:
        prompts = [prompt]
@@ -59,32 +59,11 @@ logger.info(f"Using torch device: {torch_device_name}")

loop = asyncio.new_event_loop()

# We may change the port if the default is in use; this global variable is used to store the port so that we can log
# the correct port when the server starts in the lifespan handler.
port = app_config.port


@asynccontextmanager
async def lifespan(app: FastAPI):
    # Add startup event to load dependencies
    ApiDependencies.initialize(config=app_config, event_handler_id=event_handler_id, loop=loop, logger=logger)

    # Log the server address when it starts - in case the network log level is not high enough to see the startup log
    proto = "https" if app_config.ssl_certfile else "http"
    msg = f"Invoke running on {proto}://{app_config.host}:{port} (Press CTRL+C to quit)"

    # Logging this way ignores the logger's log level and _always_ logs the message
    record = logger.makeRecord(
        name=logger.name,
        level=logging.INFO,
        fn="",
        lno=0,
        msg=msg,
        args=(),
        exc_info=None,
    )
    logger.handle(record)

    yield
    # Shut down threads
    ApiDependencies.shutdown()

@@ -227,7 +206,6 @@ def invoke_api() -> None:

    else:
        jurigged.watch(logger=InvokeAILogger.get_logger(name="jurigged").info)

    global port
    port = find_port(app_config.port)
    if port != app_config.port:
        logger.warn(f"Port {app_config.port} in use, using port {port}")
@@ -239,17 +217,18 @@ def invoke_api() -> None:
|
||||
host=app_config.host,
|
||||
port=port,
|
||||
loop="asyncio",
|
||||
log_level=app_config.log_level_network,
|
||||
log_level=app_config.log_level,
|
||||
ssl_certfile=app_config.ssl_certfile,
|
||||
ssl_keyfile=app_config.ssl_keyfile,
|
||||
)
|
||||
server = uvicorn.Server(config)
|
||||
|
||||
# replace uvicorn's loggers with InvokeAI's for consistent appearance
|
||||
uvicorn_logger = InvokeAILogger.get_logger("uvicorn")
|
||||
uvicorn_logger.handlers.clear()
|
||||
for hdlr in logger.handlers:
|
||||
uvicorn_logger.addHandler(hdlr)
|
||||
for logname in ["uvicorn.access", "uvicorn"]:
|
||||
log = InvokeAILogger.get_logger(logname)
|
||||
log.handlers.clear()
|
||||
for ch in logger.handlers:
|
||||
log.addHandler(ch)
|
||||
|
||||
loop.run_until_complete(server.serve())
|
||||
|
||||
|
||||
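The `logger.makeRecord(...)` / `logger.handle(record)` pair in the lifespan handler above is the standard way to emit a message that bypasses the logger's level filter: `Logger.handle` dispatches a record to the handlers without re-checking `isEnabledFor`. A self-contained sketch of the same technique with the stdlib:

import logging

logging.basicConfig()
logger = logging.getLogger("startup")
logger.setLevel(logging.ERROR)  # INFO messages would normally be filtered out

record = logger.makeRecord(
    name=logger.name, level=logging.INFO, fn="", lno=0,
    msg="server listening on http://127.0.0.1:9090", args=(), exc_info=None,
)
logger.handle(record)  # emitted despite the ERROR logger level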
@@ -1,237 +0,0 @@
from typing import Literal

from pydantic import BaseModel

from invokeai.app.invocations.baseinvocation import (
    BaseInvocation,
    BaseInvocationOutput,
    Classification,
    invocation,
    invocation_output,
)
from invokeai.app.invocations.fields import (
    ImageField,
    Input,
    InputField,
    OutputField,
)
from invokeai.app.invocations.primitives import (
    FloatOutput,
    ImageOutput,
    IntegerOutput,
    StringOutput,
)
from invokeai.app.services.shared.invocation_context import InvocationContext

BATCH_GROUP_IDS = Literal[
    "None",
    "Group 1",
    "Group 2",
    "Group 3",
    "Group 4",
    "Group 5",
]


class NotExecutableNodeError(Exception):
    def __init__(self, message: str = "This class should never be executed or instantiated directly."):
        super().__init__(message)

    pass


class BaseBatchInvocation(BaseInvocation):
    batch_group_id: BATCH_GROUP_IDS = InputField(
        default="None",
        description="The ID of this batch node's group. If provided, all batch nodes with the same ID will be 'zipped' before execution, and all nodes' collections must be of the same size.",
        input=Input.Direct,
        title="Batch Group",
    )

    def __init__(self):
        raise NotExecutableNodeError()


@invocation(
    "image_batch",
    title="Image Batch",
    tags=["primitives", "image", "batch", "special"],
    category="primitives",
    version="1.0.0",
    classification=Classification.Special,
)
class ImageBatchInvocation(BaseBatchInvocation):
    """Create a batched generation, where the workflow is executed once for each image in the batch."""

    images: list[ImageField] = InputField(
        default=[], min_length=1, description="The images to batch over", input=Input.Direct
    )

    def invoke(self, context: InvocationContext) -> ImageOutput:
        raise NotExecutableNodeError()


@invocation(
    "string_batch",
    title="String Batch",
    tags=["primitives", "string", "batch", "special"],
    category="primitives",
    version="1.0.0",
    classification=Classification.Special,
)
class StringBatchInvocation(BaseBatchInvocation):
    """Create a batched generation, where the workflow is executed once for each string in the batch."""

    strings: list[str] = InputField(
        default=[],
        min_length=1,
        description="The strings to batch over",
    )

    def invoke(self, context: InvocationContext) -> StringOutput:
        raise NotExecutableNodeError()


@invocation_output("string_generator_output")
class StringGeneratorOutput(BaseInvocationOutput):
    """Base class for nodes that output a collection of strings"""

    strings: list[str] = OutputField(description="The generated strings")


class StringGeneratorField(BaseModel):
    pass


@invocation(
    "string_generator",
    title="String Generator",
    tags=["primitives", "string", "number", "batch", "special"],
    category="primitives",
    version="1.0.0",
    classification=Classification.Special,
)
class StringGenerator(BaseInvocation):
    """Generates a range of strings for use in a batched generation"""

    generator: StringGeneratorField = InputField(
        description="The string generator.",
        input=Input.Direct,
        title="Generator Type",
    )

    def __init__(self):
        raise NotExecutableNodeError()

    def invoke(self, context: InvocationContext) -> StringGeneratorOutput:
        raise NotExecutableNodeError()


@invocation(
    "integer_batch",
    title="Integer Batch",
    tags=["primitives", "integer", "number", "batch", "special"],
    category="primitives",
    version="1.0.0",
    classification=Classification.Special,
)
class IntegerBatchInvocation(BaseBatchInvocation):
    """Create a batched generation, where the workflow is executed once for each integer in the batch."""

    integers: list[int] = InputField(
        default=[],
        min_length=1,
        description="The integers to batch over",
    )

    def invoke(self, context: InvocationContext) -> IntegerOutput:
        raise NotExecutableNodeError()


@invocation_output("integer_generator_output")
class IntegerGeneratorOutput(BaseInvocationOutput):
    integers: list[int] = OutputField(description="The generated integers")


class IntegerGeneratorField(BaseModel):
    pass


@invocation(
    "integer_generator",
    title="Integer Generator",
    tags=["primitives", "int", "number", "batch", "special"],
    category="primitives",
    version="1.0.0",
    classification=Classification.Special,
)
class IntegerGenerator(BaseInvocation):
    """Generates a range of integers for use in a batched generation"""

    generator: IntegerGeneratorField = InputField(
        description="The integer generator.",
        input=Input.Direct,
        title="Generator Type",
    )

    def __init__(self):
        raise NotExecutableNodeError()

    def invoke(self, context: InvocationContext) -> IntegerGeneratorOutput:
        raise NotExecutableNodeError()


@invocation(
    "float_batch",
    title="Float Batch",
    tags=["primitives", "float", "number", "batch", "special"],
    category="primitives",
    version="1.0.0",
    classification=Classification.Special,
)
class FloatBatchInvocation(BaseBatchInvocation):
    """Create a batched generation, where the workflow is executed once for each float in the batch."""

    floats: list[float] = InputField(
        default=[],
        min_length=1,
        description="The floats to batch over",
    )

    def invoke(self, context: InvocationContext) -> FloatOutput:
        raise NotExecutableNodeError()


@invocation_output("float_generator_output")
class FloatGeneratorOutput(BaseInvocationOutput):
    """Base class for nodes that output a collection of floats"""

    floats: list[float] = OutputField(description="The generated floats")


class FloatGeneratorField(BaseModel):
    pass


@invocation(
    "float_generator",
    title="Float Generator",
    tags=["primitives", "float", "number", "batch", "special"],
    category="primitives",
    version="1.0.0",
    classification=Classification.Special,
)
class FloatGenerator(BaseInvocation):
    """Generates a range of floats for use in a batched generation"""

    generator: FloatGeneratorField = InputField(
        description="The float generator.",
        input=Input.Direct,
        title="Generator Type",
    )

    def __init__(self):
        raise NotExecutableNodeError()

    def invoke(self, context: InvocationContext) -> FloatGeneratorOutput:
        raise NotExecutableNodeError()
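The `batch_group_id` field in the deleted file above is what lets several batch nodes be "zipped": collections in the same group advance in lockstep (and must be equal length), while ungrouped batch nodes multiply combinatorially. A toy sketch of the expansion rule (names are illustrative, not the queue's real implementation):

from itertools import product

strings = ["a cat", "a dog"]
seeds = [1, 2]
cfgs = [5.0, 7.5]

# Same group: zipped, must be equal length -> 2 pairings.
zipped_runs = list(zip(strings, seeds, strict=True))

# A batch node in a different group (or "None"): full cross product -> 4 runs.
for (prompt, seed), cfg in product(zipped_runs, cfgs):
    print(prompt, seed, cfg)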
@@ -63,6 +63,9 @@ class CompelInvocation(BaseInvocation):

    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> ConditioningOutput:
        tokenizer_info = context.models.load(self.clip.tokenizer)
        text_encoder_info = context.models.load(self.clip.text_encoder)

        def _lora_loader() -> Iterator[Tuple[ModelPatchRaw, float]]:
            for lora in self.clip.loras:
                lora_info = context.models.load(lora.lora)
@@ -73,18 +76,17 @@ class CompelInvocation(BaseInvocation):

        # loras = [(context.models.get(**lora.dict(exclude={"weight"})).context.model, lora.weight) for lora in self.clip.loras]

        text_encoder_info = context.models.load(self.clip.text_encoder)
        ti_list = generate_ti_list(self.prompt, text_encoder_info.config.base, context)

        with (
            # apply all patches while the model is on the target device
            text_encoder_info.model_on_device() as (cached_weights, text_encoder),
            context.models.load(self.clip.tokenizer) as tokenizer,
            tokenizer_info as tokenizer,
            LayerPatcher.apply_smart_model_patches(
                model=text_encoder,
                patches=_lora_loader(),
                prefix="lora_te_",
                dtype=text_encoder.dtype,
                dtype=TorchDevice.choose_torch_dtype(),
                cached_weights=cached_weights,
            ),
            # Apply CLIP Skip after LoRA to prevent LoRA application from failing on skipped layers.
@@ -103,7 +105,6 @@ class CompelInvocation(BaseInvocation):
            textual_inversion_manager=ti_manager,
            dtype_for_device_getter=TorchDevice.choose_torch_dtype,
            truncate_long_prompts=False,
            device=TorchDevice.choose_torch_device(),
        )

        conjunction = Compel.parse_prompt_string(self.prompt)
@@ -138,7 +139,9 @@ class SDXLPromptInvocationBase:
        lora_prefix: str,
        zero_on_empty: bool,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        tokenizer_info = context.models.load(clip_field.tokenizer)
        text_encoder_info = context.models.load(clip_field.text_encoder)

        # return zero on empty
        if prompt == "" and zero_on_empty:
            cpu_text_encoder = text_encoder_info.model
@@ -176,12 +179,12 @@ class SDXLPromptInvocationBase:
        with (
            # apply all patches while the model is on the target device
            text_encoder_info.model_on_device() as (cached_weights, text_encoder),
            context.models.load(clip_field.tokenizer) as tokenizer,
            tokenizer_info as tokenizer,
            LayerPatcher.apply_smart_model_patches(
                model=text_encoder,
                patches=_lora_loader(),
                prefix=lora_prefix,
                dtype=text_encoder.dtype,
                dtype=TorchDevice.choose_torch_dtype(),
                cached_weights=cached_weights,
            ),
            # Apply CLIP Skip after LoRA to prevent LoRA application from failing on skipped layers.
@@ -204,7 +207,6 @@ class SDXLPromptInvocationBase:
            truncate_long_prompts=False,  # TODO:
            returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,  # TODO: clip skip
            requires_pooled=get_pooled,
            device=TorchDevice.choose_torch_device(),
        )

        conjunction = Compel.parse_prompt_string(prompt)
@@ -222,6 +224,7 @@ class SDXLPromptInvocationBase:

        del tokenizer
        del text_encoder
        del tokenizer_info
        del text_encoder_info

        c = c.detach().to("cpu")
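The `_lora_loader()` generator in the hunks above is a deliberate memory pattern: because it is a generator, each LoRA is loaded only when the patcher pulls it, so at most one unapplied patch is in flight at a time. A stripped-down sketch of that laziness (paths and weights are placeholders):

from typing import Iterator, Tuple

def lora_loader(paths: list[str]) -> Iterator[Tuple[str, float]]:
    # Each LoRA is "loaded" only when the consumer asks for the next patch.
    for path in paths:
        print(f"loading {path}")
        yield (path, 0.75)

patches = lora_loader(["a.safetensors", "b.safetensors"])
first = next(patches)   # only "a" has been loaded at this point
second = next(patches)  # "b" is loaded on demand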
@@ -1,5 +1,7 @@
from typing import Literal

from invokeai.backend.util.devices import TorchDevice

LATENT_SCALE_FACTOR = 8
"""
HACK: Many nodes are currently hard-coded to use a fixed latent scale factor of 8. This is fragile, and will need to
@@ -10,3 +12,5 @@ The ratio of image:latent dimensions is LATENT_SCALE_FACTOR:1, or 8:1.

IMAGE_MODES = Literal["L", "RGB", "RGBA", "CMYK", "YCbCr", "LAB", "HSV", "I", "F"]
"""A literal type for PIL image modes supported by Invoke"""

DEFAULT_PRECISION = TorchDevice.choose_torch_dtype()
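The 8:1 ratio means a latent tensor's spatial dims are the image dims divided by 8, which is why nodes multiply by LATENT_SCALE_FACTOR when converting back to pixel space. For example:

LATENT_SCALE_FACTOR = 8
latent_h, latent_w = 64, 96                # e.g. latents.shape[-2:]
image_h = LATENT_SCALE_FACTOR * latent_h   # 512
image_w = LATENT_SCALE_FACTOR * latent_w   # 768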
@@ -6,6 +6,7 @@ from PIL import Image
from torchvision.transforms.functional import resize as tv_resize

from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.constants import DEFAULT_PRECISION
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, Input, InputField
from invokeai.app.invocations.image_to_latents import ImageToLatentsInvocation
from invokeai.app.invocations.model import VAEField
@@ -28,7 +29,11 @@ class CreateDenoiseMaskInvocation(BaseInvocation):
    image: Optional[ImageField] = InputField(default=None, description="Image which will be masked", ui_order=1)
    mask: ImageField = InputField(description="The mask to use when pasting", ui_order=2)
    tiled: bool = InputField(default=False, description=FieldDescriptions.tiled, ui_order=3)
    fp32: bool = InputField(default=False, description=FieldDescriptions.fp32, ui_order=4)
    fp32: bool = InputField(
        default=DEFAULT_PRECISION == torch.float32,
        description=FieldDescriptions.fp32,
        ui_order=4,
    )

    def prep_mask_tensor(self, mask_image: Image.Image) -> torch.Tensor:
        if mask_image.mode != "L":

@@ -7,6 +7,7 @@ from PIL import Image, ImageFilter
from torchvision.transforms.functional import resize as tv_resize

from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
from invokeai.app.invocations.constants import DEFAULT_PRECISION
from invokeai.app.invocations.fields import (
    DenoiseMaskField,
    FieldDescriptions,
@@ -75,7 +76,11 @@ class CreateGradientMaskInvocation(BaseInvocation):
        ui_order=7,
    )
    tiled: bool = InputField(default=False, description=FieldDescriptions.tiled, ui_order=8)
    fp32: bool = InputField(default=False, description=FieldDescriptions.fp32, ui_order=9)
    fp32: bool = InputField(
        default=DEFAULT_PRECISION == torch.float32,
        description=FieldDescriptions.fp32,
        ui_order=9,
    )

    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> GradientMaskOutput:

@@ -10,9 +10,7 @@ import torchvision.transforms as T
from diffusers.configuration_utils import ConfigMixin
from diffusers.models.adapter import T2IAdapter
from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
from diffusers.schedulers.scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from diffusers.schedulers.scheduling_dpmsolver_sde import DPMSolverSDEScheduler
from diffusers.schedulers.scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from diffusers.schedulers.scheduling_tcd import TCDScheduler
from diffusers.schedulers.scheduling_utils import SchedulerMixin as Scheduler
from PIL import Image
@@ -40,7 +38,6 @@ from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.controlnet_utils import prepare_control_image
from invokeai.backend.ip_adapter.ip_adapter import IPAdapter
from invokeai.backend.model_manager import BaseModelType, ModelVariantType
from invokeai.backend.model_manager.config import AnyModelConfig
from invokeai.backend.model_patcher import ModelPatcher
from invokeai.backend.patches.layer_patcher import LayerPatcher
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
@@ -86,14 +83,12 @@ def get_scheduler(
    scheduler_info: ModelIdentifierField,
    scheduler_name: str,
    seed: int,
    unet_config: AnyModelConfig,
) -> Scheduler:
    """Load a scheduler and apply some scheduler-specific overrides."""
    # TODO(ryand): Silently falling back to ddim seems like a bad idea. Look into why this was added and remove if
    # possible.
    scheduler_class, scheduler_extra_config = SCHEDULER_MAP.get(scheduler_name, SCHEDULER_MAP["ddim"])
    orig_scheduler_info = context.models.load(scheduler_info)

    with orig_scheduler_info as orig_scheduler:
        scheduler_config = orig_scheduler.config

@@ -105,17 +100,10 @@ def get_scheduler(
        "_backup": scheduler_config,
    }

    if hasattr(unet_config, "prediction_type"):
        scheduler_config["prediction_type"] = unet_config.prediction_type

    # make dpmpp_sde reproducible (the seed can only be passed in the initializer)
    if scheduler_class is DPMSolverSDEScheduler:
        scheduler_config["noise_sampler_seed"] = seed

    if scheduler_class is DPMSolverMultistepScheduler or scheduler_class is DPMSolverSinglestepScheduler:
        if scheduler_config["_class_name"] == "DEISMultistepScheduler" and scheduler_config["algorithm_type"] == "deis":
            scheduler_config["algorithm_type"] = "dpmsolver++"

    scheduler = scheduler_class.from_config(scheduler_config)

    # hack copied over from generate.py
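`get_scheduler` works by reading the loaded scheduler's config dict, layering overrides on top, and rebuilding via `from_config`. The same pattern in isolation with diffusers (the scheduler and override values here are illustrative):

from diffusers import DDIMScheduler

# Start from an existing config (normally taken from the loaded scheduler).
base_config = dict(DDIMScheduler().config)

# Layer on per-run overrides before rebuilding. For DPMSolverSDEScheduler the
# analogous override would be `noise_sampler_seed`, since that scheduler only
# accepts its seed at construction time.
base_config["prediction_type"] = "v_prediction"  # e.g. for SD2.x-style checkpoints

scheduler = DDIMScheduler.from_config(base_config)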
@@ -423,7 +411,6 @@ class DenoiseLatentsInvocation(BaseInvocation):
        context: InvocationContext,
        control_input: ControlField | list[ControlField] | None,
        latents_shape: List[int],
        device: torch.device,
        exit_stack: ExitStack,
        do_classifier_free_guidance: bool = True,
    ) -> list[ControlNetData] | None:
@@ -465,7 +452,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
                height=control_height_resize,
                # batch_size=batch_size * num_images_per_prompt,
                # num_images_per_prompt=num_images_per_prompt,
                device=device,
                device=control_model.device,
                dtype=control_model.dtype,
                control_mode=control_info.control_mode,
                resize_mode=control_info.resize_mode,
@@ -560,6 +547,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
        for single_ip_adapter in ip_adapters:
            with context.models.load(single_ip_adapter.ip_adapter_model) as ip_adapter_model:
                assert isinstance(ip_adapter_model, IPAdapter)
            image_encoder_model_info = context.models.load(single_ip_adapter.image_encoder_model)
            # `single_ip_adapter.image` could be a list or a single ImageField. Normalize to a list here.
            single_ipa_image_fields = single_ip_adapter.image
            if not isinstance(single_ipa_image_fields, list):
@@ -568,7 +556,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
            single_ipa_images = [
                context.images.get_pil(image.image_name, mode="RGB") for image in single_ipa_image_fields
            ]
            with context.models.load(single_ip_adapter.image_encoder_model) as image_encoder_model:
            with image_encoder_model_info as image_encoder_model:
                assert isinstance(image_encoder_model, CLIPVisionModelWithProjection)
                # Get image embeddings from CLIP and ImageProjModel.
                image_prompt_embeds, uncond_image_prompt_embeds = ip_adapter_model.get_image_embeds(
@@ -618,7 +606,6 @@ class DenoiseLatentsInvocation(BaseInvocation):
        context: InvocationContext,
        t2i_adapter: Optional[Union[T2IAdapterField, list[T2IAdapterField]]],
        latents_shape: list[int],
        device: torch.device,
        do_classifier_free_guidance: bool,
    ) -> Optional[list[T2IAdapterData]]:
        if t2i_adapter is None:
@@ -634,6 +621,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
        t2i_adapter_data = []
        for t2i_adapter_field in t2i_adapter:
            t2i_adapter_model_config = context.models.get_config(t2i_adapter_field.t2i_adapter_model.key)
            t2i_adapter_loaded_model = context.models.load(t2i_adapter_field.t2i_adapter_model)
            image = context.images.get_pil(t2i_adapter_field.image.image_name, mode="RGB")

            # The max_unet_downscale is the maximum amount that the UNet model downscales the latent image internally.
@@ -649,7 +637,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
                raise ValueError(f"Unexpected T2I-Adapter base model type: '{t2i_adapter_model_config.base}'.")

            t2i_adapter_model: T2IAdapter
            with context.models.load(t2i_adapter_field.t2i_adapter_model) as t2i_adapter_model:
            with t2i_adapter_loaded_model as t2i_adapter_model:
                total_downscale_factor = t2i_adapter_model.total_downscale_factor

                # Note: We have hard-coded `do_classifier_free_guidance=False`. This is because we only want to prepare
@@ -669,7 +657,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
                    width=control_width_resize,
                    height=control_height_resize,
                    num_channels=t2i_adapter_model.config["in_channels"],  # mypy treats this as a FrozenDict
                    device=device,
                    device=t2i_adapter_model.device,
                    dtype=t2i_adapter_model.dtype,
                    resize_mode=t2i_adapter_field.resize_mode,
                )
@@ -834,9 +822,6 @@ class DenoiseLatentsInvocation(BaseInvocation):
        seed, noise, latents = self.prepare_noise_and_latents(context, self.noise, self.latents)
        _, _, latent_height, latent_width = latents.shape

        # get the unet's config so that we can pass the base to sd_step_callback()
        unet_config = context.models.get_config(self.unet.unet.key)

        conditioning_data = self.get_conditioning_data(
            context=context,
            positive_conditioning_field=self.positive_conditioning,
@@ -856,7 +841,6 @@ class DenoiseLatentsInvocation(BaseInvocation):
            scheduler_info=self.unet.scheduler,
            scheduler_name=self.scheduler,
            seed=seed,
            unet_config=unet_config,
        )

        timesteps, init_timestep, scheduler_step_kwargs = self.init_scheduler(
@@ -868,6 +852,9 @@ class DenoiseLatentsInvocation(BaseInvocation):
            denoising_end=self.denoising_end,
        )

        # get the unet's config so that we can pass the base to sd_step_callback()
        unet_config = context.models.get_config(self.unet.unet.key)

        ### preview
        def step_callback(state: PipelineIntermediateState) -> None:
            context.util.sd_step_callback(state, unet_config.base)
@@ -939,8 +926,10 @@ class DenoiseLatentsInvocation(BaseInvocation):
        # ext: t2i/ip adapter
        ext_manager.run_callback(ExtensionCallbackType.SETUP, denoise_ctx)

        unet_info = context.models.load(self.unet.unet)
        assert isinstance(unet_info.model, UNet2DConditionModel)
        with (
            context.models.load(self.unet.unet).model_on_device() as (cached_weights, unet),
            unet_info.model_on_device() as (cached_weights, unet),
            ModelPatcher.patch_unet_attention_processor(unet, denoise_ctx.inputs.attention_processor_cls),
            # ext: controlnet
            ext_manager.patch_extensions(denoise_ctx),
@@ -961,7 +950,6 @@ class DenoiseLatentsInvocation(BaseInvocation):
    @torch.no_grad()
    @SilenceWarnings()  # This quenches the NSFW nag from diffusers.
    def _old_invoke(self, context: InvocationContext) -> LatentsOutput:
        device = TorchDevice.choose_torch_device()
        seed, noise, latents = self.prepare_noise_and_latents(context, self.noise, self.latents)

        mask, masked_latents, gradient_mask = self.prep_inpaint_mask(context, latents)
@@ -976,7 +964,6 @@ class DenoiseLatentsInvocation(BaseInvocation):
            context,
            self.t2i_adapter,
            latents.shape,
            device=device,
            do_classifier_free_guidance=True,
        )

@@ -1008,9 +995,11 @@ class DenoiseLatentsInvocation(BaseInvocation):
                del lora_info
            return

        unet_info = context.models.load(self.unet.unet)
        assert isinstance(unet_info.model, UNet2DConditionModel)
        with (
            ExitStack() as exit_stack,
            context.models.load(self.unet.unet).model_on_device() as (cached_weights, unet),
            unet_info.model_on_device() as (cached_weights, unet),
            ModelPatcher.apply_freeu(unet, self.unet.freeu_config),
            SeamlessExt.static_patch_model(unet, self.unet.seamless_axes),  # FIXME
            # Apply the LoRA after unet has been moved to its target device for faster patching.
@@ -1023,20 +1012,19 @@ class DenoiseLatentsInvocation(BaseInvocation):
            ),
        ):
            assert isinstance(unet, UNet2DConditionModel)
            latents = latents.to(device=device, dtype=unet.dtype)
            latents = latents.to(device=unet.device, dtype=unet.dtype)
            if noise is not None:
                noise = noise.to(device=device, dtype=unet.dtype)
                noise = noise.to(device=unet.device, dtype=unet.dtype)
            if mask is not None:
                mask = mask.to(device=device, dtype=unet.dtype)
                mask = mask.to(device=unet.device, dtype=unet.dtype)
            if masked_latents is not None:
                masked_latents = masked_latents.to(device=device, dtype=unet.dtype)
                masked_latents = masked_latents.to(device=unet.device, dtype=unet.dtype)

            scheduler = get_scheduler(
                context=context,
                scheduler_info=self.unet.scheduler,
                scheduler_name=self.scheduler,
                seed=seed,
                unet_config=unet_config,
            )

            pipeline = self.create_pipeline(unet, scheduler)
@@ -1046,7 +1034,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
                context=context,
                positive_conditioning_field=self.positive_conditioning,
                negative_conditioning_field=self.negative_conditioning,
                device=device,
                device=unet.device,
                dtype=unet.dtype,
                latent_height=latent_height,
                latent_width=latent_width,
@@ -1059,7 +1047,6 @@ class DenoiseLatentsInvocation(BaseInvocation):
                context=context,
                control_input=self.control,
                latents_shape=latents.shape,
                device=device,
                # do_classifier_free_guidance=(self.cfg_scale >= 1.0))
                do_classifier_free_guidance=True,
                exit_stack=exit_stack,
@@ -1077,7 +1064,7 @@ class DenoiseLatentsInvocation(BaseInvocation):

            timesteps, init_timestep, scheduler_step_kwargs = self.init_scheduler(
                scheduler,
                device=device,
                device=unet.device,
                steps=self.steps,
                denoising_start=self.denoising_start,
                denoising_end=self.denoising_end,
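A recurring change in these hunks is replacing an externally chosen `device` argument with the loaded model's own placement (`unet.device`, `control_model.device`, and so on), which removes any chance of the caller and the model cache disagreeing about where the weights actually live. The shape of the change, reduced to plain PyTorch (the `Linear` here is a stand-in for a loaded UNet; real diffusers models expose `.device`/`.dtype` directly):

import torch

unet = torch.nn.Linear(4, 4)  # stand-in model
latents = torch.randn(1, 4)

# Before: trust a device chosen by the caller.
device = torch.device("cpu")
latents_a = latents.to(device=device, dtype=next(unet.parameters()).dtype)

# After: follow wherever the model manager actually put the model.
model_device = next(unet.parameters()).device
latents_b = latents.to(device=model_device, dtype=next(unet.parameters()).dtype)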
@@ -300,13 +300,6 @@ class BoundingBoxField(BaseModel):
            raise ValueError(f"y_min ({self.y_min}) is greater than y_max ({self.y_max}).")
        return self

    def tuple(self) -> Tuple[int, int, int, int]:
        """
        Returns the bounding box as a tuple suitable for use with PIL's `Image.crop()` method.
        This method returns a tuple of the form (left, upper, right, lower) == (x_min, y_min, x_max, y_max).
        """
        return (self.x_min, self.y_min, self.x_max, self.y_max)


class MetadataField(RootModel[dict[str, Any]]):
    """
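The `tuple()` helper removed above existed because PIL's crop API wants exactly that ordering. For example:

from PIL import Image

image = Image.new("RGB", (512, 512))
x_min, y_min, x_max, y_max = 10, 20, 110, 220
cropped = image.crop((x_min, y_min, x_max, y_max))  # (left, upper, right, lower)
assert cropped.size == (100, 200)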
@@ -199,8 +199,8 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
            else None
        )

        transformer_config = context.models.get_config(self.transformer.transformer)
        is_schnell = "schnell" in getattr(transformer_config, "config_path", "")
        transformer_info = context.models.load(self.transformer.transformer)
        is_schnell = "schnell" in getattr(transformer_info.config, "config_path", "")

        # Calculate the timestep schedule.
        timesteps = get_schedule(
@@ -240,9 +240,6 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
        if len(timesteps) <= 1:
            return x

        if is_schnell and self.control_lora:
            raise ValueError("Control LoRAs cannot be used with FLUX Schnell")

        # Prepare the extra image conditioning tensor if a FLUX structural control image is provided.
        img_cond = self._prep_structural_control_img_cond(context)

@@ -276,7 +273,7 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
        # TODO(ryand): We should really do this in a separate invocation to benefit from caching.
        ip_adapter_fields = self._normalize_ip_adapter_fields()
        pos_image_prompt_clip_embeds, neg_image_prompt_clip_embeds = self._prep_ip_adapter_image_prompt_clip_embeds(
            ip_adapter_fields, context, device=x.device
            ip_adapter_fields, context
        )

        cfg_scale = self.prep_cfg_scale(
@@ -299,11 +296,9 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
        )

        # Load the transformer model.
        (cached_weights, transformer) = exit_stack.enter_context(
            context.models.load(self.transformer.transformer).model_on_device()
        )
        (cached_weights, transformer) = exit_stack.enter_context(transformer_info.model_on_device())
        assert isinstance(transformer, Flux)
        config = transformer_config
        config = transformer_info.config
        assert config is not None

        # Determine if the model is quantized.
@@ -514,18 +509,15 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
        # before loading the models. Then make sure that all VAE encoding is done before loading the ControlNets to
        # minimize peak memory.

        # First, load the ControlNet models so that we can determine the ControlNet types.
        controlnet_models = [context.models.load(controlnet.control_model) for controlnet in controlnets]

        # Calculate the controlnet conditioning tensors.
        # We do this before loading the ControlNet models because it may require running the VAE, and we are trying to
        # keep peak memory down.
        controlnet_conds: list[torch.Tensor] = []
        for controlnet in controlnets:
        for controlnet, controlnet_model in zip(controlnets, controlnet_models, strict=True):
            image = context.images.get_pil(controlnet.image.image_name)

            # HACK(ryand): We have to load the ControlNet model to determine whether the VAE needs to be run. We really
            # shouldn't have to load the model here. There's a risk that the model will be dropped from the model cache
            # before we load it into VRAM and thus we'll have to load it again (context:
            # https://github.com/invoke-ai/InvokeAI/issues/7513).
            controlnet_model = context.models.load(controlnet.control_model)
            if isinstance(controlnet_model.model, InstantXControlNetFlux):
                if self.controlnet_vae is None:
                    raise ValueError("A ControlNet VAE is required when using an InstantX FLUX ControlNet.")
@@ -555,8 +547,10 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):

        # Finally, load the ControlNet models and initialize the ControlNet extensions.
        controlnet_extensions: list[XLabsControlNetExtension | InstantXControlNetExtension] = []
        for controlnet, controlnet_cond in zip(controlnets, controlnet_conds, strict=True):
            model = exit_stack.enter_context(context.models.load(controlnet.control_model))
        for controlnet, controlnet_cond, controlnet_model in zip(
            controlnets, controlnet_conds, controlnet_models, strict=True
        ):
            model = exit_stack.enter_context(controlnet_model)

            if isinstance(model, XLabsControlNetFlux):
                controlnet_extensions.append(
@@ -626,7 +620,6 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
        self,
        ip_adapter_fields: list[IPAdapterField],
        context: InvocationContext,
        device: torch.device,
    ) -> tuple[list[torch.Tensor], list[torch.Tensor]]:
        """Run the IPAdapter CLIPVisionModel, returning image prompt embeddings."""
        clip_image_processor = CLIPImageProcessor()
@@ -666,11 +659,11 @@ class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
            assert isinstance(image_encoder_model, CLIPVisionModelWithProjection)

            clip_image: torch.Tensor = clip_image_processor(images=pos_images, return_tensors="pt").pixel_values
            clip_image = clip_image.to(device=device, dtype=image_encoder_model.dtype)
            clip_image = clip_image.to(device=image_encoder_model.device, dtype=image_encoder_model.dtype)
            pos_clip_image_embeds = image_encoder_model(clip_image).image_embeds

            clip_image = clip_image_processor(images=neg_images, return_tensors="pt").pixel_values
            clip_image = clip_image.to(device=device, dtype=image_encoder_model.dtype)
            clip_image = clip_image.to(device=image_encoder_model.device, dtype=image_encoder_model.dtype)
            neg_clip_image_embeds = image_encoder_model(clip_image).image_embeds

            pos_image_prompt_clip_embeds.append(pos_clip_image_embeds)
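The ControlNet hunks above trade a redundant second `context.models.load(...)` for a single up-front load whose handles are zipped back in later, which avoids the cache-eviction race described in the HACK comment. The shape of the refactor, reduced to plain Python (`load` is a stand-in for the real loader):

def load(name: str) -> dict:
    return {"name": name}  # stand-in for context.models.load(...)

controlnets = ["cn_a", "cn_b"]

# Load once, up front...
controlnet_models = [load(cn) for cn in controlnets]

# ...then iterate with the already-loaded handles instead of loading again.
for cn, cn_model in zip(controlnets, controlnet_models, strict=True):
    assert cn_model["name"] == cn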
@@ -8,7 +8,7 @@ from invokeai.app.invocations.baseinvocation import (
    invocation_output,
)
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
from invokeai.app.invocations.model import CLIPField, LoRAField, ModelIdentifierField, T5EncoderField, TransformerField
from invokeai.app.invocations.model import CLIPField, LoRAField, ModelIdentifierField, TransformerField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.config import BaseModelType

@@ -21,9 +21,6 @@ class FluxLoRALoaderOutput(BaseInvocationOutput):
        default=None, description=FieldDescriptions.transformer, title="FLUX Transformer"
    )
    clip: Optional[CLIPField] = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP")
    t5_encoder: Optional[T5EncoderField] = OutputField(
        default=None, description=FieldDescriptions.t5_encoder, title="T5 Encoder"
    )


@invocation(
@@ -31,7 +28,7 @@ class FluxLoRALoaderOutput(BaseInvocationOutput):
    title="FLUX LoRA",
    tags=["lora", "model", "flux"],
    category="model",
    version="1.2.0",
    version="1.1.0",
    classification=Classification.Prototype,
)
class FluxLoRALoaderInvocation(BaseInvocation):
@@ -53,12 +50,6 @@ class FluxLoRALoaderInvocation(BaseInvocation):
        description=FieldDescriptions.clip,
        input=Input.Connection,
    )
    t5_encoder: T5EncoderField | None = InputField(
        default=None,
        title="T5 Encoder",
        description=FieldDescriptions.t5_encoder,
        input=Input.Connection,
    )

    def invoke(self, context: InvocationContext) -> FluxLoRALoaderOutput:
        lora_key = self.lora.key
@@ -71,8 +62,6 @@ class FluxLoRALoaderInvocation(BaseInvocation):
            raise ValueError(f'LoRA "{lora_key}" already applied to transformer.')
        if self.clip and any(lora.lora.key == lora_key for lora in self.clip.loras):
            raise ValueError(f'LoRA "{lora_key}" already applied to CLIP encoder.')
        if self.t5_encoder and any(lora.lora.key == lora_key for lora in self.t5_encoder.loras):
            raise ValueError(f'LoRA "{lora_key}" already applied to T5 encoder.')

        output = FluxLoRALoaderOutput()

@@ -93,14 +82,6 @@ class FluxLoRALoaderInvocation(BaseInvocation):
                    weight=self.weight,
                )
            )
        if self.t5_encoder is not None:
            output.t5_encoder = self.t5_encoder.model_copy(deep=True)
            output.t5_encoder.loras.append(
                LoRAField(
                    lora=self.lora,
                    weight=self.weight,
                )
            )

        return output

@@ -110,14 +91,14 @@ class FluxLoRALoaderInvocation(BaseInvocation):
    title="FLUX LoRA Collection Loader",
    tags=["lora", "model", "flux"],
    category="model",
    version="1.3.0",
    version="1.1.0",
    classification=Classification.Prototype,
)
class FLUXLoRACollectionLoader(BaseInvocation):
    """Applies a collection of LoRAs to a FLUX transformer."""

    loras: Optional[LoRAField | list[LoRAField]] = InputField(
        default=None, description="LoRA models and weights. May be a single LoRA or collection.", title="LoRAs"
    loras: LoRAField | list[LoRAField] = InputField(
        description="LoRA models and weights. May be a single LoRA or collection.", title="LoRAs"
    )

    transformer: Optional[TransformerField] = InputField(
@@ -132,30 +113,13 @@ class FLUXLoRACollectionLoader(BaseInvocation):
        description=FieldDescriptions.clip,
        input=Input.Connection,
    )
    t5_encoder: T5EncoderField | None = InputField(
        default=None,
        title="T5 Encoder",
        description=FieldDescriptions.t5_encoder,
        input=Input.Connection,
    )

    def invoke(self, context: InvocationContext) -> FluxLoRALoaderOutput:
        output = FluxLoRALoaderOutput()
        loras = self.loras if isinstance(self.loras, list) else [self.loras]
        added_loras: list[str] = []

        if self.transformer is not None:
            output.transformer = self.transformer.model_copy(deep=True)

        if self.clip is not None:
            output.clip = self.clip.model_copy(deep=True)

        if self.t5_encoder is not None:
            output.t5_encoder = self.t5_encoder.model_copy(deep=True)

        for lora in loras:
            if lora is None:
                continue
            if lora.lora.key in added_loras:
                continue

@@ -166,13 +130,14 @@ class FLUXLoRACollectionLoader(BaseInvocation):

            added_loras.append(lora.lora.key)

            if self.transformer is not None and output.transformer is not None:
            if self.transformer is not None:
                if output.transformer is None:
                    output.transformer = self.transformer.model_copy(deep=True)
                output.transformer.loras.append(lora)

            if self.clip is not None and output.clip is not None:
            if self.clip is not None:
                if output.clip is None:
                    output.clip = self.clip.model_copy(deep=True)
                output.clip.loras.append(lora)

            if self.t5_encoder is not None and output.t5_encoder is not None:
                output.t5_encoder.loras.append(lora)

        return output

@@ -10,10 +10,6 @@ from invokeai.app.invocations.baseinvocation import (
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
from invokeai.app.invocations.model import CLIPField, ModelIdentifierField, T5EncoderField, TransformerField, VAEField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.t5_model_identifier import (
    preprocess_t5_encoder_model_identifier,
    preprocess_t5_tokenizer_model_identifier,
)
from invokeai.backend.flux.util import max_seq_lengths
from invokeai.backend.model_manager.config import (
    CheckpointConfigBase,
@@ -40,7 +36,7 @@ class FluxModelLoaderOutput(BaseInvocationOutput):
    title="Flux Main Model",
    tags=["model", "flux"],
    category="model",
    version="1.0.5",
    version="1.0.4",
    classification=Classification.Prototype,
)
class FluxModelLoaderInvocation(BaseInvocation):
@@ -78,8 +74,8 @@ class FluxModelLoaderInvocation(BaseInvocation):
        tokenizer = self.clip_embed_model.model_copy(update={"submodel_type": SubModelType.Tokenizer})
        clip_encoder = self.clip_embed_model.model_copy(update={"submodel_type": SubModelType.TextEncoder})

        tokenizer2 = preprocess_t5_tokenizer_model_identifier(self.t5_encoder_model)
        t5_encoder = preprocess_t5_encoder_model_identifier(self.t5_encoder_model)
        tokenizer2 = self.t5_encoder_model.model_copy(update={"submodel_type": SubModelType.Tokenizer2})
        t5_encoder = self.t5_encoder_model.model_copy(update={"submodel_type": SubModelType.TextEncoder2})

        transformer_config = context.models.get_config(transformer)
        assert isinstance(transformer_config, CheckpointConfigBase)
@@ -87,7 +83,7 @@ class FluxModelLoaderInvocation(BaseInvocation):
        return FluxModelLoaderOutput(
            transformer=TransformerField(transformer=transformer, loras=[]),
            clip=CLIPField(tokenizer=tokenizer, text_encoder=clip_encoder, loras=[], skipped_layers=0),
            t5_encoder=T5EncoderField(tokenizer=tokenizer2, text_encoder=t5_encoder, loras=[]),
            t5_encoder=T5EncoderField(tokenizer=tokenizer2, text_encoder=t5_encoder),
            vae=VAEField(vae=vae),
            max_seq_len=max_seq_lengths[transformer_config.config_path],
        )

@@ -2,7 +2,7 @@ from contextlib import ExitStack
from typing import Iterator, Literal, Optional, Tuple

import torch
from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5Tokenizer, T5TokenizerFast
from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5Tokenizer

from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
from invokeai.app.invocations.fields import (
@@ -19,9 +19,10 @@ from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.flux.modules.conditioner import HFEncoder
from invokeai.backend.model_manager.config import ModelFormat
from invokeai.backend.patches.layer_patcher import LayerPatcher
from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_CLIP_PREFIX, FLUX_LORA_T5_PREFIX
from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_CLIP_PREFIX
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData, FLUXConditioningInfo
from invokeai.backend.util.devices import TorchDevice


@invocation(
@@ -71,44 +72,12 @@ class FluxTextEncoderInvocation(BaseInvocation):
    def _t5_encode(self, context: InvocationContext) -> torch.Tensor:
        prompt = [self.prompt]

        t5_encoder_info = context.models.load(self.t5_encoder.text_encoder)
        t5_encoder_config = t5_encoder_info.config
        assert t5_encoder_config is not None

        with (
            t5_encoder_info.model_on_device() as (cached_weights, t5_text_encoder),
            context.models.load(self.t5_encoder.text_encoder) as t5_text_encoder,
            context.models.load(self.t5_encoder.tokenizer) as t5_tokenizer,
            ExitStack() as exit_stack,
        ):
            assert isinstance(t5_text_encoder, T5EncoderModel)
            assert isinstance(t5_tokenizer, (T5Tokenizer, T5TokenizerFast))

            # Determine if the model is quantized.
            # If the model is quantized, then we need to apply the LoRA weights as sidecar layers. This results in
            # slower inference than direct patching, but is agnostic to the quantization format.
            if t5_encoder_config.format in [ModelFormat.T5Encoder, ModelFormat.Diffusers]:
                model_is_quantized = False
            elif t5_encoder_config.format in [
                ModelFormat.BnbQuantizedLlmInt8b,
                ModelFormat.BnbQuantizednf4b,
                ModelFormat.GGUFQuantized,
            ]:
                model_is_quantized = True
            else:
                raise ValueError(f"Unsupported model format: {t5_encoder_config.format}")

            # Apply LoRA models to the T5 encoder.
            # Note: We apply the LoRA after the encoder has been moved to its target device for faster patching.
            exit_stack.enter_context(
                LayerPatcher.apply_smart_model_patches(
                    model=t5_text_encoder,
                    patches=self._t5_lora_iterator(context),
                    prefix=FLUX_LORA_T5_PREFIX,
                    dtype=t5_text_encoder.dtype,
                    cached_weights=cached_weights,
                    force_sidecar_patching=model_is_quantized,
                )
            )
            assert isinstance(t5_tokenizer, T5Tokenizer)

            t5_encoder = HFEncoder(t5_text_encoder, t5_tokenizer, False, self.t5_max_seq_len)

@@ -120,10 +89,7 @@ class FluxTextEncoderInvocation(BaseInvocation):

    def _clip_encode(self, context: InvocationContext) -> torch.Tensor:
        prompt = [self.prompt]

        clip_text_encoder_info = context.models.load(self.clip.text_encoder)
        clip_text_encoder_config = clip_text_encoder_info.config
        assert clip_text_encoder_config is not None

        with (
            clip_text_encoder_info.model_on_device() as (cached_weights, clip_text_encoder),
@@ -133,6 +99,9 @@ class FluxTextEncoderInvocation(BaseInvocation):
            assert isinstance(clip_text_encoder, CLIPTextModel)
            assert isinstance(clip_tokenizer, CLIPTokenizer)

            clip_text_encoder_config = clip_text_encoder_info.config
            assert clip_text_encoder_config is not None

            # Apply LoRA models to the CLIP encoder.
            # Note: We apply the LoRA after the transformer has been moved to its target device for faster patching.
            if clip_text_encoder_config.format in [ModelFormat.Diffusers]:
@@ -142,7 +111,7 @@ class FluxTextEncoderInvocation(BaseInvocation):
                        model=clip_text_encoder,
                        patches=self._clip_lora_iterator(context),
                        prefix=FLUX_LORA_CLIP_PREFIX,
                        dtype=clip_text_encoder.dtype,
                        dtype=TorchDevice.choose_torch_dtype(),
                        cached_weights=cached_weights,
                    )
                )
@@ -164,10 +133,3 @@ class FluxTextEncoderInvocation(BaseInvocation):
        assert isinstance(lora_info.model, ModelPatchRaw)
        yield (lora_info.model, lora.weight)
        del lora_info

    def _t5_lora_iterator(self, context: InvocationContext) -> Iterator[Tuple[ModelPatchRaw, float]]:
        for lora in self.t5_encoder.loras:
            lora_info = context.models.load(lora.lora)
            assert isinstance(lora_info.model, ModelPatchRaw)
            yield (lora_info.model, lora.weight)
            del lora_info
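The T5/CLIP hunks above gate LoRA patching on the model format: quantized weights (bitsandbytes, GGUF) cannot be patched in place, so patches are forced into "sidecar" layers instead. The decision logic, isolated into a sketch (the format tags here are illustrative strings standing in for the real ModelFormat enum members):

QUANTIZED_FORMATS = {"bnb_llm_int8", "bnb_nf4", "gguf_quantized"}  # illustrative tags

def should_force_sidecar(model_format: str) -> bool:
    if model_format in {"t5_encoder", "diffusers"}:
        return False  # full-precision weights: patch directly for speed
    if model_format in QUANTIZED_FORMATS:
        return True   # quantized weights: sidecar layers, slower but format-agnostic
    raise ValueError(f"Unsupported model format: {model_format}")

assert should_force_sidecar("gguf_quantized") is True
assert should_force_sidecar("diffusers") is False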
@@ -25,7 +25,7 @@ from invokeai.backend.util.devices import TorchDevice
    title="FLUX Latents to Image",
    tags=["latents", "image", "vae", "l2i", "flux"],
    category="latents",
    version="1.0.1",
    version="1.0.0",
)
class FluxVaeDecodeInvocation(BaseInvocation, WithMetadata, WithBoard):
    """Generates an image from latents."""
@@ -42,16 +42,15 @@ class FluxVaeDecodeInvocation(BaseInvocation, WithMetadata, WithBoard):
    def _estimate_working_memory(self, latents: torch.Tensor, vae: AutoEncoder) -> int:
        """Estimate the working memory required by the invocation in bytes."""
        # It was found experimentally that the peak working memory scales linearly with the number of pixels and the
        # element size (precision).
        # element size (precision). This estimate is accurate for both SD1 and SDXL.
        out_h = LATENT_SCALE_FACTOR * latents.shape[-2]
        out_w = LATENT_SCALE_FACTOR * latents.shape[-1]
        element_size = next(vae.parameters()).element_size()
        scaling_constant = 1090  # Determined experimentally.
        # TODO(ryand): Need to tune this value, it was copied from the SD1 implementation.
        scaling_constant = 960  # Determined experimentally.
        working_memory = out_h * out_w * element_size * scaling_constant

        # We add a 20% buffer to the working memory estimate to be safe.
        working_memory = working_memory * 1.2
        return int(working_memory)
        return working_memory
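The estimate above is a straight linear model in output pixels and element size, plus a 20% safety margin. Plugging in a concrete case, using the 1090 constant from one side of the hunk (a 1024x1024 output decoded in fp16):

out_h = out_w = 1024
element_size = 2        # bytes per element for fp16
scaling_constant = 1090
working_memory = int(out_h * out_w * element_size * scaling_constant * 1.2)
print(working_memory / 2**30)  # ~2.6 GiB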
@@ -21,7 +21,7 @@ class IdealSizeOutput(BaseInvocationOutput):
    "ideal_size",
    title="Ideal Size",
    tags=["latents", "math", "ideal_size"],
    version="1.0.4",
    version="1.0.3",
)
class IdealSizeInvocation(BaseInvocation):
    """Calculates the ideal size for generation to avoid duplication"""
@@ -41,16 +41,11 @@ class IdealSizeInvocation(BaseInvocation):
    def invoke(self, context: InvocationContext) -> IdealSizeOutput:
        unet_config = context.models.get_config(self.unet.unet.key)
        aspect = self.width / self.height

        if unet_config.base == BaseModelType.StableDiffusion1:
            dimension = 512
        elif unet_config.base == BaseModelType.StableDiffusion2:
        dimension: float = 512
        if unet_config.base == BaseModelType.StableDiffusion2:
            dimension = 768
        elif unet_config.base in (BaseModelType.StableDiffusionXL, BaseModelType.Flux, BaseModelType.StableDiffusion3):
        elif unet_config.base == BaseModelType.StableDiffusionXL:
            dimension = 1024
        else:
            raise ValueError(f"Unsupported model type: {unet_config.base}")

        dimension = dimension * self.multiplier
        min_dimension = math.floor(dimension * 0.5)
        model_area = dimension * dimension  # hardcoded for now since all models are trained on square images
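Given the base dimension, the node derives a target area and splits it according to the requested aspect ratio. A plausible reading of the arithmetic, as a sketch (the exact downstream rounding and clamping live outside this hunk, so treat this as illustrative):

import math

aspect = 1920 / 1080          # requested aspect ratio
dimension = 1024.0            # e.g. SDXL base, multiplier = 1
model_area = dimension * dimension

# Preserve the training-resolution area while matching the aspect ratio.
height = math.sqrt(model_area / aspect)
width = height * aspect
print(round(width), round(height))  # ~1365 x 768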
@@ -13,7 +13,6 @@ from invokeai.app.invocations.baseinvocation import (
)
from invokeai.app.invocations.constants import IMAGE_MODES
from invokeai.app.invocations.fields import (
    BoundingBoxField,
    ColorField,
    FieldDescriptions,
    ImageField,
@@ -24,7 +23,6 @@ from invokeai.app.invocations.fields import (
from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.services.image_records.image_records_common import ImageCategory
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.misc import SEED_MAX
from invokeai.backend.image_util.invisible_watermark import InvisibleWatermark
from invokeai.backend.image_util.safety_checker import SafetyChecker

@@ -163,12 +161,12 @@ class ImagePasteInvocation(BaseInvocation, WithMetadata, WithBoard):
    crop: bool = InputField(default=False, description="Crop to base image dimensions")

    def invoke(self, context: InvocationContext) -> ImageOutput:
        base_image = context.images.get_pil(self.base_image.image_name, mode="RGBA")
        image = context.images.get_pil(self.image.image_name, mode="RGBA")
        base_image = context.images.get_pil(self.base_image.image_name)
        image = context.images.get_pil(self.image.image_name)
        mask = None
        if self.mask is not None:
            mask = context.images.get_pil(self.mask.image_name, mode="L")
            mask = ImageOps.invert(mask)
            mask = context.images.get_pil(self.mask.image_name)
            mask = ImageOps.invert(mask.convert("L"))
            # TODO: probably shouldn't invert mask here... should user be required to do it?

        min_x = min(0, self.x)
@@ -178,11 +176,7 @@ class ImagePasteInvocation(BaseInvocation, WithMetadata, WithBoard):

        new_image = Image.new(mode="RGBA", size=(max_x - min_x, max_y - min_y), color=(0, 0, 0, 0))
        new_image.paste(base_image, (abs(min_x), abs(min_y)))

        # Create a temporary image to paste the image with transparency
        temp_image = Image.new("RGBA", new_image.size)
        temp_image.paste(image, (max(0, self.x), max(0, self.y)), mask=mask)
        new_image = Image.alpha_composite(new_image, temp_image)
        new_image.paste(image, (max(0, self.x), max(0, self.y)), mask=mask)

        if self.crop:
            base_w, base_h = base_image.size
@@ -307,44 +301,14 @@ class ImageBlurInvocation(BaseInvocation, WithMetadata, WithBoard):
    blur_type: Literal["gaussian", "box"] = InputField(default="gaussian", description="The type of blur")

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.images.get_pil(self.image.image_name, mode="RGBA")
        image = context.images.get_pil(self.image.image_name)

        # Split the image into RGBA channels
        r, g, b, a = image.split()

        # Premultiply RGB channels by alpha
        premultiplied_image = ImageChops.multiply(image, a.convert("RGBA"))
        premultiplied_image.putalpha(a)

        # Apply the blur
        blur = (
            ImageFilter.GaussianBlur(self.radius) if self.blur_type == "gaussian" else ImageFilter.BoxBlur(self.radius)
        )
        blurred_image = premultiplied_image.filter(blur)
        blur_image = image.filter(blur)

        # Split the blurred image into RGBA channels
        r, g, b, a_orig = blurred_image.split()

        # Convert to float using NumPy. float 32/64 division are much faster than float 16
        r = numpy.array(r, dtype=numpy.float32)
        g = numpy.array(g, dtype=numpy.float32)
        b = numpy.array(b, dtype=numpy.float32)
        a = numpy.array(a_orig, dtype=numpy.float32) / 255.0  # Normalize alpha to [0, 1]

        # Unpremultiply RGB channels by alpha
        r /= a + 1e-6  # Add a small epsilon to avoid division by zero
        g /= a + 1e-6
        b /= a + 1e-6

        # Convert back to PIL images
        r = Image.fromarray(numpy.uint8(numpy.clip(r, 0, 255)))
        g = Image.fromarray(numpy.uint8(numpy.clip(g, 0, 255)))
        b = Image.fromarray(numpy.uint8(numpy.clip(b, 0, 255)))

        # Merge back into a single image
        result_image = Image.merge("RGBA", (r, g, b, a_orig))

        image_dto = context.images.save(image=result_image)
        image_dto = context.images.save(image=blur_image)

        return ImageOutput.build(image_dto)
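The blur rewrite above premultiplies RGB by alpha before filtering and unpremultiplies afterwards, which stops fully transparent (black) pixels from bleeding dark halos into blurred edges. The core of the technique, self-contained and following the same PIL/NumPy calls the hunk uses:

import numpy
from PIL import Image, ImageChops, ImageFilter

image = Image.new("RGBA", (64, 64), (255, 0, 0, 255))

# Premultiply: scale RGB by alpha so transparent pixels contribute nothing.
r, g, b, a = image.split()
premultiplied = ImageChops.multiply(image, a.convert("RGBA"))
premultiplied.putalpha(a)

blurred = premultiplied.filter(ImageFilter.GaussianBlur(4))

# Unpremultiply: divide RGB back out by alpha (epsilon guards divide-by-zero).
r2, g2, b2, a2 = blurred.split()
alpha = numpy.array(a2, dtype=numpy.float32) / 255.0
channels = [numpy.array(c, dtype=numpy.float32) / (alpha + 1e-6) for c in (r2, g2, b2)]
channels = [Image.fromarray(numpy.uint8(numpy.clip(c, 0, 255))) for c in channels]
result = Image.merge("RGBA", (*channels, a2))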
@@ -843,7 +807,7 @@ CHANNEL_FORMATS = {
"value",
],
category="image",
version="1.2.3",
version="1.2.2",
)
class ImageChannelOffsetInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Add or subtract a value from a specific color channel of an image."""
@@ -853,22 +817,18 @@ class ImageChannelOffsetInvocation(BaseInvocation, WithMetadata, WithBoard):
offset: int = InputField(default=0, ge=-255, le=255, description="The amount to adjust the channel by")

def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.images.get_pil(self.image.image_name, "RGBA")
pil_image = context.images.get_pil(self.image.image_name)

# extract the channel and mode from the input and reference tuple
mode = CHANNEL_FORMATS[self.channel][0]
channel_number = CHANNEL_FORMATS[self.channel][1]

# Convert PIL image to new format
converted_image = numpy.array(image.convert(mode)).astype(int)
converted_image = numpy.array(pil_image.convert(mode)).astype(int)
image_channel = converted_image[:, :, channel_number]

if self.channel == "Hue (HSV)":
# loop around the values because hue is special
image_channel = (image_channel + self.offset) % 256
else:
# Adjust the value, clipping to 0..255
image_channel = numpy.clip(image_channel + self.offset, 0, 255)
# Adjust the value, clipping to 0..255
image_channel = numpy.clip(image_channel + self.offset, 0, 255)

# Put the channel back into the image
converted_image[:, :, channel_number] = image_channel
@@ -876,10 +836,6 @@ class ImageChannelOffsetInvocation(BaseInvocation, WithMetadata, WithBoard):
# Convert back to RGBA format and output
pil_image = Image.fromarray(converted_image.astype(numpy.uint8), mode=mode).convert("RGBA")

# restore the alpha channel
if self.channel != "Alpha (RGBA)":
pil_image.putalpha(image.getchannel("A"))

image_dto = context.images.save(image=pil_image)

return ImageOutput.build(image_dto)

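In the offset hunk above, hue is handled with a wraparound rather than a clamp: PIL encodes hue as 0..255 with the ends meeting, so an offset wraps with modulo 256 while value-like channels clip at the range limits. A small sketch of the two behaviors (the helper name is illustrative):

import numpy

def offset_channel(channel: numpy.ndarray, offset: int, wrap: bool) -> numpy.ndarray:
    if wrap:
        # Hue is an angle: 0 and 256 represent the same color, so wrap around.
        return (channel.astype(int) + offset) % 256
    # Value-like channels saturate at the ends of the range instead.
    return numpy.clip(channel.astype(int) + offset, 0, 255)
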
@@ -907,7 +863,7 @@ class ImageChannelOffsetInvocation(BaseInvocation, WithMetadata, WithBoard):
"value",
],
category="image",
version="1.2.3",
version="1.2.2",
)
class ImageChannelMultiplyInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Scale a specific color channel of an image."""
@@ -918,14 +874,14 @@ class ImageChannelMultiplyInvocation(BaseInvocation, WithMetadata, WithBoard):
invert_channel: bool = InputField(default=False, description="Invert the channel after scaling")

def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.images.get_pil(self.image.image_name)
pil_image = context.images.get_pil(self.image.image_name)

# extract the channel and mode from the input and reference tuple
mode = CHANNEL_FORMATS[self.channel][0]
channel_number = CHANNEL_FORMATS[self.channel][1]

# Convert PIL image to new format
converted_image = numpy.array(image.convert(mode)).astype(float)
converted_image = numpy.array(pil_image.convert(mode)).astype(float)
image_channel = converted_image[:, :, channel_number]

# Adjust the value, clipping to 0..255
@@ -941,10 +897,6 @@ class ImageChannelMultiplyInvocation(BaseInvocation, WithMetadata, WithBoard):
# Convert back to RGBA format and output
pil_image = Image.fromarray(converted_image.astype(numpy.uint8), mode=mode).convert("RGBA")

# restore the alpha channel
if self.channel != "Alpha (RGBA)":
pil_image.putalpha(image.getchannel("A"))

image_dto = context.images.save(image=pil_image)

return ImageOutput.build(image_dto)

@@ -1010,10 +962,10 @@ class CanvasPasteBackInvocation(BaseInvocation, WithMetadata, WithBoard):

@invocation(
"mask_from_id",
title="Mask from Segmented Image",
title="Mask from ID",
tags=["image", "mask", "id"],
category="image",
version="1.0.1",
version="1.0.0",
)
class MaskFromIDInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Generate a mask for a particular color in an ID Map"""
@@ -1023,24 +975,40 @@ class MaskFromIDInvocation(BaseInvocation, WithMetadata, WithBoard):
threshold: int = InputField(default=100, description="Threshold for color detection")
invert: bool = InputField(default=False, description="Whether or not to invert the mask")

def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.images.get_pil(self.image.image_name, mode="RGBA")
def rgba_to_hex(self, rgba_color: tuple[int, int, int, int]):
r, g, b, a = rgba_color
hex_code = "#{:02X}{:02X}{:02X}{:02X}".format(r, g, b, int(a * 255))
return hex_code

np_color = numpy.array(self.color.tuple())
def id_to_mask(self, id_mask: Image.Image, color: tuple[int, int, int, int], threshold: int = 100):
if id_mask.mode != "RGB":
id_mask = id_mask.convert("RGB")

# We could use the tuple directly, but I'll leave this rgba_to_hex here
# in case anyone prefers using hex codes directly instead of the color picker
hex_color_str = self.rgba_to_hex(color)
rgb_color = numpy.array([int(hex_color_str[i : i + 2], 16) for i in (1, 3, 5)])

# Maybe there's a faster way to calculate this distance, but I can't think of one right now.
color_distance = numpy.linalg.norm(image - np_color, axis=-1)
color_distance = numpy.linalg.norm(id_mask - rgb_color, axis=-1)

# Create a mask based on the threshold and the distance calculated above
binary_mask = (color_distance < self.threshold).astype(numpy.uint8) * 255
binary_mask = (color_distance < threshold).astype(numpy.uint8) * 255

# Convert the mask back to PIL
binary_mask_pil = Image.fromarray(binary_mask)

if self.invert:
binary_mask_pil = ImageOps.invert(binary_mask_pil)
return binary_mask_pil

image_dto = context.images.save(image=binary_mask_pil, image_category=ImageCategory.MASK)
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.images.get_pil(self.image.image_name)

mask = self.id_to_mask(image, self.color.tuple(), self.threshold)

if self.invert:
mask = ImageOps.invert(mask)

image_dto = context.images.save(image=mask, image_category=ImageCategory.MASK)

return ImageOutput.build(image_dto)

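The `id_to_mask` helper above treats a pixel as part of the mask when its Euclidean RGB distance to the target color falls under the threshold. A condensed sketch of that test, assuming an HxWx3 NumPy array (the function name is illustrative):

import numpy

def color_mask(rgb: numpy.ndarray, color: tuple[int, int, int], threshold: float) -> numpy.ndarray:
    """Return a uint8 mask (0 or 255) of pixels within `threshold` of `color`."""
    distance = numpy.linalg.norm(rgb.astype(float) - numpy.array(color), axis=-1)
    return (distance < threshold).astype(numpy.uint8) * 255
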
@@ -1087,123 +1055,3 @@ class CanvasV2MaskAndCropInvocation(BaseInvocation, WithMetadata, WithBoard):
image_dto = context.images.save(image=generated_image)

return ImageOutput.build(image_dto)

@invocation(
"img_noise",
title="Add Image Noise",
tags=["image", "noise"],
category="image",
version="1.0.1",
)
class ImageNoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Add noise to an image"""

image: ImageField = InputField(description="The image to add noise to")
seed: int = InputField(
default=0,
ge=0,
le=SEED_MAX,
description=FieldDescriptions.seed,
)
noise_type: Literal["gaussian", "salt_and_pepper"] = InputField(
default="gaussian",
description="The type of noise to add",
)
amount: float = InputField(default=0.1, ge=0, le=1, description="The amount of noise to add")
noise_color: bool = InputField(default=True, description="Whether to add colored noise")
size: int = InputField(default=1, ge=1, description="The size of the noise points")

def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.images.get_pil(self.image.image_name, mode="RGBA")

# Save out the alpha channel
alpha = image.getchannel("A")

# Set the seed for numpy random
rs = numpy.random.RandomState(numpy.random.MT19937(numpy.random.SeedSequence(self.seed)))

if self.noise_type == "gaussian":
if self.noise_color:
noise = rs.normal(0, 1, (image.height // self.size, image.width // self.size, 3)) * 255
else:
noise = rs.normal(0, 1, (image.height // self.size, image.width // self.size)) * 255
noise = numpy.stack([noise] * 3, axis=-1)
elif self.noise_type == "salt_and_pepper":
if self.noise_color:
noise = rs.choice(
[0, 255], (image.height // self.size, image.width // self.size, 3), p=[1 - self.amount, self.amount]
)
else:
noise = rs.choice(
[0, 255], (image.height // self.size, image.width // self.size), p=[1 - self.amount, self.amount]
)
noise = numpy.stack([noise] * 3, axis=-1)

noise = Image.fromarray(noise.astype(numpy.uint8), mode="RGB").resize(
(image.width, image.height), Image.Resampling.NEAREST
)
noisy_image = Image.blend(image.convert("RGB"), noise, self.amount).convert("RGBA")

# Paste back the alpha channel
noisy_image.putalpha(alpha)

image_dto = context.images.save(image=noisy_image)

return ImageOutput.build(image_dto)

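The noise invocation seeds NumPy through an explicit RandomState(MT19937(SeedSequence(seed))) chain, so the same seed always produces the same noise and the global NumPy RNG state is never touched. A brief sketch of the reproducibility guarantee, with an illustrative helper:

import numpy

def make_noise(seed: int, shape: tuple[int, ...]) -> numpy.ndarray:
    # A private generator: reruns with the same seed yield identical noise.
    rs = numpy.random.RandomState(numpy.random.MT19937(numpy.random.SeedSequence(seed)))
    return rs.normal(0, 1, shape)

assert numpy.array_equal(make_noise(42, (4, 4)), make_noise(42, (4, 4)))
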
@invocation(
"crop_image_to_bounding_box",
title="Crop Image to Bounding Box",
category="image",
version="1.0.0",
tags=["image", "crop"],
classification=Classification.Beta,
)
class CropImageToBoundingBoxInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Crop an image to the given bounding box. If the bounding box is omitted, the image is cropped to the non-transparent pixels."""

image: ImageField = InputField(description="The image to crop")
bounding_box: BoundingBoxField | None = InputField(
default=None, description="The bounding box to crop the image to"
)

def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.images.get_pil(self.image.image_name)

bounding_box = self.bounding_box.tuple() if self.bounding_box is not None else image.getbbox()

cropped_image = image.crop(bounding_box)

image_dto = context.images.save(image=cropped_image)
return ImageOutput.build(image_dto)

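When no bounding box is supplied, the invocation above falls back to Image.getbbox(), which returns the extent of the non-zero (for RGBA, non-transparent) pixels. A quick illustration of that fallback:

from PIL import Image

canvas = Image.new("RGBA", (100, 100))  # fully transparent
canvas.paste(Image.new("RGBA", (10, 10), (255, 0, 0, 255)), (20, 30))

print(canvas.getbbox())  # (20, 30, 30, 40): the opaque region
print(canvas.crop(canvas.getbbox()).size)  # (10, 10)
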
@invocation(
"paste_image_into_bounding_box",
title="Paste Image into Bounding Box",
category="image",
version="1.0.0",
tags=["image", "crop"],
classification=Classification.Beta,
)
class PasteImageIntoBoundingBoxInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Paste the source image into the target image at the given bounding box.

The source image must be the same size as the bounding box, and the bounding box must fit within the target image."""

source_image: ImageField = InputField(description="The image to paste")
target_image: ImageField = InputField(description="The image to paste into")
bounding_box: BoundingBoxField = InputField(description="The bounding box to paste the image into")

def invoke(self, context: InvocationContext) -> ImageOutput:
source_image = context.images.get_pil(self.source_image.image_name, mode="RGBA")
target_image = context.images.get_pil(self.target_image.image_name, mode="RGBA")

bounding_box = self.bounding_box.tuple()

target_image.paste(source_image, bounding_box, source_image)

image_dto = context.images.save(image=target_image)
return ImageOutput.build(image_dto)

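Note the three-argument paste above: passing the RGBA source as its own mask makes its alpha channel control the blend, so transparent source pixels leave the target untouched. A minimal sketch:

from PIL import Image

target = Image.new("RGBA", (64, 64), (0, 0, 255, 255))
source = Image.new("RGBA", (16, 16), (255, 0, 0, 128))  # half-transparent red

# Without the third argument the source alpha would overwrite the target's;
# with it, the source is composited over the target using its own alpha.
target.paste(source, (8, 8), source)
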
@@ -13,7 +13,7 @@ from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL
from diffusers.models.autoencoders.autoencoder_tiny import AutoencoderTiny

from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
from invokeai.app.invocations.constants import DEFAULT_PRECISION, LATENT_SCALE_FACTOR
from invokeai.app.invocations.fields import (
FieldDescriptions,
ImageField,
@@ -26,7 +26,6 @@ from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager import LoadedModel
from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
from invokeai.backend.stable_diffusion.vae_tiling import patch_vae_tiling_params
from invokeai.backend.util.devices import TorchDevice


@invocation(
@@ -50,7 +49,7 @@ class ImageToLatentsInvocation(BaseInvocation):
# NOTE: tile_size = 0 is a special value. We use this rather than `int | None`, because the workflow UI does not
# offer a way to directly set None values.
tile_size: int = InputField(default=0, multiple_of=8, description=FieldDescriptions.vae_tile_size)
fp32: bool = InputField(default=False, description=FieldDescriptions.fp32)
fp32: bool = InputField(default=DEFAULT_PRECISION == torch.float32, description=FieldDescriptions.fp32)

@staticmethod
def vae_encode(
@@ -99,7 +98,7 @@ class ImageToLatentsInvocation(BaseInvocation):
)

# non_noised_latents_from_image
image_tensor = image_tensor.to(device=TorchDevice.choose_torch_device(), dtype=vae.dtype)
image_tensor = image_tensor.to(device=vae.device, dtype=vae.dtype)
with torch.inference_mode(), tiling_context:
latents = ImageToLatentsInvocation._encode_to_tensor(vae, image_tensor)

@@ -12,7 +12,7 @@ from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL
from diffusers.models.autoencoders.autoencoder_tiny import AutoencoderTiny

from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
from invokeai.app.invocations.constants import DEFAULT_PRECISION, LATENT_SCALE_FACTOR
from invokeai.app.invocations.fields import (
FieldDescriptions,
Input,
@@ -51,60 +51,37 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
# NOTE: tile_size = 0 is a special value. We use this rather than `int | None`, because the workflow UI does not
# offer a way to directly set None values.
tile_size: int = InputField(default=0, multiple_of=8, description=FieldDescriptions.vae_tile_size)
fp32: bool = InputField(default=False, description=FieldDescriptions.fp32)
fp32: bool = InputField(default=DEFAULT_PRECISION == torch.float32, description=FieldDescriptions.fp32)

def _estimate_working_memory(
self, latents: torch.Tensor, use_tiling: bool, vae: AutoencoderKL | AutoencoderTiny
) -> int:
def _estimate_working_memory(self, latents: torch.Tensor) -> int:
"""Estimate the working memory required by the invocation in bytes."""
# It was found experimentally that the peak working memory scales linearly with the number of pixels and the
# element size (precision). This estimate is accurate for both SD1 and SDXL.
out_h = LATENT_SCALE_FACTOR * latents.shape[-2]
out_w = LATENT_SCALE_FACTOR * latents.shape[-1]
element_size = 4 if self.fp32 else 2
scaling_constant = 960  # Determined experimentally.

if use_tiling:
tile_size = self.tile_size
if tile_size == 0:
tile_size = vae.tile_sample_min_size
assert isinstance(tile_size, int)
out_h = tile_size
out_w = tile_size
working_memory = out_h * out_w * element_size * scaling_constant

# We add 25% to the working memory estimate when tiling is enabled to account for factors like tile overlap
# and number of tiles. We could make this more precise in the future, but this should be good enough for
# most use cases.
working_memory = working_memory * 1.25
else:
out_h = LATENT_SCALE_FACTOR * latents.shape[-2]
out_w = LATENT_SCALE_FACTOR * latents.shape[-1]
working_memory = out_h * out_w * element_size * scaling_constant
working_memory = out_h * out_w * element_size * scaling_constant

if self.fp32:
# If we are running in FP32, then we should account for the likely increase in model size (~250MB).
working_memory += 250 * 2**20

# We add 20% to the working memory estimate to be safe.
working_memory = int(working_memory * 1.2)
return working_memory

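The estimator above models peak working memory as output pixels x element size x an experimentally determined constant, plus a flat bump for the larger FP32 weights and a safety margin. A standalone sketch of the arithmetic, assuming LATENT_SCALE_FACTOR is 8 as for SD VAEs (the helper name is illustrative):

LATENT_SCALE_FACTOR = 8
SCALING_CONSTANT = 960  # determined experimentally for SD1/SDXL, per the hunk above

def estimate_decode_memory(latent_h: int, latent_w: int, fp32: bool) -> int:
    out_h = LATENT_SCALE_FACTOR * latent_h
    out_w = LATENT_SCALE_FACTOR * latent_w
    element_size = 4 if fp32 else 2
    working_memory = out_h * out_w * element_size * SCALING_CONSTANT
    if fp32:
        working_memory += 250 * 2**20  # FP32 model weights are roughly 250MB larger
    return int(working_memory * 1.2)  # 20% safety margin

# A 128x128 latent decodes to a 1024x1024 image; at fp16 this estimates ~2.25 GiB.
print(estimate_decode_memory(128, 128, fp32=False) / 2**30)
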
@torch.no_grad()
def invoke(self, context: InvocationContext) -> ImageOutput:
latents = context.tensors.load(self.latents.latents_name)

use_tiling = self.tiled or context.config.get().force_tiled_decode

vae_info = context.models.load(self.vae.vae)
assert isinstance(vae_info.model, (AutoencoderKL, AutoencoderTiny))

estimated_working_memory = self._estimate_working_memory(latents, use_tiling, vae_info.model)
with (
SeamlessExt.static_patch_model(vae_info.model, self.vae.seamless_axes),
vae_info.model_on_device(working_mem_bytes=estimated_working_memory) as (_, vae),
vae_info.model_on_device(working_mem_bytes=self._estimate_working_memory(latents)) as (_, vae),
):
context.util.signal_progress("Running VAE decoder")
assert isinstance(vae, (AutoencoderKL, AutoencoderTiny))
latents = latents.to(TorchDevice.choose_torch_device())
latents = latents.to(vae.device)
if self.fp32:
vae.to(dtype=torch.float32)

@@ -130,7 +107,7 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
vae.to(dtype=torch.float16)
latents = latents.half()

if use_tiling:
if self.tiled or context.config.get().force_tiled_decode:
vae.enable_tiling()
else:
vae.disable_tiling()

@@ -2,22 +2,9 @@ import numpy as np
import torch
from PIL import Image

from invokeai.app.invocations.baseinvocation import (
BaseInvocation,
Classification,
InvocationContext,
invocation,
)
from invokeai.app.invocations.fields import (
BoundingBoxField,
ColorField,
ImageField,
InputField,
TensorField,
WithBoard,
WithMetadata,
)
from invokeai.app.invocations.primitives import BoundingBoxOutput, ImageOutput, MaskOutput
from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, InvocationContext, invocation
from invokeai.app.invocations.fields import ImageField, InputField, TensorField, WithBoard, WithMetadata
from invokeai.app.invocations.primitives import ImageOutput, MaskOutput
from invokeai.backend.image_util.util import pil_to_np


@@ -86,7 +73,7 @@ class AlphaMaskToTensorInvocation(BaseInvocation):
title="Invert Tensor Mask",
tags=["conditioning"],
category="conditioning",
version="1.1.0",
version="1.0.0",
classification=Classification.Beta,
)
class InvertTensorMaskInvocation(BaseInvocation):
@@ -96,15 +83,6 @@ class InvertTensorMaskInvocation(BaseInvocation):

def invoke(self, context: InvocationContext) -> MaskOutput:
mask = context.tensors.load(self.mask.tensor_name)

# Verify dtype and shape.
assert mask.dtype == torch.bool
assert mask.dim() in [2, 3]

# Unsqueeze the channel dimension if it is missing. The MaskOutput type expects a single channel.
if mask.dim() == 2:
mask = mask.unsqueeze(0)

inverted = ~mask

return MaskOutput(
@@ -223,48 +201,3 @@ class ApplyMaskTensorToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
image_dto = context.images.save(image=masked_image)

return ImageOutput.build(image_dto)


WHITE = ColorField(r=255, g=255, b=255, a=255)


@invocation(
"get_image_mask_bounding_box",
title="Get Image Mask Bounding Box",
tags=["mask"],
category="mask",
version="1.0.0",
classification=Classification.Beta,
)
class GetMaskBoundingBoxInvocation(BaseInvocation):
"""Gets the bounding box of the given mask image."""

mask: ImageField = InputField(description="The mask to crop.")
margin: int = InputField(default=0, description="Margin to add to the bounding box.")
mask_color: ColorField = InputField(default=WHITE, description="Color of the mask in the image.")

def invoke(self, context: InvocationContext) -> BoundingBoxOutput:
mask = context.images.get_pil(self.mask.image_name, mode="RGBA")
mask_np = np.array(mask)

# Convert mask_color to RGBA tuple
mask_color_rgb = self.mask_color.tuple()

# Find the bounding box of the mask color
y, x = np.where(np.all(mask_np == mask_color_rgb, axis=-1))

if len(x) == 0 or len(y) == 0:
# No pixels found with the given color
return BoundingBoxOutput(bounding_box=BoundingBoxField(x_min=0, y_min=0, x_max=0, y_max=0))

left, upper, right, lower = x.min(), y.min(), x.max(), y.max()

# Add the margin
left = max(0, left - self.margin)
upper = max(0, upper - self.margin)
right = min(mask_np.shape[1], right + self.margin)
lower = min(mask_np.shape[0], lower + self.margin)

bounding_box = BoundingBoxField(x_min=left, y_min=upper, x_max=right, y_max=lower)

return BoundingBoxOutput(bounding_box=bounding_box)

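GetMaskBoundingBoxInvocation finds the box by locating every pixel that exactly matches the mask color and taking the min/max of the coordinates, then padding by the margin. A condensed sketch of the reduction, assuming an HxWx4 NumPy array (the function name is illustrative):

import numpy as np

def bbox_of_color(mask_np: np.ndarray, color: tuple[int, int, int, int]) -> tuple[int, int, int, int] | None:
    # np.where over a full-channel equality test yields the matching coordinates.
    ys, xs = np.where(np.all(mask_np == color, axis=-1))
    if len(xs) == 0:
        return None  # the color is not present anywhere in the mask
    return int(xs.min()), int(ys.min()), int(xs.max()), int(ys.max())
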
@@ -68,7 +68,6 @@ class CLIPField(BaseModel):
class T5EncoderField(BaseModel):
tokenizer: ModelIdentifierField = Field(description="Info to load tokenizer submodel")
text_encoder: ModelIdentifierField = Field(description="Info to load text_encoder submodel")
loras: List[LoRAField] = Field(description="LoRAs to apply on model loading")


class VAEField(BaseModel):
@@ -206,7 +205,7 @@ class LoRALoaderInvocation(BaseInvocation):
lora_key = self.lora.key

if not context.models.exists(lora_key):
raise Exception(f"Unknown lora: {lora_key}!")
raise Exception(f"Unkown lora: {lora_key}!")

if self.unet is not None and any(lora.lora.key == lora_key for lora in self.unet.loras):
raise Exception(f'LoRA "{lora_key}" already applied to unet')
@@ -257,12 +256,12 @@ class LoRASelectorInvocation(BaseInvocation):
return LoRASelectorOutput(lora=LoRAField(lora=self.lora, weight=self.weight))


@invocation("lora_collection_loader", title="LoRA Collection Loader", tags=["model"], category="model", version="1.1.0")
@invocation("lora_collection_loader", title="LoRA Collection Loader", tags=["model"], category="model", version="1.0.0")
class LoRACollectionLoader(BaseInvocation):
"""Applies a collection of LoRAs to the provided UNet and CLIP models."""

loras: Optional[LoRAField | list[LoRAField]] = InputField(
default=None, description="LoRA models and weights. May be a single LoRA or collection.", title="LoRAs"
loras: LoRAField | list[LoRAField] = InputField(
description="LoRA models and weights. May be a single LoRA or collection.", title="LoRAs"
)
unet: Optional[UNetField] = InputField(
default=None,
@@ -282,14 +281,7 @@ class LoRACollectionLoader(BaseInvocation):
loras = self.loras if isinstance(self.loras, list) else [self.loras]
added_loras: list[str] = []

if self.unet is not None:
output.unet = self.unet.model_copy(deep=True)
if self.clip is not None:
output.clip = self.clip.model_copy(deep=True)

for lora in loras:
if lora is None:
continue
if lora.lora.key in added_loras:
continue

@@ -300,10 +292,14 @@ class LoRACollectionLoader(BaseInvocation):

added_loras.append(lora.lora.key)

if self.unet is not None and output.unet is not None:
if self.unet is not None:
if output.unet is None:
output.unet = self.unet.model_copy(deep=True)
output.unet.loras.append(lora)

if self.clip is not None and output.clip is not None:
if self.clip is not None:
if output.clip is None:
output.clip = self.clip.model_copy(deep=True)
output.clip.loras.append(lora)

return output
@@ -403,13 +399,13 @@ class SDXLLoRALoaderInvocation(BaseInvocation):
title="SDXL LoRA Collection Loader",
tags=["model"],
category="model",
version="1.1.0",
version="1.0.0",
)
class SDXLLoRACollectionLoader(BaseInvocation):
"""Applies a collection of SDXL LoRAs to the provided UNet and CLIP models."""

loras: Optional[LoRAField | list[LoRAField]] = InputField(
default=None, description="LoRA models and weights. May be a single LoRA or collection.", title="LoRAs"
loras: LoRAField | list[LoRAField] = InputField(
description="LoRA models and weights. May be a single LoRA or collection.", title="LoRAs"
)
unet: Optional[UNetField] = InputField(
default=None,
@@ -435,18 +431,7 @@ class SDXLLoRACollectionLoader(BaseInvocation):
loras = self.loras if isinstance(self.loras, list) else [self.loras]
added_loras: list[str] = []

if self.unet is not None:
output.unet = self.unet.model_copy(deep=True)

if self.clip is not None:
output.clip = self.clip.model_copy(deep=True)

if self.clip2 is not None:
output.clip2 = self.clip2.model_copy(deep=True)

for lora in loras:
if lora is None:
continue
if lora.lora.key in added_loras:
continue

@@ -457,13 +442,19 @@ class SDXLLoRACollectionLoader(BaseInvocation):

added_loras.append(lora.lora.key)

if self.unet is not None and output.unet is not None:
if self.unet is not None:
if output.unet is None:
output.unet = self.unet.model_copy(deep=True)
output.unet.loras.append(lora)

if self.clip is not None and output.clip is not None:
if self.clip is not None:
if output.clip is None:
output.clip = self.clip.model_copy(deep=True)
output.clip.loras.append(lora)

if self.clip2 is not None and output.clip2 is not None:
if self.clip2 is not None:
if output.clip2 is None:
output.clip2 = self.clip2.model_copy(deep=True)
output.clip2.loras.append(lora)

return output

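Both collection loaders share the same shape: normalize the input to a list, then skip None entries and any LoRA key that was already applied, so a model is never patched twice with the same LoRA. A distilled sketch of that dedupe loop, with types simplified for illustration:

def collect_loras(loras):
    """Normalize a single LoRA or a list into a de-duplicated list, keyed by lora.key."""
    items = loras if isinstance(loras, list) else [loras]
    added_keys: list[str] = []
    result = []
    for lora in items:
        if lora is None or lora.lora.key in added_keys:
            continue  # ignore empty slots and repeated keys
        added_keys.append(lora.lora.key)
        result.append(lora)
    return result
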
@@ -481,7 +472,7 @@ class VAELoaderInvocation(BaseInvocation):
key = self.vae_model.key

if not context.models.exists(key):
raise Exception(f"Unknown vae: {key}!")
raise Exception(f"Unkown vae: {key}!")

return VAEOutput(vae=VAEField(vae=self.vae_model))

@@ -7,6 +7,7 @@ import torch
from invokeai.app.invocations.baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
Classification,
invocation,
invocation_output,
)
@@ -416,7 +417,6 @@ class ColorInvocation(BaseInvocation):
class MaskOutput(BaseInvocationOutput):
"""A torch mask tensor."""

# shape: [1, H, W], dtype: bool
mask: TensorField = OutputField(description="The mask.")
width: int = OutputField(description="The width of the mask in pixels.")
height: int = OutputField(description="The height of the mask in pixels.")
@@ -539,3 +539,23 @@ class BoundingBoxInvocation(BaseInvocation):


# endregion


@invocation(
"image_batch",
title="Image Batch",
tags=["primitives", "image", "batch", "internal"],
category="primitives",
version="1.0.0",
classification=Classification.Special,
)
class ImageBatchInvocation(BaseInvocation):
"""Create a batched generation, where the workflow is executed once for each image in the batch."""

images: list[ImageField] = InputField(min_length=1, description="The images to batch over", input=Input.Direct)

def __init__(self):
raise NotImplementedError("This class should never be executed or instantiated directly.")

def invoke(self, context: InvocationContext) -> ImageOutput:
raise NotImplementedError("This class should never be executed or instantiated directly.")

@@ -16,7 +16,6 @@ from invokeai.app.invocations.primitives import LatentsOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.load.load_base import LoadedModel
from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
from invokeai.backend.util.devices import TorchDevice


@invocation(
@@ -40,7 +39,7 @@ class SD3ImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):

vae.disable_tiling()

image_tensor = image_tensor.to(device=TorchDevice.choose_torch_device(), dtype=vae.dtype)
image_tensor = image_tensor.to(device=vae.device, dtype=vae.dtype)
with torch.inference_mode():
image_tensor_dist = vae.encode(image_tensor).latent_dist
# TODO: Use seed to make sampling reproducible.

@@ -44,16 +44,15 @@ class SD3LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
def _estimate_working_memory(self, latents: torch.Tensor, vae: AutoencoderKL) -> int:
"""Estimate the working memory required by the invocation in bytes."""
# It was found experimentally that the peak working memory scales linearly with the number of pixels and the
# element size (precision).
# element size (precision). This estimate is accurate for both SD1 and SDXL.
out_h = LATENT_SCALE_FACTOR * latents.shape[-2]
out_w = LATENT_SCALE_FACTOR * latents.shape[-1]
element_size = next(vae.parameters()).element_size()
scaling_constant = 1230  # Determined experimentally.
# TODO(ryand): Need to tune this value, it was copied from the SD1 implementation.
scaling_constant = 960  # Determined experimentally.
working_memory = out_h * out_w * element_size * scaling_constant

# We add a 20% buffer to the working memory estimate to be safe.
working_memory = working_memory * 1.2
return int(working_memory)
return working_memory

@torch.no_grad()
def invoke(self, context: InvocationContext) -> ImageOutput:
@@ -68,7 +67,7 @@ class SD3LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
):
context.util.signal_progress("Running VAE")
assert isinstance(vae, (AutoencoderKL))
latents = latents.to(TorchDevice.choose_torch_device())
latents = latents.to(vae.device)

vae.disable_tiling()

@@ -10,10 +10,6 @@ from invokeai.app.invocations.baseinvocation import (
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
from invokeai.app.invocations.model import CLIPField, ModelIdentifierField, T5EncoderField, TransformerField, VAEField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.t5_model_identifier import (
preprocess_t5_encoder_model_identifier,
preprocess_t5_tokenizer_model_identifier,
)
from invokeai.backend.model_manager.config import SubModelType


@@ -92,13 +88,21 @@ class Sd3ModelLoaderInvocation(BaseInvocation):
if self.clip_g_model
else self.model.model_copy(update={"submodel_type": SubModelType.TextEncoder2})
)
tokenizer_t5 = preprocess_t5_tokenizer_model_identifier(self.t5_encoder_model or self.model)
t5_encoder = preprocess_t5_encoder_model_identifier(self.t5_encoder_model or self.model)
tokenizer_t5 = (
self.t5_encoder_model.model_copy(update={"submodel_type": SubModelType.Tokenizer3})
if self.t5_encoder_model
else self.model.model_copy(update={"submodel_type": SubModelType.Tokenizer3})
)
t5_encoder = (
self.t5_encoder_model.model_copy(update={"submodel_type": SubModelType.TextEncoder3})
if self.t5_encoder_model
else self.model.model_copy(update={"submodel_type": SubModelType.TextEncoder3})
)

return Sd3ModelLoaderOutput(
transformer=TransformerField(transformer=transformer, loras=[]),
clip_l=CLIPField(tokenizer=tokenizer_l, text_encoder=clip_encoder_l, loras=[], skipped_layers=0),
clip_g=CLIPField(tokenizer=tokenizer_g, text_encoder=clip_encoder_g, loras=[], skipped_layers=0),
t5_encoder=T5EncoderField(tokenizer=tokenizer_t5, text_encoder=t5_encoder, loras=[]),
t5_encoder=T5EncoderField(tokenizer=tokenizer_t5, text_encoder=t5_encoder),
vae=VAEField(vae=vae),
)

@@ -87,11 +87,14 @@ class Sd3TextEncoderInvocation(BaseInvocation):

def _t5_encode(self, context: InvocationContext, max_seq_len: int) -> torch.Tensor:
assert self.t5_encoder is not None
t5_tokenizer_info = context.models.load(self.t5_encoder.tokenizer)
t5_text_encoder_info = context.models.load(self.t5_encoder.text_encoder)

prompt = [self.prompt]

with (
context.models.load(self.t5_encoder.text_encoder) as t5_text_encoder,
context.models.load(self.t5_encoder.tokenizer) as t5_tokenizer,
t5_text_encoder_info as t5_text_encoder,
t5_tokenizer_info as t5_tokenizer,
):
context.util.signal_progress("Running T5 encoder")
assert isinstance(t5_text_encoder, T5EncoderModel)
@@ -118,7 +121,7 @@ class Sd3TextEncoderInvocation(BaseInvocation):
f" {max_seq_len} tokens: {removed_text}"
)

prompt_embeds = t5_text_encoder(text_input_ids.to(TorchDevice.choose_torch_device()))[0]
prompt_embeds = t5_text_encoder(text_input_ids.to(t5_text_encoder.device))[0]

assert isinstance(prompt_embeds, torch.Tensor)
return prompt_embeds
@@ -126,12 +129,14 @@ class Sd3TextEncoderInvocation(BaseInvocation):
def _clip_encode(
self, context: InvocationContext, clip_model: CLIPField, tokenizer_max_length: int = 77
) -> Tuple[torch.Tensor, torch.Tensor]:
clip_tokenizer_info = context.models.load(clip_model.tokenizer)
clip_text_encoder_info = context.models.load(clip_model.text_encoder)

prompt = [self.prompt]

clip_text_encoder_info = context.models.load(clip_model.text_encoder)
with (
clip_text_encoder_info.model_on_device() as (cached_weights, clip_text_encoder),
context.models.load(clip_model.tokenizer) as clip_tokenizer,
clip_tokenizer_info as clip_tokenizer,
ExitStack() as exit_stack,
):
context.util.signal_progress("Running CLIP encoder")
@@ -150,7 +155,7 @@ class Sd3TextEncoderInvocation(BaseInvocation):
model=clip_text_encoder,
patches=self._clip_lora_iterator(context, clip_model),
prefix=FLUX_LORA_CLIP_PREFIX,
dtype=clip_text_encoder.dtype,
dtype=TorchDevice.choose_torch_dtype(),
cached_weights=cached_weights,
)
)
@@ -181,7 +186,7 @@ class Sd3TextEncoderInvocation(BaseInvocation):
f" {tokenizer_max_length} tokens: {removed_text}"
)
prompt_embeds = clip_text_encoder(
input_ids=text_input_ids.to(TorchDevice.choose_torch_device()), output_hidden_states=True
input_ids=text_input_ids.to(clip_text_encoder.device), output_hidden_states=True
)
pooled_prompt_embeds = prompt_embeds[0]
prompt_embeds = prompt_embeds.hidden_states[-2]

@@ -49,7 +49,7 @@ class SAMPointsField(BaseModel):
title="Segment Anything",
tags=["prompt", "segmentation"],
category="segmentation",
version="1.2.0",
version="1.1.0",
)
class SegmentAnythingInvocation(BaseInvocation):
"""Runs a Segment Anything Model."""
@@ -96,10 +96,8 @@ class SegmentAnythingInvocation(BaseInvocation):
# masks contains bool values, so we merge them via max-reduce.
combined_mask, _ = torch.stack(masks).max(dim=0)

# Unsqueeze the channel dimension.
combined_mask = combined_mask.unsqueeze(0)
mask_tensor_name = context.tensors.save(combined_mask)
_, height, width = combined_mask.shape
height, width = combined_mask.shape
return MaskOutput(mask=TensorField(tensor_name=mask_tensor_name), width=width, height=height)

@staticmethod

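Merging boolean masks with torch.stack(...).max(dim=0) is a union: a pixel is set in the combined mask if any input mask sets it. A tiny sketch, including the channel unsqueeze the hunk touches:

import torch

masks = [
    torch.tensor([[True, False], [False, False]]),
    torch.tensor([[False, False], [False, True]]),
]

# max over the stacked dimension acts as a logical OR for bool tensors.
combined, _ = torch.stack(masks).max(dim=0)
combined = combined.unsqueeze(0)  # add the channel dim expected by MaskOutput
assert combined.shape == (1, 2, 2)
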
@@ -22,7 +22,6 @@ from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel
from invokeai.backend.tiles.tiles import calc_tiles_min_overlap
from invokeai.backend.tiles.utils import TBLR, Tile
from invokeai.backend.util.devices import TorchDevice


@invocation("spandrel_image_to_image", title="Image-to-Image", tags=["upscale"], category="upscale", version="1.3.0")
@@ -103,7 +102,7 @@ class SpandrelImageToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
(height * scale, width * scale, channels), dtype=torch.uint8, device=torch.device("cpu")
)

image_tensor = image_tensor.to(device=TorchDevice.choose_torch_device(), dtype=spandrel_model.dtype)
image_tensor = image_tensor.to(device=spandrel_model.device, dtype=spandrel_model.dtype)

# Run the model on each tile.
pbar = tqdm(list(zip(tiles, scaled_tiles, strict=True)), desc="Upscaling Tiles")
@@ -117,7 +116,9 @@ class SpandrelImageToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
raise CanceledException

# Extract the current tile from the input tensor.
input_tile = image_tensor[:, :, tile.coords.top : tile.coords.bottom, tile.coords.left : tile.coords.right]
input_tile = image_tensor[
:, :, tile.coords.top : tile.coords.bottom, tile.coords.left : tile.coords.right
].to(device=spandrel_model.device, dtype=spandrel_model.dtype)

# Run the model on the tile.
output_tile = spandrel_model.run(input_tile)
@@ -150,12 +151,15 @@ class SpandrelImageToImageInvocation(BaseInvocation, WithMetadata, WithBoard):

return pil_image

@torch.no_grad()
@torch.inference_mode()
def invoke(self, context: InvocationContext) -> ImageOutput:
# Images are converted to RGB, because most models don't support an alpha channel. In the future, we may want to
# revisit this.
image = context.images.get_pil(self.image.image_name, mode="RGB")

# Load the model.
spandrel_model_info = context.models.load(self.image_to_image_model)

def step_callback(step: int, total_steps: int) -> None:
context.util.signal_progress(
message=f"Processing tile {step}/{total_steps}",
@@ -163,7 +167,7 @@ class SpandrelImageToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
)

# Do the upscaling.
with context.models.load(self.image_to_image_model) as spandrel_model:
with spandrel_model_info as spandrel_model:
assert isinstance(spandrel_model, SpandrelImageToImageModel)

# Upscale the image
@@ -196,12 +200,15 @@ class SpandrelImageToImageAutoscaleInvocation(SpandrelImageToImageInvocation):
description="If true, the output image will be resized to the nearest multiple of 8 in both dimensions.",
)

@torch.no_grad()
@torch.inference_mode()
def invoke(self, context: InvocationContext) -> ImageOutput:
# Images are converted to RGB, because most models don't support an alpha channel. In the future, we may want to
# revisit this.
image = context.images.get_pil(self.image.image_name, mode="RGB")

# Load the model.
spandrel_model_info = context.models.load(self.image_to_image_model)

# The target size of the image, determined by the provided scale. We'll run the upscaler until we hit this size.
# Later, we may mutate this value if the model doesn't upscale the image or if the user requested a multiple of 8.
target_width = int(image.width * self.scale)
@@ -214,7 +221,7 @@ class SpandrelImageToImageAutoscaleInvocation(SpandrelImageToImageInvocation):
)

# Do the upscaling.
with context.models.load(self.image_to_image_model) as spandrel_model:
with spandrel_model_info as spandrel_model:
assert isinstance(spandrel_model, SpandrelImageToImageModel)

iteration = 1

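One side of the tiling hunk above moves only one tile at a time onto the model's device, so the full-resolution tensor can stay in CPU RAM and peak VRAM scales with the tile size rather than the image size. A sketch of that slice-then-transfer step (shapes and device choice are illustrative):

import torch

image_tensor = torch.zeros(1, 3, 512, 512)  # full NCHW image stays on the CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Slice a (top:bottom, left:right) window, then move just that tile across.
top, bottom, left, right = 0, 128, 0, 128
input_tile = image_tensor[:, :, top:bottom, left:right].to(device=device, dtype=torch.float16)
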
@@ -201,24 +201,25 @@ class TiledMultiDiffusionDenoiseLatents(BaseInvocation):
yield (lora_info.model, lora.weight)
del lora_info

device = TorchDevice.choose_torch_device()
# Load the UNet model.
unet_info = context.models.load(self.unet.unet)

with (
ExitStack() as exit_stack,
context.models.load(self.unet.unet) as unet,
unet_info as unet,
LayerPatcher.apply_smart_model_patches(
model=unet, patches=_lora_loader(), prefix="lora_unet_", dtype=unet.dtype
),
):
assert isinstance(unet, UNet2DConditionModel)
latents = latents.to(device=device, dtype=unet.dtype)
latents = latents.to(device=unet.device, dtype=unet.dtype)
if noise is not None:
noise = noise.to(device=device, dtype=unet.dtype)
noise = noise.to(device=unet.device, dtype=unet.dtype)
scheduler = get_scheduler(
context=context,
scheduler_info=self.unet.scheduler,
scheduler_name=self.scheduler,
seed=seed,
unet_config=unet_config,
)
pipeline = self.create_pipeline(unet=unet, scheduler=scheduler)

@@ -227,7 +228,7 @@ class TiledMultiDiffusionDenoiseLatents(BaseInvocation):
context=context,
positive_conditioning_field=self.positive_conditioning,
negative_conditioning_field=self.negative_conditioning,
device=device,
device=unet.device,
dtype=unet.dtype,
latent_height=latent_tile_height,
latent_width=latent_tile_width,
@@ -240,7 +241,6 @@ class TiledMultiDiffusionDenoiseLatents(BaseInvocation):
context=context,
control_input=self.control,
latents_shape=list(latents.shape),
device=device,
# do_classifier_free_guidance=(self.cfg_scale >= 1.0))
do_classifier_free_guidance=True,
exit_stack=exit_stack,
@@ -266,7 +266,7 @@ class TiledMultiDiffusionDenoiseLatents(BaseInvocation):

timesteps, init_timestep, scheduler_step_kwargs = DenoiseLatentsInvocation.init_scheduler(
scheduler,
device=device,
device=unet.device,
steps=self.steps,
denoising_start=self.denoising_start,
denoising_end=self.denoising_end,

@@ -57,7 +57,7 @@ def deserialize_board_record(board_dict: dict) -> BoardRecord:


class BoardChanges(BaseModel, extra="forbid"):
board_name: Optional[str] = Field(default=None, description="The board's new name.", max_length=300)
board_name: Optional[str] = Field(default=None, description="The board's new name.")
cover_image_name: Optional[str] = Field(default=None, description="The name of the board's new cover image.")
archived: Optional[bool] = Field(default=None, description="Whether or not the board is archived")

@@ -76,21 +76,17 @@ class InvokeAIAppConfig(BaseSettings):
log_format: Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style.<br>Valid values: `plain`, `color`, `syslog`, `legacy`
log_level: Emit logging messages at this level or higher.<br>Valid values: `debug`, `info`, `warning`, `error`, `critical`
log_sql: Log SQL queries. `log_level` must be `debug` for this to do anything. Extremely verbose.
log_level_network: Log level for network-related messages. 'info' and 'debug' are very verbose.<br>Valid values: `debug`, `info`, `warning`, `error`, `critical`
use_memory_db: Use in-memory database. Useful for development.
dev_reload: Automatically reload when Python sources are changed. Does not reload node definitions.
profile_graphs: Enable graph profiling using `cProfile`.
profile_prefix: An optional prefix for profile output files.
profiles_dir: Path to profiles output directory.
max_cache_ram_gb: The maximum amount of CPU RAM to use for model caching in GB. If unset, the limit will be configured based on the available RAM. In most cases, it is recommended to leave this unset.
max_cache_vram_gb: The amount of VRAM to use for model caching in GB. If unset, the limit will be configured based on the available VRAM and the device_working_mem_gb. In most cases, it is recommended to leave this unset.
ram: The maximum amount of CPU RAM to use for model caching in GB. If unset, the limit will be configured based on the available RAM. In most cases, it is recommended to leave this unset.
vram: The amount of VRAM to use for model caching in GB. If unset, the limit will be configured based on the available VRAM and the device_working_mem_gb. In most cases, it is recommended to leave this unset.
lazy_offload: DEPRECATED: This setting is no longer used. Lazy-offloading is enabled by default. This config setting will be removed once the new model cache behaviour is out of beta.
log_memory_usage: If True, a memory snapshot will be captured before and after every model cache operation, and the result will be logged (at debug level). There is a time cost to capturing the memory snapshots, so it is recommended to only enable this feature if you are actively inspecting the model cache's behaviour.
device_working_mem_gb: The amount of working memory to keep available on the compute device (in GB). Has no effect if running on CPU. If you are experiencing OOM errors, try increasing this value.
enable_partial_loading: Enable partial loading of models. This enables models to run with reduced VRAM requirements (at the cost of slower speed) by streaming the model from RAM to VRAM as it's used. In some edge cases, partial loading can cause models to run more slowly if they were previously being fully loaded into VRAM.
keep_ram_copy_of_weights: Whether to keep a full RAM copy of a model's weights when the model is loaded in VRAM. Keeping a RAM copy increases average RAM usage, but speeds up model switching and LoRA patching (assuming there is sufficient RAM). Set this to False if RAM pressure is consistently high.
ram: DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_ram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.
vram: DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_vram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.
lazy_offload: DEPRECATED: This setting is no longer used. Lazy-offloading is enabled by default. This config setting will be removed once the new model cache behavior is stable.
device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `cuda:1`, `mps`
precision: Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.<br>Valid values: `auto`, `float16`, `bfloat16`, `float32`
sequential_guidance: Whether to calculate guidance in serial instead of in parallel, lowering memory requirements.
@@ -148,7 +144,6 @@ class InvokeAIAppConfig(BaseSettings):
log_format: LOG_FORMAT = Field(default="color", description='Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style.')
log_level: LOG_LEVEL = Field(default="info", description="Emit logging messages at this level or higher.")
log_sql: bool = Field(default=False, description="Log SQL queries. `log_level` must be `debug` for this to do anything. Extremely verbose.")
log_level_network: LOG_LEVEL = Field(default='warning', description="Log level for network-related messages. 'info' and 'debug' are very verbose.")

# Development
use_memory_db: bool = Field(default=False, description="Use in-memory database. Useful for development.")
@@ -158,16 +153,12 @@ class InvokeAIAppConfig(BaseSettings):
profiles_dir: Path = Field(default=Path("profiles"), description="Path to profiles output directory.")

# CACHE
max_cache_ram_gb: Optional[float] = Field(default=None, gt=0, description="The maximum amount of CPU RAM to use for model caching in GB. If unset, the limit will be configured based on the available RAM. In most cases, it is recommended to leave this unset.")
max_cache_vram_gb: Optional[float] = Field(default=None, ge=0, description="The amount of VRAM to use for model caching in GB. If unset, the limit will be configured based on the available VRAM and the device_working_mem_gb. In most cases, it is recommended to leave this unset.")
ram: Optional[float] = Field(default=None, gt=0, description="The maximum amount of CPU RAM to use for model caching in GB. If unset, the limit will be configured based on the available RAM. In most cases, it is recommended to leave this unset.")
vram: Optional[float] = Field(default=None, ge=0, description="The amount of VRAM to use for model caching in GB. If unset, the limit will be configured based on the available VRAM and the device_working_mem_gb. In most cases, it is recommended to leave this unset.")
lazy_offload: bool = Field(default=True, description="DEPRECATED: This setting is no longer used. Lazy-offloading is enabled by default. This config setting will be removed once the new model cache behaviour is out of beta.")
log_memory_usage: bool = Field(default=False, description="If True, a memory snapshot will be captured before and after every model cache operation, and the result will be logged (at debug level). There is a time cost to capturing the memory snapshots, so it is recommended to only enable this feature if you are actively inspecting the model cache's behaviour.")
device_working_mem_gb: float = Field(default=3, description="The amount of working memory to keep available on the compute device (in GB). Has no effect if running on CPU. If you are experiencing OOM errors, try increasing this value.")
device_working_mem_gb: float = Field(default=2, description="The amount of working memory to keep available on the compute device (in GB). Has no effect if running on CPU. If you are experiencing OOM errors, try increasing this value.")
enable_partial_loading: bool = Field(default=False, description="Enable partial loading of models. This enables models to run with reduced VRAM requirements (at the cost of slower speed) by streaming the model from RAM to VRAM as it's used. In some edge cases, partial loading can cause models to run more slowly if they were previously being fully loaded into VRAM.")
keep_ram_copy_of_weights: bool = Field(default=True, description="Whether to keep a full RAM copy of a model's weights when the model is loaded in VRAM. Keeping a RAM copy increases average RAM usage, but speeds up model switching and LoRA patching (assuming there is sufficient RAM). Set this to False if RAM pressure is consistently high.")
# Deprecated CACHE configs
ram: Optional[float] = Field(default=None, gt=0, description="DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_ram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.")
vram: Optional[float] = Field(default=None, ge=0, description="DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_vram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.")
lazy_offload: bool = Field(default=True, description="DEPRECATED: This setting is no longer used. Lazy-offloading is enabled by default. This config setting will be removed once the new model cache behavior is stable.")

# DEVICE
device: DEVICE = Field(default="auto", description="Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.")

@@ -8,7 +8,7 @@ import time
import traceback
from pathlib import Path
from queue import Empty, PriorityQueue
from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Set
from typing import Any, Dict, List, Literal, Optional, Set

import requests
from pydantic.networks import AnyHttpUrl
@@ -28,13 +28,11 @@ from invokeai.app.services.download.download_base import (
ServiceInactiveException,
UnknownJobIDException,
)
from invokeai.app.services.events.events_base import EventServiceBase
from invokeai.app.util.misc import get_iso_timestamp
from invokeai.backend.model_manager.metadata import RemoteModelFile
from invokeai.backend.util.logging import InvokeAILogger

if TYPE_CHECKING:
from invokeai.app.services.events.events_base import EventServiceBase

# Maximum number of bytes to download during each call to requests.iter_content()
DOWNLOAD_CHUNK_SIZE = 100000

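This hunk toggles whether EventServiceBase is imported at module scope or only under typing.TYPE_CHECKING. The TYPE_CHECKING form keeps the name available for annotations without a runtime import, which is a standard way to break circular imports between services. A sketch of the pattern (the class body here is illustrative):

from __future__ import annotations

from typing import TYPE_CHECKING, Optional

if TYPE_CHECKING:
    # Evaluated only by static type checkers; never imported at runtime.
    from invokeai.app.services.events.events_base import EventServiceBase

class DownloadQueueService:
    def __init__(self, event_bus: Optional["EventServiceBase"] = None) -> None:
        self._event_bus = event_bus
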
@@ -0,0 +1 @@
from .events_base import EventServiceBase  # noqa F401

@@ -4,7 +4,6 @@ from fastapi_events.handlers.local import local_handler
from fastapi_events.registry.payload_schema import registry as payload_schema
from pydantic import BaseModel, ConfigDict, Field

from invokeai.app.services.model_install.model_install_common import ModelInstallJob, ModelSource
from invokeai.app.services.session_processor.session_processor_common import ProgressImage
from invokeai.app.services.session_queue.session_queue_common import (
QUEUE_ITEM_STATUS,
@@ -19,7 +18,7 @@ from invokeai.backend.model_manager.config import AnyModelConfig, SubModelType

if TYPE_CHECKING:
from invokeai.app.services.download.download_base import DownloadJob
from invokeai.app.services.model_install.model_install_common import ModelInstallJob, ModelSource
from invokeai.app.services.model_install.model_install_common import ModelInstallJob


class EventBase(BaseModel):
@@ -423,7 +422,7 @@ class ModelInstallDownloadStartedEvent(ModelEventBase):
__event_name__ = "model_install_download_started"

id: int = Field(description="The ID of the install job")
source: ModelSource = Field(description="Source of the model; local path, repo_id or url")
source: str = Field(description="Source of the model; local path, repo_id or url")
local_path: str = Field(description="Where model is downloading to")
bytes: int = Field(description="Number of bytes downloaded so far")
total_bytes: int = Field(description="Total size of download, including all files")
@@ -444,7 +443,7 @@ class ModelInstallDownloadStartedEvent(ModelEventBase):
]
return cls(
id=job.id,
source=job.source,
source=str(job.source),
local_path=job.local_path.as_posix(),
parts=parts,
bytes=job.bytes,
@@ -459,7 +458,7 @@ class ModelInstallDownloadProgressEvent(ModelEventBase):
__event_name__ = "model_install_download_progress"

id: int = Field(description="The ID of the install job")
source: ModelSource = Field(description="Source of the model; local path, repo_id or url")
source: str = Field(description="Source of the model; local path, repo_id or url")
local_path: str = Field(description="Where model is downloading to")
bytes: int = Field(description="Number of bytes downloaded so far")
total_bytes: int = Field(description="Total size of download, including all files")
@@ -480,7 +479,7 @@ class ModelInstallDownloadProgressEvent(ModelEventBase):
]
return cls(
id=job.id,
source=job.source,
source=str(job.source),
local_path=job.local_path.as_posix(),
parts=parts,
bytes=job.bytes,
@@ -495,11 +494,11 @@ class ModelInstallDownloadsCompleteEvent(ModelEventBase):
__event_name__ = "model_install_downloads_complete"

id: int = Field(description="The ID of the install job")
source: ModelSource = Field(description="Source of the model; local path, repo_id or url")
source: str = Field(description="Source of the model; local path, repo_id or url")

@classmethod
def build(cls, job: "ModelInstallJob") -> "ModelInstallDownloadsCompleteEvent":
return cls(id=job.id, source=job.source)
return cls(id=job.id, source=str(job.source))


@payload_schema.register
@@ -509,11 +508,11 @@ class ModelInstallStartedEvent(ModelEventBase):
__event_name__ = "model_install_started"

id: int = Field(description="The ID of the install job")
source: ModelSource = Field(description="Source of the model; local path, repo_id or url")
source: str = Field(description="Source of the model; local path, repo_id or url")

@classmethod
def build(cls, job: "ModelInstallJob") -> "ModelInstallStartedEvent":
return cls(id=job.id, source=job.source)
return cls(id=job.id, source=str(job.source))


@payload_schema.register
@@ -523,14 +522,14 @@ class ModelInstallCompleteEvent(ModelEventBase):
__event_name__ = "model_install_complete"

id: int = Field(description="The ID of the install job")
source: ModelSource = Field(description="Source of the model; local path, repo_id or url")
source: str = Field(description="Source of the model; local path, repo_id or url")
key: str = Field(description="Model config record key")
total_bytes: Optional[int] = Field(description="Size of the model (may be None for installation of a local path)")

@classmethod
def build(cls, job: "ModelInstallJob") -> "ModelInstallCompleteEvent":
assert job.config_out is not None
return cls(id=job.id, source=job.source, key=(job.config_out.key), total_bytes=job.total_bytes)
return cls(id=job.id, source=str(job.source), key=(job.config_out.key), total_bytes=job.total_bytes)


@payload_schema.register
@@ -540,11 +539,11 @@ class ModelInstallCancelledEvent(ModelEventBase):
__event_name__ = "model_install_cancelled"

id: int = Field(description="The ID of the install job")
source: ModelSource = Field(description="Source of the model; local path, repo_id or url")
source: str = Field(description="Source of the model; local path, repo_id or url")

@classmethod
def build(cls, job: "ModelInstallJob") -> "ModelInstallCancelledEvent":
return cls(id=job.id, source=job.source)
return cls(id=job.id, source=str(job.source))


@payload_schema.register
@@ -554,7 +553,7 @@ class ModelInstallErrorEvent(ModelEventBase):
__event_name__ = "model_install_error"

id: int = Field(description="The ID of the install job")
source: ModelSource = Field(description="Source of the model; local path, repo_id or url")
source: str = Field(description="Source of the model; local path, repo_id or url")
error_type: str = Field(description="The name of the exception")
error: str = Field(description="A text description of the exception")

@@ -562,7 +561,7 @@ class ModelInstallErrorEvent(ModelEventBase):
def build(cls, job: "ModelInstallJob") -> "ModelInstallErrorEvent":
assert job.error_type is not None
assert job.error is not None
return cls(id=job.id, source=job.source, error_type=job.error_type, error=job.error)
return cls(id=job.id, source=str(job.source), error_type=job.error_type, error=job.error)


class BulkDownloadEventBase(EventBase):

@@ -22,7 +22,6 @@ class ModelCacheStatsSummary:
    """The stats for the model cache."""

    high_water_mark_gb: float
    cache_size_gb: float
    total_usage_gb: float
    cache_hits: int
    cache_misses: int
@@ -79,7 +78,7 @@ class InvocationStatsSummary:
        _str += f" Model cache misses: {self.model_cache_stats.cache_misses}\n"
        _str += f" Models cached: {self.model_cache_stats.models_cached}\n"
        _str += f" Models cleared from cache: {self.model_cache_stats.models_cleared}\n"
        _str += f" Cache high water mark: {self.model_cache_stats.high_water_mark_gb:4.2f}/{self.model_cache_stats.cache_size_gb:4.2f}G\n"
        _str += f" Cache high water mark: {self.model_cache_stats.high_water_mark_gb:4.2f}G\n"

        return _str


@@ -111,7 +111,6 @@ class InvocationStatsService(InvocationStatsServiceBase):
            cache_hits=cache_stats.hits,
            cache_misses=cache_stats.misses,
            high_water_mark_gb=cache_stats.high_watermark / GB,
            cache_size_gb=cache_stats.cache_size / GB,
            total_usage_gb=sum(list(cache_stats.loaded_model_sizes.values())) / GB,
            models_cached=cache_stats.in_cache,
            models_cleared=cache_stats.cleared,

@@ -3,20 +3,18 @@

from abc import ABC, abstractmethod
from pathlib import Path
from typing import TYPE_CHECKING, List, Optional, Union
from typing import List, Optional, Union

from pydantic.networks import AnyHttpUrl

from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.download import DownloadQueueServiceBase
from invokeai.app.services.events.events_base import EventServiceBase
from invokeai.app.services.invoker import Invoker
from invokeai.app.services.model_install.model_install_common import ModelInstallJob, ModelSource
from invokeai.app.services.model_records import ModelRecordChanges, ModelRecordServiceBase
from invokeai.backend.model_manager import AnyModelConfig

if TYPE_CHECKING:
    from invokeai.app.services.events.events_base import EventServiceBase


class ModelInstallServiceBase(ABC):
    """Abstract base class for InvokeAI model installation."""

@@ -9,7 +9,7 @@ from pathlib import Path
from queue import Empty, Queue
from shutil import copyfile, copytree, move, rmtree
from tempfile import mkdtemp
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Type, Union
from typing import Any, Dict, List, Optional, Tuple, Type, Union

import torch
import yaml
@@ -20,6 +20,7 @@ from requests import Session

from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.download import DownloadQueueServiceBase, MultiFileDownloadJob
from invokeai.app.services.events.events_base import EventServiceBase
from invokeai.app.services.invoker import Invoker
from invokeai.app.services.model_install.model_install_base import ModelInstallServiceBase
from invokeai.app.services.model_install.model_install_common import (
@@ -56,10 +57,6 @@ from invokeai.backend.util.catch_sigint import catch_sigint
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.util import slugify

if TYPE_CHECKING:
    from invokeai.app.services.events.events_base import EventServiceBase


TMPDIR_PREFIX = "tmpinstall_"


@@ -441,10 +438,9 @@ class ModelInstallService(ModelInstallServiceBase):
        variants = "|".join(ModelRepoVariant.__members__.values())
        hf_repoid_re = f"^([^/:]+/[^/:]+)(?::({variants})?(?::/?([^:]+))?)?$"
        source_obj: Optional[StringLikeSource] = None
        source_stripped = source.strip('"')

        if Path(source_stripped).exists():  # A local file or directory
            source_obj = LocalModelSource(path=Path(source_stripped))
        if Path(source).exists():  # A local file or directory
            source_obj = LocalModelSource(path=Path(source))
        elif match := re.match(hf_repoid_re, source):
            source_obj = HFModelSource(
                repo_id=match.group(1),
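As an aside, a quick sketch of what the hf_repoid_re pattern above accepts. The variant list here is a hypothetical stand-in for ModelRepoVariant.__members__.values(), and parse_hf_source is not a function from the repo:

import re

VARIANTS = "fp16|fp32|onnx"  # stand-in for the real ModelRepoVariant values
HF_REPOID_RE = f"^([^/:]+/[^/:]+)(?::({VARIANTS})?(?::/?([^:]+))?)?$"

def parse_hf_source(source: str) -> tuple[str, str | None, str | None]:
    """Split 'owner/repo[:variant[:subfolder]]' into its parts."""
    match = re.match(HF_REPOID_RE, source)
    if match is None:
        raise ValueError(f"Not a HuggingFace repo id: {source}")
    return match.group(1), match.group(2), match.group(3)

# parse_hf_source("owner/repo:fp16:transformer") -> ("owner/repo", "fp16", "transformer")
# parse_hf_source("owner/repo") -> ("owner/repo", None, None)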
@@ -14,7 +14,11 @@ class ModelLoadServiceBase(ABC):
    """Wrapper around AnyModelLoader."""

    @abstractmethod
    def load_model(self, model_config: AnyModelConfig, submodel_type: Optional[SubModelType] = None) -> LoadedModel:
    def load_model(
        self,
        model_config: AnyModelConfig,
        submodel_type: Optional[SubModelType] = None,
    ) -> LoadedModel:
        """
        Given a model's configuration, load it and return the LoadedModel object.


@@ -49,7 +49,11 @@ class ModelLoadService(ModelLoadServiceBase):
        """Return the RAM cache used by this loader."""
        return self._ram_cache

    def load_model(self, model_config: AnyModelConfig, submodel_type: Optional[SubModelType] = None) -> LoadedModel:
    def load_model(
        self,
        model_config: AnyModelConfig,
        submodel_type: Optional[SubModelType] = None,
    ) -> LoadedModel:
        """
        Given a model's configuration, load it and return the LoadedModel object.


@@ -84,11 +84,10 @@ class ModelManagerService(ModelManagerServiceBase):
        ram_cache = ModelCache(
            execution_device_working_mem_gb=app_config.device_working_mem_gb,
            enable_partial_loading=app_config.enable_partial_loading,
            keep_ram_copy_of_weights=app_config.keep_ram_copy_of_weights,
            max_ram_cache_size_gb=app_config.max_cache_ram_gb,
            max_vram_cache_size_gb=app_config.max_cache_vram_gb,
            execution_device=execution_device or TorchDevice.choose_torch_device(),
            max_ram_cache_size_gb=app_config.ram,
            max_vram_cache_size_gb=app_config.vram,
            logger=logger,
            execution_device=execution_device or TorchDevice.choose_torch_device(),
        )
        loader = ModelLoadService(
            app_config=app_config,

@@ -439,9 +439,7 @@ class DefaultSessionProcessor(SessionProcessorBase):
                    poll_now_event.wait(self._polling_interval)
                    continue

                self._invoker.services.logger.info(
                    f"Executing queue item {self._queue_item.item_id}, session {self._queue_item.session_id}"
                )
                self._invoker.services.logger.debug(f"Executing queue item {self._queue_item.item_id}")
                cancel_event.clear()

                # Run the graph

@@ -5,7 +5,6 @@ from invokeai.app.services.session_queue.session_queue_common import (
    QUEUE_ITEM_STATUS,
    Batch,
    BatchStatus,
    CancelAllExceptCurrentResult,
    CancelByBatchIDsResult,
    CancelByDestinationResult,
    CancelByQueueIDResult,
@@ -113,11 +112,6 @@ class SessionQueueBase(ABC):
        """Cancels all queue items with matching queue ID"""
        pass

    @abstractmethod
    def cancel_all_except_current(self, queue_id: str) -> CancelAllExceptCurrentResult:
        """Cancels all queue items except in-progress items"""
        pass

    @abstractmethod
    def list_queue_items(
        self,

@@ -108,16 +108,8 @@ class Batch(BaseModel):
            return v
        for batch_data_list in v:
            for datum in batch_data_list:
                if not datum.items:
                    continue

                # Special handling for numbers - they can be mixed
                # TODO(psyche): Update BatchDatum to have a `type` field to specify the type of the items, then we can have strict float and int fields
                if all(isinstance(item, (int, float)) for item in datum.items):
                    continue

                # Get the type of the first item in the list
                first_item_type = type(datum.items[0])
                first_item_type = type(datum.items[0]) if datum.items else None
                for item in datum.items:
                    if type(item) is not first_item_type:
                        raise BatchItemsTypeError("All items in a batch must have the same type")
@@ -374,12 +366,6 @@ class CancelByQueueIDResult(CancelByBatchIDsResult):
    pass


class CancelAllExceptCurrentResult(CancelByBatchIDsResult):
    """Result of canceling all except current"""

    pass


class IsEmptyResult(BaseModel):
    """Result of checking if the session queue is empty"""


@@ -9,7 +9,6 @@ from invokeai.app.services.session_queue.session_queue_common import (
    QUEUE_ITEM_STATUS,
    Batch,
    BatchStatus,
    CancelAllExceptCurrentResult,
    CancelByBatchIDsResult,
    CancelByDestinationResult,
    CancelByQueueIDResult,
@@ -511,39 +510,6 @@ class SqliteSessionQueue(SessionQueueBase):
            self.__lock.release()
        return CancelByQueueIDResult(canceled=count)

    def cancel_all_except_current(self, queue_id: str) -> CancelAllExceptCurrentResult:
        try:
            where = """--sql
                WHERE
                  queue_id == ?
                  AND status == 'pending'
                """
            self.__lock.acquire()
            self.__cursor.execute(
                f"""--sql
                SELECT COUNT(*)
                FROM session_queue
                {where};
                """,
                (queue_id,),
            )
            count = self.__cursor.fetchone()[0]
            self.__cursor.execute(
                f"""--sql
                UPDATE session_queue
                SET status = 'canceled'
                {where};
                """,
                (queue_id,),
            )
            self.__conn.commit()
        except Exception:
            self.__conn.rollback()
            raise
        finally:
            self.__lock.release()
        return CancelAllExceptCurrentResult(canceled=count)

    def get_queue_item(self, item_id: int) -> SessionQueueItem:
        try:
            self.__lock.acquire()

@@ -51,18 +51,15 @@ class Edge(BaseModel):
    source: EdgeConnection = Field(description="The connection for the edge's from node and field")
    destination: EdgeConnection = Field(description="The connection for the edge's to node and field")

    def __str__(self):
        return f"{self.source.node_id}.{self.source.field} -> {self.destination.node_id}.{self.destination.field}"


def get_output_field_type(node: BaseInvocation, field: str) -> Any:
def get_output_field(node: BaseInvocation, field: str) -> Any:
    node_type = type(node)
    node_outputs = get_type_hints(node_type.get_output_annotation())
    node_output_field = node_outputs.get(field) or None
    return node_output_field


def get_input_field_type(node: BaseInvocation, field: str) -> Any:
def get_input_field(node: BaseInvocation, field: str) -> Any:
    node_type = type(node)
    node_inputs = get_type_hints(node_type)
    node_input_field = node_inputs.get(field) or None
@@ -96,10 +93,6 @@ def is_list_or_contains_list(t):
    return False


def is_any(t: Any) -> bool:
    return t == Any or Any in get_args(t)


def are_connection_types_compatible(from_type: Any, to_type: Any) -> bool:
    if not from_type:
        return False
@@ -109,7 +102,13 @@ def are_connection_types_compatible(from_type: Any, to_type: Any) -> bool:
    # TODO: this is pretty forgiving on generic types. Clean that up (need to handle optionals and such)
    if from_type and to_type:
        # Ports are compatible
        if from_type == to_type or is_any(from_type) or is_any(to_type):
        if (
            from_type == to_type
            or from_type == Any
            or to_type == Any
            or Any in get_args(from_type)
            or Any in get_args(to_type)
        ):
            return True

        if from_type in get_args(to_type):
@@ -141,10 +140,10 @@ def are_connections_compatible(
    """Determines if a connection between fields of two nodes is compatible."""

    # TODO: handle iterators and collectors
    from_type = get_output_field_type(from_node, from_field)
    to_type = get_input_field_type(to_node, to_field)
    from_node_field = get_output_field(from_node, from_field)
    to_node_field = get_input_field(to_node, to_field)

    return are_connection_types_compatible(from_type, to_type)
    return are_connection_types_compatible(from_node_field, to_node_field)


T = TypeVar("T")
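For readers less familiar with typing introspection, here is a standalone sketch of the Any-compatibility idiom factored out as is_any() above. This is self-contained illustration, not repo code:

from typing import Any, Union, get_args

def is_any(t: Any) -> bool:
    # True for Any itself, and for unions that include Any (e.g. Union[int, Any]).
    return t == Any or Any in get_args(t)

assert is_any(Any)
assert is_any(Union[int, Any])
assert not is_any(Union[int, str])
assert not is_any(int)

get_args() returns the type arguments of a generic or union (and an empty tuple for plain types), which is why the second clause only fires for parameterized types.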
@@ -441,19 +440,17 @@ class Graph(BaseModel):
                self.get_node(edge.destination.node_id),
                edge.destination.field,
            ):
                raise InvalidEdgeError(f"Edge source and target types do not match ({edge})")
                raise InvalidEdgeError(
                    f"Invalid edge from {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}"
                )

        # Validate all iterators & collectors
        # TODO: may need to validate all iterators & collectors in subgraphs so edge connections in parent graphs will be available
        for node in self.nodes.values():
            if isinstance(node, IterateInvocation):
                err = self._is_iterator_connection_valid(node.id)
                if err is not None:
                    raise InvalidEdgeError(f"Invalid iterator node ({node.id}): {err}")
            if isinstance(node, CollectInvocation):
                err = self._is_collector_connection_valid(node.id)
                if err is not None:
                    raise InvalidEdgeError(f"Invalid collector node ({node.id}): {err}")
            if isinstance(node, IterateInvocation) and not self._is_iterator_connection_valid(node.id):
                raise InvalidEdgeError(f"Invalid iterator node {node.id}")
            if isinstance(node, CollectInvocation) and not self._is_collector_connection_valid(node.id):
                raise InvalidEdgeError(f"Invalid collector node {node.id}")

        return None

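The two variants interleaved in the hunk above illustrate a broader pattern worth naming: boolean validators are terser, while validators that return str | None can surface the reason for a failure in the raised error. A minimal self-contained sketch of the two styles (toy names, not repo code):

def is_valid_bool(inputs: list) -> bool:
    return len(inputs) <= 1

def is_valid_msg(inputs: list) -> str | None:
    """Return None when valid, else a human-readable error message."""
    if len(inputs) > 1:
        return "Iterator may only have one input edge"
    return None

err = is_valid_msg([1, 2])
if err is not None:
    raise ValueError(err)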
@@ -480,11 +477,11 @@ class Graph(BaseModel):

    def _is_destination_field_Any(self, edge: Edge) -> bool:
        """Checks if the destination field for an edge is of type typing.Any"""
        return get_input_field_type(self.get_node(edge.destination.node_id), edge.destination.field) == Any
        return get_input_field(self.get_node(edge.destination.node_id), edge.destination.field) == Any

    def _is_destination_field_list_of_Any(self, edge: Edge) -> bool:
        """Checks if the destination field for an edge is of type list[typing.Any]"""
        return get_input_field_type(self.get_node(edge.destination.node_id), edge.destination.field) == list[Any]
        return get_input_field(self.get_node(edge.destination.node_id), edge.destination.field) == list[Any]

    def _validate_edge(self, edge: Edge):
        """Validates that a new edge doesn't create a cycle in the graph"""
@@ -494,40 +491,55 @@ class Graph(BaseModel):
            from_node = self.get_node(edge.source.node_id)
            to_node = self.get_node(edge.destination.node_id)
        except NodeNotFoundError:
            raise InvalidEdgeError(f"One or both nodes don't exist ({edge})")
            raise InvalidEdgeError(f"One or both nodes don't exist: {edge.source.node_id} -> {edge.destination.node_id}")

        # Validate that an edge to this node+field doesn't already exist
        input_edges = self._get_input_edges(edge.destination.node_id, edge.destination.field)
        if len(input_edges) > 0 and not isinstance(to_node, CollectInvocation):
            raise InvalidEdgeError(f"Edge already exists ({edge})")
            raise InvalidEdgeError(
                f"Edge to node {edge.destination.node_id} field {edge.destination.field} already exists"
            )

        # Validate that no cycles would be created
        g = self.nx_graph_flat()
        g.add_edge(edge.source.node_id, edge.destination.node_id)
        if not nx.is_directed_acyclic_graph(g):
            raise InvalidEdgeError(f"Edge creates a cycle in the graph ({edge})")
            raise InvalidEdgeError(
                f"Edge creates a cycle in the graph: {edge.source.node_id} -> {edge.destination.node_id}"
            )

        # Validate that the field types are compatible
        if not are_connections_compatible(from_node, edge.source.field, to_node, edge.destination.field):
            raise InvalidEdgeError(f"Field types are incompatible ({edge})")
            raise InvalidEdgeError(
                f"Fields are incompatible: cannot connect {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}"
            )

        # Validate if iterator output type matches iterator input type (if this edge results in both being set)
        if isinstance(to_node, IterateInvocation) and edge.destination.field == "collection":
            err = self._is_iterator_connection_valid(edge.destination.node_id, new_input=edge.source)
            if err is not None:
                raise InvalidEdgeError(f"Iterator input type does not match iterator output type ({edge}): {err}")
            if not self._is_iterator_connection_valid(edge.destination.node_id, new_input=edge.source):
                raise InvalidEdgeError(
                    f"Iterator input type does not match iterator output type: {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}"
                )

        # Validate if iterator input type matches output type (if this edge results in both being set)
        if isinstance(from_node, IterateInvocation) and edge.source.field == "item":
            err = self._is_iterator_connection_valid(edge.source.node_id, new_output=edge.destination)
            if err is not None:
                raise InvalidEdgeError(f"Iterator output type does not match iterator input type ({edge}): {err}")
            if not self._is_iterator_connection_valid(edge.source.node_id, new_output=edge.destination):
                raise InvalidEdgeError(
                    f"Iterator output type does not match iterator input type: {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}"
                )

        # Validate if collector input type matches output type (if this edge results in both being set)
        if isinstance(to_node, CollectInvocation) and edge.destination.field == "item":
            err = self._is_collector_connection_valid(edge.destination.node_id, new_input=edge.source)
            if err is not None:
                raise InvalidEdgeError(f"Collector output type does not match collector input type ({edge}): {err}")
            if not self._is_collector_connection_valid(edge.destination.node_id, new_input=edge.source):
                raise InvalidEdgeError(
                    f"Collector output type does not match collector input type: {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}"
                )

        # Validate that we are not connecting collector to iterator (currently unsupported)
        if isinstance(from_node, CollectInvocation) and isinstance(to_node, IterateInvocation):
            raise InvalidEdgeError(
                f"Cannot connect collector to iterator: {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}"
            )

        # Validate if collector output type matches input type (if this edge results in both being set) - skip if the destination field is not Any or list[Any]
        if (
@@ -536,9 +548,10 @@ class Graph(BaseModel):
            and not self._is_destination_field_list_of_Any(edge)
            and not self._is_destination_field_Any(edge)
        ):
            err = self._is_collector_connection_valid(edge.source.node_id, new_output=edge.destination)
            if err is not None:
                raise InvalidEdgeError(f"Collector input type does not match collector output type ({edge}): {err}")
            if not self._is_collector_connection_valid(edge.source.node_id, new_output=edge.destination):
                raise InvalidEdgeError(
                    f"Collector input type does not match collector output type: {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}"
                )

    def has_node(self, node_id: str) -> bool:
        """Determines whether or not a node exists in the graph."""
@@ -621,7 +634,7 @@ class Graph(BaseModel):
        node_id: str,
        new_input: Optional[EdgeConnection] = None,
        new_output: Optional[EdgeConnection] = None,
    ) -> str | None:
    ) -> bool:
        inputs = [e.source for e in self._get_input_edges(node_id, "collection")]
        outputs = [e.destination for e in self._get_output_edges(node_id, "item")]

@@ -632,47 +645,29 @@ class Graph(BaseModel):

        # Only one input is allowed for iterators
        if len(inputs) > 1:
            return "Iterator may only have one input edge"

        input_node = self.get_node(inputs[0].node_id)
            return False

        # Get input and output fields (the fields linked to the iterator's input/output)
        input_field_type = get_output_field_type(input_node, inputs[0].field)
        output_field_types = [get_input_field_type(self.get_node(e.node_id), e.field) for e in outputs]
        input_field = get_output_field(self.get_node(inputs[0].node_id), inputs[0].field)
        output_fields = [get_input_field(self.get_node(e.node_id), e.field) for e in outputs]

        # Input type must be a list
        if get_origin(input_field_type) is not list:
            return "Iterator input must be a collection"
        if get_origin(input_field) is not list:
            return False

        # Validate that all outputs match the input type
        input_field_item_type = get_args(input_field_type)[0]
        if not all((are_connection_types_compatible(input_field_item_type, t) for t in output_field_types)):
            return "Iterator outputs must connect to an input with a matching type"
        input_field_item_type = get_args(input_field)[0]
        if not all((are_connection_types_compatible(input_field_item_type, f) for f in output_fields)):
            return False

        # Collector input type must match all iterator output types
        if isinstance(input_node, CollectInvocation):
            # Traverse the graph to find the first collector input edge. Collectors validate that their collection
            # inputs are all of the same type, so we can use the first input edge to determine the collector's type
            first_collector_input_edge = self._get_input_edges(input_node.id, "item")[0]
            first_collector_input_type = get_output_field_type(
                self.get_node(first_collector_input_edge.source.node_id), first_collector_input_edge.source.field
            )
            resolved_collector_type = (
                first_collector_input_type
                if get_origin(first_collector_input_type) is None
                else get_args(first_collector_input_type)
            )
            if not all((are_connection_types_compatible(resolved_collector_type, t) for t in output_field_types)):
                return "Iterator collection type must match all iterator output types"

        return None
        return True

    def _is_collector_connection_valid(
        self,
        node_id: str,
        new_input: Optional[EdgeConnection] = None,
        new_output: Optional[EdgeConnection] = None,
    ) -> str | None:
    ) -> bool:
        inputs = [e.source for e in self._get_input_edges(node_id, "item")]
        outputs = [e.destination for e in self._get_output_edges(node_id, "collection")]

@@ -682,42 +677,38 @@ class Graph(BaseModel):
            outputs.append(new_output)

        # Get input and output fields (the fields linked to the iterator's input/output)
        input_field_types = [get_output_field_type(self.get_node(e.node_id), e.field) for e in inputs]
        output_field_types = [get_input_field_type(self.get_node(e.node_id), e.field) for e in outputs]
        input_fields = [get_output_field(self.get_node(e.node_id), e.field) for e in inputs]
        output_fields = [get_input_field(self.get_node(e.node_id), e.field) for e in outputs]

        # Validate that all inputs are derived from or match a single type
        input_field_types = {
            resolved_type
            for input_field_type in input_field_types
            for resolved_type in (
                [input_field_type] if get_origin(input_field_type) is None else get_args(input_field_type)
            )
            if resolved_type != NoneType
            t
            for input_field in input_fields
            for t in ([input_field] if get_origin(input_field) is None else get_args(input_field))
            if t != NoneType
        }  # Get unique types
        type_tree = nx.DiGraph()
        type_tree.add_nodes_from(input_field_types)
        type_tree.add_edges_from([e for e in itertools.permutations(input_field_types, 2) if issubclass(e[1], e[0])])
        type_degrees = type_tree.in_degree(type_tree.nodes)
        if sum((t[1] == 0 for t in type_degrees)) != 1:  # type: ignore
            return "Collector input collection items must be of a single type"
            return False  # There is more than one root type

        # Get the input root type
        input_root_type = next(t[0] for t in type_degrees if t[1] == 0)  # type: ignore

        # Verify that all outputs are lists
        if not all(is_list_or_contains_list(t) or is_any(t) for t in output_field_types):
            return "Collector output must connect to a collection input"
        if not all(is_list_or_contains_list(f) for f in output_fields):
            return False

        # Verify that all outputs match the input type (are a base class or the same class)
        if not all(
            is_any(t)
            or is_union_subtype(input_root_type, get_args(t)[0])
            or issubclass(input_root_type, get_args(t)[0])
            for t in output_field_types
            is_union_subtype(input_root_type, get_args(f)[0]) or issubclass(input_root_type, get_args(f)[0])
            for f in output_fields
        ):
            return "Collector outputs must connect to a collection input with a matching type"
            return False

        return None
        return True

    def nx_graph(self) -> nx.DiGraph:
        """Returns a NetworkX DiGraph representing the layout of this graph"""

@@ -361,7 +361,9 @@ class ModelsInterface(InvocationContextInterface):
        return self._services.model_manager.store.exists(identifier.key)

    def load(
        self, identifier: Union[str, "ModelIdentifierField"], submodel_type: Optional[SubModelType] = None
        self,
        identifier: Union[str, "ModelIdentifierField"],
        submodel_type: Optional[SubModelType] = None,
    ) -> LoadedModel:
        """Load a model.


@@ -1,26 +0,0 @@
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.backend.model_manager.config import BaseModelType, SubModelType


def preprocess_t5_encoder_model_identifier(model_identifier: ModelIdentifierField) -> ModelIdentifierField:
    """A helper function to normalize a T5 encoder model identifier so that T5 models associated with FLUX
    or SD3 models can be used interchangeably.
    """
    if model_identifier.base == BaseModelType.Any:
        return model_identifier.model_copy(update={"submodel_type": SubModelType.TextEncoder2})
    elif model_identifier.base == BaseModelType.StableDiffusion3:
        return model_identifier.model_copy(update={"submodel_type": SubModelType.TextEncoder3})
    else:
        raise ValueError(f"Unsupported model base: {model_identifier.base}")


def preprocess_t5_tokenizer_model_identifier(model_identifier: ModelIdentifierField) -> ModelIdentifierField:
    """A helper function to normalize a T5 tokenizer model identifier so that T5 models associated with FLUX
    or SD3 models can be used interchangeably.
    """
    if model_identifier.base == BaseModelType.Any:
        return model_identifier.model_copy(update={"submodel_type": SubModelType.Tokenizer2})
    elif model_identifier.base == BaseModelType.StableDiffusion3:
        return model_identifier.model_copy(update={"submodel_type": SubModelType.Tokenizer3})
    else:
        raise ValueError(f"Unsupported model base: {model_identifier.base}")
@@ -8,7 +8,6 @@ from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from invokeai.backend.flux.ip_adapter.xlabs_ip_adapter_flux import XlabsIpAdapterFlux
from invokeai.backend.flux.modules.layers import DoubleStreamBlock
from invokeai.backend.util.devices import TorchDevice


class XLabsIPAdapterExtension:
@@ -46,7 +45,7 @@ class XLabsIPAdapterExtension:
    ) -> torch.Tensor:
        clip_image_processor = CLIPImageProcessor()
        clip_image: torch.Tensor = clip_image_processor(images=pil_image, return_tensors="pt").pixel_values
        clip_image = clip_image.to(device=TorchDevice.choose_torch_device(), dtype=image_encoder.dtype)
        clip_image = clip_image.to(device=image_encoder.device, dtype=image_encoder.dtype)
        clip_image_embeds = image_encoder(clip_image).image_embeds
        return clip_image_embeds


@@ -1,19 +1,11 @@
# Initially pulled from https://github.com/black-forest-labs/flux

from torch import Tensor, nn
from transformers import PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast

from invokeai.backend.util.devices import TorchDevice
from transformers import PreTrainedModel, PreTrainedTokenizer


class HFEncoder(nn.Module):
    def __init__(
        self,
        encoder: PreTrainedModel,
        tokenizer: PreTrainedTokenizer | PreTrainedTokenizerFast,
        is_clip: bool,
        max_length: int,
    ):
    def __init__(self, encoder: PreTrainedModel, tokenizer: PreTrainedTokenizer, is_clip: bool, max_length: int):
        super().__init__()
        self.max_length = max_length
        self.is_clip = is_clip
@@ -34,7 +26,7 @@ class HFEncoder(nn.Module):
        )

        outputs = self.hf_module(
            input_ids=batch_encoding["input_ids"].to(TorchDevice.choose_torch_device()),
            input_ids=batch_encoding["input_ids"].to(self.hf_module.device),
            attention_mask=None,
            output_hidden_states=False,
        )

@@ -18,7 +18,6 @@ from invokeai.backend.image_util.util import (
    resize_image_to_resolution,
    safe_step,
)
from invokeai.backend.model_manager.load.model_cache.utils import get_effective_device


class DoubleConvBlock(torch.nn.Module):
@@ -110,7 +109,7 @@ class HEDProcessor:
        Returns:
            The detected edges.
        """
        device = get_effective_device(self.network)
        device = next(iter(self.network.parameters())).device
        np_image = pil_to_np(input_image)
        np_image = normalize_image_channel_count(np_image)
        np_image = resize_image_to_resolution(np_image, detect_resolution)
@@ -184,7 +183,7 @@ class HEDEdgeDetector:
            The detected edges.
        """

        device = get_effective_device(self.model)
        device = next(iter(self.model.parameters())).device

        np_image = pil_to_np(image)


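This hunk, and several that follow, swap a get_effective_device() helper for direct parameter inspection. A self-contained sketch of that idiom, assuming the model keeps all of its weights on one device (get_model_device is an illustrative name, not repo code):

import torch

def get_model_device(model: torch.nn.Module) -> torch.device:
    """Infer a module's device from its first parameter; fall back to CPU for parameterless modules."""
    try:
        return next(iter(model.parameters())).device
    except StopIteration:
        return torch.device("cpu")

# Usage mirrors the pattern in the hunks above and below:
# device = get_model_device(self.model)

Note that the direct next(iter(...)) form raises StopIteration for a module with no parameters and assumes a single-device model, which is why a shared helper can be preferable once partial loading spreads weights across devices.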
@@ -7,7 +7,6 @@ from PIL import Image

import invokeai.backend.util.logging as logger
from invokeai.backend.model_manager.config import AnyModel
from invokeai.backend.model_manager.load.model_cache.utils import get_effective_device


def norm_img(np_img):
@@ -32,7 +31,7 @@ class LaMA:
        mask = norm_img(mask)
        mask = (mask > 0) * 1

        device = get_effective_device(self._model)
        device = next(self._model.buffers()).device
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        mask = torch.from_numpy(mask).unsqueeze(0).to(device)


@@ -17,7 +17,6 @@ from invokeai.backend.image_util.util import (
    pil_to_np,
    resize_image_to_resolution,
)
from invokeai.backend.model_manager.load.model_cache.utils import get_effective_device


class ResidualBlock(nn.Module):
@@ -131,7 +130,7 @@ class LineartProcessor:
        Returns:
            The detected lineart.
        """
        device = get_effective_device(self.model)
        device = next(iter(self.model.parameters())).device

        np_image = pil_to_np(input_image)
        np_image = normalize_image_channel_count(np_image)
@@ -202,7 +201,7 @@ class LineartEdgeDetector:
        Returns:
            The detected edges.
        """
        device = get_effective_device(self.model)
        device = next(iter(self.model.parameters())).device

        np_image = pil_to_np(image)


@@ -19,7 +19,6 @@ from invokeai.backend.image_util.util import (
    pil_to_np,
    resize_image_to_resolution,
)
from invokeai.backend.model_manager.load.model_cache.utils import get_effective_device


class UnetGenerator(nn.Module):
@@ -172,7 +171,7 @@ class LineartAnimeProcessor:
        Returns:
            The detected lineart.
        """
        device = get_effective_device(self.model)
        device = next(iter(self.model.parameters())).device
        np_image = pil_to_np(input_image)

        np_image = normalize_image_channel_count(np_image)
@@ -240,7 +239,7 @@ class LineartAnimeEdgeDetector:

    def run(self, image: Image.Image) -> Image.Image:
        """Processes an image and returns the detected edges."""
        device = get_effective_device(self.model)
        device = next(iter(self.model.parameters())).device

        np_image = pil_to_np(image)


@@ -14,8 +14,6 @@ import numpy as np
import torch
from torch.nn import functional as F

from invokeai.backend.model_manager.load.model_cache.utils import get_effective_device


def deccode_output_score_and_ptss(tpMap, topk_n = 200, ksize = 5):
    '''
@@ -51,7 +49,7 @@ def pred_lines(image, model,
               dist_thr=20.0):
    h, w, _ = image.shape

    device = get_effective_device(model)
    device = next(iter(model.parameters())).device
    h_ratio, w_ratio = [h / input_shape[0], w / input_shape[1]]

    resized_image = np.concatenate([cv2.resize(image, (input_shape[1], input_shape[0]), interpolation=cv2.INTER_AREA),
@@ -110,7 +108,7 @@ def pred_squares(image,
    '''
    h, w, _ = image.shape
    original_shape = [h, w]
    device = get_effective_device(model)
    device = next(iter(model.parameters())).device

    resized_image = np.concatenate([cv2.resize(image, (input_shape[0], input_shape[1]), interpolation=cv2.INTER_AREA),
                                    np.ones([input_shape[0], input_shape[1], 1])], axis=-1)

@@ -13,7 +13,6 @@ from PIL import Image

from invokeai.backend.image_util.normal_bae.nets.NNET import NNET
from invokeai.backend.image_util.util import np_to_pil, pil_to_np, resize_to_multiple
from invokeai.backend.model_manager.load.model_cache.utils import get_effective_device


class NormalMapDetector:
@@ -65,7 +64,7 @@ class NormalMapDetector:
    def run(self, image: Image.Image):
        """Processes an image and returns the detected normal map."""

        device = get_effective_device(self.model)
        device = next(iter(self.model.parameters())).device
        np_image = pil_to_np(image)

        height, width, _channels = np_image.shape

@@ -11,7 +11,6 @@ from PIL import Image

from invokeai.backend.image_util.pidi.model import PiDiNet, pidinet
from invokeai.backend.image_util.util import nms, normalize_image_channel_count, np_to_pil, pil_to_np, safe_step
from invokeai.backend.model_manager.load.model_cache.utils import get_effective_device


class PIDINetDetector:
@@ -46,7 +45,7 @@ class PIDINetDetector:
    ) -> Image.Image:
        """Processes an image and returns the detected edges."""

        device = get_effective_device(self.model)
        device = next(iter(self.model.parameters())).device

        np_img = pil_to_np(image)
        np_img = normalize_image_channel_count(np_img)

@@ -292,18 +292,6 @@ class ControlLoRALyCORISConfig(ModelConfigBase, ControlAdapterConfigBase):
        return Tag(f"{ModelType.ControlLoRa.value}.{ModelFormat.LyCORIS.value}")


class ControlLoRADiffusersConfig(ModelConfigBase, ControlAdapterConfigBase):
    """Model config for Control LoRA models."""

    type: Literal[ModelType.ControlLoRa] = ModelType.ControlLoRa
    trigger_phrases: Optional[set[str]] = Field(description="Set of trigger phrases for this model", default=None)
    format: Literal[ModelFormat.Diffusers] = ModelFormat.Diffusers

    @staticmethod
    def get_tag() -> Tag:
        return Tag(f"{ModelType.ControlLoRa.value}.{ModelFormat.Diffusers.value}")


class LoRADiffusersConfig(LoRAConfigBase):
    """Model config for LoRA/Diffusers models."""

@@ -561,7 +549,6 @@ AnyModelConfig = Annotated[
    Annotated[ControlNetCheckpointConfig, ControlNetCheckpointConfig.get_tag()],
    Annotated[LoRALyCORISConfig, LoRALyCORISConfig.get_tag()],
    Annotated[ControlLoRALyCORISConfig, ControlLoRALyCORISConfig.get_tag()],
    Annotated[ControlLoRADiffusersConfig, ControlLoRADiffusersConfig.get_tag()],
    Annotated[LoRADiffusersConfig, LoRADiffusersConfig.get_tag()],
    Annotated[T5EncoderConfig, T5EncoderConfig.get_tag()],
    Annotated[T5EncoderBnbQuantizedLlmInt8bConfig, T5EncoderBnbQuantizedLlmInt8bConfig.get_tag()],

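The AnyModelConfig union above relies on pydantic's tagged-union support: each member is Annotated with a Tag, and the union is resolved by a discriminator. A minimal self-contained sketch of that pattern, with hypothetical stand-in config classes rather than the repo's real ones:

from typing import Annotated, Literal, Union
from pydantic import BaseModel, Discriminator, Tag, TypeAdapter

class LyCORISConfig(BaseModel):
    format: Literal["lycoris"] = "lycoris"

class DiffusersConfig(BaseModel):
    format: Literal["diffusers"] = "diffusers"

def get_format(v) -> str:
    # Callable discriminator: works on both raw dicts and model instances.
    if isinstance(v, dict):
        return v["format"]
    return v.format

AnyConfig = Annotated[
    Union[
        Annotated[LyCORISConfig, Tag("lycoris")],
        Annotated[DiffusersConfig, Tag("diffusers")],
    ],
    Discriminator(get_format),
]

config = TypeAdapter(AnyConfig).validate_python({"format": "diffusers"})
assert isinstance(config, DiffusersConfig)

With this shape, removing a class from the union (as this hunk does for ControlLoRADiffusersConfig) is the only change needed; validation of the remaining tags is unaffected.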
@@ -57,11 +57,11 @@ class LoadedModelWithoutConfig:
        self._cache = cache

    def __enter__(self) -> AnyModel:
        self._cache.lock(self._cache_record, None)
        self._cache.lock(self._cache_record.key, None)
        return self.model

    def __exit__(self, *args: Any, **kwargs: Any) -> None:
        self._cache.unlock(self._cache_record)
        self._cache.unlock(self._cache_record.key)

    @contextmanager
    def model_on_device(
@@ -72,11 +72,11 @@ class LoadedModelWithoutConfig:
        :param working_mem_bytes: The amount of working memory to keep available on the compute device when loading the
            model.
        """
        self._cache.lock(self._cache_record, working_mem_bytes)
        self._cache.lock(self._cache_record.key, working_mem_bytes)
        try:
            yield (self._cache_record.cached_model.get_cpu_state_dict(), self._cache_record.cached_model.model)
        finally:
            self._cache.unlock(self._cache_record)
            self._cache.unlock(self._cache_record.key)

    @property
    def model(self) -> AnyModel:
@@ -87,7 +87,12 @@ class LoadedModelWithoutConfig:
class LoadedModel(LoadedModelWithoutConfig):
    """Context manager object that mediates transfer from RAM<->VRAM."""

    def __init__(self, config: Optional[AnyModelConfig], cache_record: CacheRecord, cache: ModelCache):
    def __init__(
        self,
        config: Optional[AnyModelConfig],
        cache_record: CacheRecord,
        cache: ModelCache,
    ):
        super().__init__(cache_record=cache_record, cache=cache)
        self.config = config

@@ -114,7 +119,11 @@ class ModelLoaderBase(ABC):
        pass

    @abstractmethod
    def load_model(self, model_config: AnyModelConfig, submodel_type: Optional[SubModelType] = None) -> LoadedModel:
    def load_model(
        self,
        model_config: AnyModelConfig,
        submodel_type: Optional[SubModelType] = None,
    ) -> LoadedModel:
        """
        Return a model given its configuration.


@@ -38,7 +38,11 @@ class ModelLoader(ModelLoaderBase):
        self._torch_dtype = TorchDevice.choose_torch_dtype()
        self._torch_device = TorchDevice.choose_torch_device()

    def load_model(self, model_config: AnyModelConfig, submodel_type: Optional[SubModelType] = None) -> LoadedModel:
    def load_model(
        self,
        model_config: AnyModelConfig,
        submodel_type: Optional[SubModelType] = None,
    ) -> LoadedModel:
        """
        Return a model given its configuration.


@@ -16,18 +16,16 @@ class CacheRecord:
    key: str
    # Model in memory.
    cached_model: CachedModelWithPartialLoad | CachedModelOnlyFullLoad
    # If locks > 0, the model is actively being used, so we should do our best to keep it on the compute device.
    _locks: int = 0

    def lock(self) -> None:
        """Lock this record."""
        self._locks += 1

    def unlock(self) -> None:
        """Unlock this record."""
        self._locks -= 1
        assert self._locks >= 0

    @property
    def is_locked(self) -> bool:
        """Return true if record is locked."""
        return self._locks > 0

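The lock counter above implements simple reference counting: a record may be locked by several consumers at once, and it only becomes evictable when every consumer has unlocked it. A sketch of how a caller might pair lock/unlock safely (use_record is illustrative, not repo code):

from contextlib import contextmanager

@contextmanager
def use_record(record: "CacheRecord"):
    record.lock()  # pin the model on the compute device while in use
    try:
        yield record.cached_model
    finally:
        record.unlock()  # record becomes evictable once all users release it

The try/finally pairing is the important part; it guarantees the counter is decremented even if the body raises, so the assert in unlock() never trips from a leaked lock.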
@@ -11,5 +11,4 @@ class CacheStats(object):
    high_watermark: int = 0  # amount of cache used
    in_cache: int = 0  # number of models in cache
    cleared: int = 0  # number of models cleared to make space
    cache_size: int = 0  # total size of cache
    loaded_model_sizes: Dict[str, int] = field(default_factory=dict)

@@ -5,21 +5,18 @@ import torch

class CachedModelOnlyFullLoad:
    """A wrapper around a PyTorch model to handle full loads and unloads between the CPU and the compute device.

    Note: "VRAM" is used throughout this class to refer to the memory on the compute device. It could be CUDA memory,
    MPS memory, etc.
    """

    def __init__(
        self, model: torch.nn.Module | Any, compute_device: torch.device, total_bytes: int, keep_ram_copy: bool = False
    ):
    def __init__(self, model: torch.nn.Module | Any, compute_device: torch.device, total_bytes: int):
        """Initialize a CachedModelOnlyFullLoad.

        Args:
            model (torch.nn.Module | Any): The model to wrap. Should be on the CPU.
            compute_device (torch.device): The compute device to move the model to.
            total_bytes (int): The total size (in bytes) of all the weights in the model.
            keep_ram_copy (bool): Whether to keep a read-only copy of the model's state dict in RAM. Keeping a RAM copy
                increases RAM usage, but speeds up model offload from VRAM and LoRA patching (assuming there is
                sufficient RAM).
        """
        # model is often a torch.nn.Module, but could be any model type. Throughout this class, we handle both cases.
        self._model = model
@@ -28,7 +25,7 @@ class CachedModelOnlyFullLoad:

        # A CPU read-only copy of the model's state dict.
        self._cpu_state_dict: dict[str, torch.Tensor] | None = None
        if isinstance(model, torch.nn.Module) and keep_ram_copy:
        if isinstance(model, torch.nn.Module):
            self._cpu_state_dict = model.state_dict()

        self._total_bytes = total_bytes
@@ -60,6 +57,7 @@ class CachedModelOnlyFullLoad:

    def full_load_to_vram(self) -> int:
        """Load all weights into VRAM (if supported by the model).

        Returns:
            The number of bytes loaded into VRAM.
        """
@@ -83,6 +81,7 @@ class CachedModelOnlyFullLoad:

    def full_unload_from_vram(self) -> int:
        """Unload all weights from VRAM.

        Returns:
            The number of bytes unloaded from VRAM.
        """

@@ -1,10 +1,22 @@
import torch

from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.custom_modules.custom_module_mixin import (
    CustomModuleMixin,
from invokeai.backend.model_manager.load.model_cache.torch_function_autocast_context import (
    add_autocast_to_module_forward,
    remove_autocast_from_module_forward,
)
from invokeai.backend.util.calc_tensor_size import calc_tensor_size
from invokeai.backend.util.logging import InvokeAILogger


def set_nested_attr(obj: object, attr: str, value: object):
    """A helper function that extends setattr() to support nested attributes.

    Example:
        set_nested_attr(model, "module.encoder.conv1.weight", new_conv1_weight)
    """
    attrs = attr.split(".")
    for attr in attrs[:-1]:
        obj = getattr(obj, attr)
    setattr(obj, attrs[-1], value)


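A quick usage sketch for set_nested_attr() on a toy module (the Toy class is illustrative only, not from the repo):

import torch

class Toy(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.encoder = torch.nn.Linear(4, 4)

model = Toy()
new_weight = torch.nn.Parameter(torch.zeros(4, 4))
set_nested_attr(model, "encoder.weight", new_weight)  # walks model.encoder, then sets .weight
assert model.encoder.weight is new_weight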
class CachedModelWithPartialLoad:
@@ -14,103 +26,19 @@ class CachedModelWithPartialLoad:
    MPS memory, etc.
    """

    def __init__(self, model: torch.nn.Module, compute_device: torch.device, keep_ram_copy: bool = False):
    def __init__(self, model: torch.nn.Module, compute_device: torch.device):
        self._model = model
        self._compute_device = compute_device

        model_state_dict = model.state_dict()
        # A CPU read-only copy of the model's state dict. Used for faster model unloads from VRAM, and to speed up LoRA
        # patching. Set to `None` if keep_ram_copy is False.
        self._cpu_state_dict: dict[str, torch.Tensor] | None = model_state_dict if keep_ram_copy else None
        # A CPU read-only copy of the model's state dict.
        self._cpu_state_dict: dict[str, torch.Tensor] = model.state_dict()

        # A dictionary of the size of each tensor in the state dict.
        # HACK(ryand): We use this dictionary any time we are doing byte tracking calculations. We do this for
        # consistency in case the application code has modified the model's size (e.g. by casting to a different
        # precision). Of course, this means that we are making model cache load/unload decisions based on model size
        # data that may not be fully accurate.
        self._state_dict_bytes = {k: calc_tensor_size(v) for k, v in model_state_dict.items()}

        self._total_bytes = sum(self._state_dict_bytes.values())
        # TODO(ryand): Handle the case where the model sizes changes after initial load (e.g. due to dtype casting).
        # Consider how we should handle this for both self._total_bytes and self._cur_vram_bytes.
        self._total_bytes = sum(calc_tensor_size(p) for p in self._cpu_state_dict.values())
        self._cur_vram_bytes: int | None = None

        self._modules_that_support_autocast = self._find_modules_that_support_autocast()
        self._keys_in_modules_that_do_not_support_autocast = self._find_keys_in_modules_that_do_not_support_autocast(
            model_state_dict
        )
        self._state_dict_keys_by_module_prefix = self._group_state_dict_keys_by_module_prefix(model_state_dict)

    def _find_modules_that_support_autocast(self) -> dict[str, torch.nn.Module]:
        """Find all modules that support autocasting."""
        return {n: m for n, m in self._model.named_modules() if isinstance(m, CustomModuleMixin)}  # type: ignore

    def _find_keys_in_modules_that_do_not_support_autocast(self, state_dict: dict[str, torch.Tensor]) -> set[str]:
        keys_in_modules_that_do_not_support_autocast: set[str] = set()
        for key in state_dict.keys():
            for module_name in self._modules_that_support_autocast.keys():
                if key.startswith(module_name):
                    break
            else:
                keys_in_modules_that_do_not_support_autocast.add(key)
        return keys_in_modules_that_do_not_support_autocast

    def _group_state_dict_keys_by_module_prefix(self, state_dict: dict[str, torch.Tensor]) -> dict[str, list[str]]:
        """A helper function that groups state dict keys by module prefix.

        Example:
        ```
        state_dict = {
            "weight": ...,
            "module.submodule.weight": ...,
            "module.submodule.bias": ...,
            "module.other_submodule.weight": ...,
            "module.other_submodule.bias": ...,
        }

        output = group_state_dict_keys_by_module_prefix(state_dict)

        # The output will be:
        output = {
            "": [
                "weight",
            ],
            "module.submodule": [
                "module.submodule.weight",
                "module.submodule.bias",
            ],
            "module.other_submodule": [
                "module.other_submodule.weight",
                "module.other_submodule.bias",
            ],
        }
        ```
        """
        state_dict_keys_by_module_prefix: dict[str, list[str]] = {}
        for key in state_dict.keys():
            split = key.rsplit(".", 1)
            # `split` will have length 1 if the root module has parameters.
            module_name = split[0] if len(split) > 1 else ""
            if module_name not in state_dict_keys_by_module_prefix:
                state_dict_keys_by_module_prefix[module_name] = []
            state_dict_keys_by_module_prefix[module_name].append(key)
        return state_dict_keys_by_module_prefix

    def _move_non_persistent_buffers_to_device(self, device: torch.device):
        """Move the non-persistent buffers to the target device. These buffers are not included in the state dict,
        so we need to move them manually.
        """
        # HACK(ryand): Typically, non-persistent buffers are moved when calling module.to(device). We don't move entire
        # modules, because we manage the devices of individual tensors using the state dict. Since non-persistent
        # buffers are not included in the state dict, we need to handle them manually. The only way to do this is by
        # using private torch.nn.Module attributes.
        for module in self._model.modules():
            for name, buffer in module.named_buffers():
                if name in module._non_persistent_buffers_set:
                    module._buffers[name] = buffer.to(device, copy=True)

    def _set_autocast_enabled_in_all_modules(self, enabled: bool):
        """Set autocast_enabled flag in all modules that support device autocasting."""
        for module in self._modules_that_support_autocast.values():
            module.set_device_autocasting_enabled(enabled)
        self._update_model_autocast_context()

    @property
    def model(self) -> torch.nn.Module:

@@ -130,9 +58,7 @@ class CachedModelWithPartialLoad:
        if self._cur_vram_bytes is None:
            cur_state_dict = self._model.state_dict()
            self._cur_vram_bytes = sum(
                self._state_dict_bytes[k]
                for k, v in cur_state_dict.items()
                if v.device.type == self._compute_device.type
                calc_tensor_size(p) for p in cur_state_dict.values() if p.device.type == self._compute_device.type
            )
        return self._cur_vram_bytes

@@ -144,82 +70,6 @@ class CachedModelWithPartialLoad:
        """Unload all weights from VRAM."""
        return self.partial_unload_from_vram(self.total_bytes())

    def _load_state_dict_with_device_conversion(
        self, state_dict: dict[str, torch.Tensor], keys_to_convert: set[str], target_device: torch.device
    ):
        if self._cpu_state_dict is not None:
            # Run the fast version.
            self._load_state_dict_with_fast_device_conversion(
                state_dict=state_dict,
                keys_to_convert=keys_to_convert,
                target_device=target_device,
                cpu_state_dict=self._cpu_state_dict,
            )
        else:
            # Run the low-virtual-memory version.
            self._load_state_dict_with_jit_device_conversion(
                state_dict=state_dict,
                keys_to_convert=keys_to_convert,
                target_device=target_device,
            )

    def _load_state_dict_with_jit_device_conversion(
        self,
        state_dict: dict[str, torch.Tensor],
        keys_to_convert: set[str],
        target_device: torch.device,
    ):
        """A custom state dict loading implementation with good peak memory properties.

        This implementation has the important property that it copies parameters to the target device one module at a
        time rather than applying all of the device conversions and then calling load_state_dict(). This is done to
        minimize the peak virtual memory usage. Specifically, we want to avoid a case where we hold references to all
        of the CPU weights and CUDA weights simultaneously, because Windows will reserve virtual memory for both.
        """
        for module_name, module in self._model.named_modules():
            module_keys = self._state_dict_keys_by_module_prefix.get(module_name, [])
            # Calculate the length of the module name prefix.
            prefix_len = len(module_name)
            if prefix_len > 0:
                prefix_len += 1

            module_state_dict = {}
            for key in module_keys:
                if key in keys_to_convert:
                    # It is important that we overwrite `state_dict[key]` to avoid keeping two copies of the same
                    # parameter.
                    state_dict[key] = state_dict[key].to(target_device)
                # Note that we keep parameters that have not been moved to a new device in case the module implements
                # weird custom state dict loading logic that requires all parameters to be present.
                module_state_dict[key[prefix_len:]] = state_dict[key]

            if len(module_state_dict) > 0:
                # We set strict=False, because if `module` has both parameters and child modules, then we are loading a
                # state dict that only contains the parameters of `module` (not its children).
                # We assume that it is rare for non-leaf modules to have parameters. Calling load_state_dict() on
                # non-leaf modules will recurse through all of the children, so is a bit wasteful.
                incompatible_keys = module.load_state_dict(module_state_dict, strict=False, assign=True)
                # Missing keys are ok, unexpected keys are not.
                assert len(incompatible_keys.unexpected_keys) == 0

    def _load_state_dict_with_fast_device_conversion(
        self,
        state_dict: dict[str, torch.Tensor],
        keys_to_convert: set[str],
        target_device: torch.device,
        cpu_state_dict: dict[str, torch.Tensor],
    ):
        """Convert parameters to the target device and load them into the model. Leverages the `cpu_state_dict` to
        speed up transfers of weights to the CPU.
        """
        for key in keys_to_convert:
            if target_device.type == "cpu":
                state_dict[key] = cpu_state_dict[key]
            else:
                state_dict[key] = state_dict[key].to(target_device)

        self._model.load_state_dict(state_dict, assign=True)
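The per-module conversion strategy above can be illustrated outside of this class. The following is a minimal, self-contained sketch of the same idea under stated assumptions; the helper name `move_state_dict_per_module` is hypothetical and not part of the InvokeAI codebase.

```python
import torch

def move_state_dict_per_module(model: torch.nn.Module, target_device: torch.device) -> None:
    """Hypothetical helper: move a model's weights one module at a time.

    Converting and assigning per module means we never hold a full duplicate of the
    state dict on two devices at once, which bounds peak (virtual) memory usage.
    """
    state_dict = model.state_dict()
    for module_name, module in model.named_modules():
        prefix = f"{module_name}." if module_name else ""
        module_sd = {}
        for key in list(state_dict.keys()):
            # Only this module's *direct* parameters (no "." left after the prefix).
            if key.startswith(prefix) and "." not in key[len(prefix):]:
                # Overwrite in place so the old-device copy becomes collectable.
                state_dict[key] = state_dict[key].to(target_device)
                module_sd[key[len(prefix):]] = state_dict[key]
        if module_sd:
            # strict=False: this dict holds only the module's own parameters.
            module.load_state_dict(module_sd, strict=False, assign=True)

# Usage (assumes a CUDA device is present):
# model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8))
# move_state_dict_per_module(model, torch.device("cuda"))
```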
    @torch.no_grad()
    def partial_load_to_vram(self, vram_bytes_to_load: int) -> int:
        """Load more weights into VRAM without exceeding vram_bytes_to_load.
@@ -234,85 +84,47 @@ class CachedModelWithPartialLoad:

        cur_state_dict = self._model.state_dict()

        # Identify the keys that will be loaded into VRAM.
        keys_to_load: set[str] = set()

        # First, process the keys that *must* be loaded into VRAM.
        for key in self._keys_in_modules_that_do_not_support_autocast:
            param = cur_state_dict[key]
            if param.device.type == self._compute_device.type:
                continue

            keys_to_load.add(key)
            param_size = self._state_dict_bytes[key]
            vram_bytes_loaded += param_size

        if vram_bytes_loaded > vram_bytes_to_load:
            logger = InvokeAILogger.get_logger()
            logger.warning(
                f"Loading {vram_bytes_loaded / 2**20} MB into VRAM, but only {vram_bytes_to_load / 2**20} MB were "
                "requested. This is the minimum set of weights in VRAM required to run the model."
            )

        # Next, process the keys that can optionally be loaded into VRAM.
        fully_loaded = True
        for key, param in cur_state_dict.items():
            # Skip the keys that have already been processed above.
            if key in keys_to_load:
                continue

            if param.device.type == self._compute_device.type:
                continue

            param_size = self._state_dict_bytes[key]
            param_size = calc_tensor_size(param)
            if vram_bytes_loaded + param_size > vram_bytes_to_load:
                # TODO(ryand): Should we just break here? If we couldn't fit this parameter into VRAM, is it really
                # worth continuing to search for a smaller parameter that would fit?
                fully_loaded = False
                continue

            keys_to_load.add(key)
            cur_state_dict[key] = param.to(self._compute_device, copy=True)
            vram_bytes_loaded += param_size

        if len(keys_to_load) > 0:
        if vram_bytes_loaded > 0:
            # We load the entire state dict, not just the parameters that changed, in case there are modules that
            # override _load_from_state_dict() and do some funky stuff that requires the entire state dict.
            # Alternatively, in the future, grouping parameters by module could probably solve this problem.
            self._load_state_dict_with_device_conversion(cur_state_dict, keys_to_load, self._compute_device)
            self._model.load_state_dict(cur_state_dict, assign=True)

        if self._cur_vram_bytes is not None:
            self._cur_vram_bytes += vram_bytes_loaded

        if fully_loaded:
            self._set_autocast_enabled_in_all_modules(False)
        else:
            self._set_autocast_enabled_in_all_modules(True)

        # Move all non-persistent buffers to the compute device. These are a weird edge case and do not participate in
        # the vram_bytes_loaded tracking.
        self._move_non_persistent_buffers_to_device(self._compute_device)
        if self._cur_vram_bytes == self.total_bytes():
            # HACK(ryand): The model should already be on the compute device, but we have to call this to ensure that
            # all non-persistent buffers are moved (i.e. buffers that are not registered in the state dict).
            self._model.to(self._compute_device)

        self._update_model_autocast_context()
        return vram_bytes_loaded

    @torch.no_grad()
    def partial_unload_from_vram(self, vram_bytes_to_free: int, keep_required_weights_in_vram: bool = False) -> int:
    def partial_unload_from_vram(self, vram_bytes_to_free: int) -> int:
        """Unload weights from VRAM until vram_bytes_to_free bytes are freed, or the entire model is unloaded.

        :param keep_required_weights_in_vram: If True, any weights that must be kept in VRAM to run the model will be
            kept in VRAM.

        Returns:
            The number of bytes unloaded from VRAM.
        """
        vram_bytes_freed = 0
        required_weights_in_vram = 0

        offload_device = "cpu"
        cur_state_dict = self._model.state_dict()

        # Identify the keys that will be offloaded to CPU.
        keys_to_offload: set[str] = set()

        for key, param in cur_state_dict.items():
            if vram_bytes_freed >= vram_bytes_to_free:
                break
@@ -320,20 +132,26 @@ class CachedModelWithPartialLoad:
            if param.device.type == offload_device:
                continue

            if keep_required_weights_in_vram and key in self._keys_in_modules_that_do_not_support_autocast:
                required_weights_in_vram += self._state_dict_bytes[key]
                continue
            cur_state_dict[key] = self._cpu_state_dict[key]
            vram_bytes_freed += calc_tensor_size(param)

            keys_to_offload.add(key)
            vram_bytes_freed += self._state_dict_bytes[key]

        if len(keys_to_offload) > 0:
            self._load_state_dict_with_device_conversion(cur_state_dict, keys_to_offload, torch.device("cpu"))
        if vram_bytes_freed > 0:
            self._model.load_state_dict(cur_state_dict, assign=True)

        if self._cur_vram_bytes is not None:
            self._cur_vram_bytes -= vram_bytes_freed

        # We may have gone from a fully-loaded model to a partially-loaded model, so we need to reapply the custom
        # layers.
        self._set_autocast_enabled_in_all_modules(True)
        self._update_model_autocast_context()
        return vram_bytes_freed

    def _update_model_autocast_context(self):
        """A helper function that should be called whenever the model's VRAM usage changes, to add or remove the
        autocast context.
        """
        if self.cur_vram_bytes() == self.total_bytes():
            # We remove the autocast context when the model is fully loaded into VRAM, because the context causes some
            # runtime overhead.
            remove_autocast_from_module_forward(self._model)
        else:
            # Monkey-patch the model to add autocasting to the model's forward method.
            add_autocast_to_module_forward(self._model, self._compute_device)

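For orientation, here is a hedged sketch of how a caller might drive the budgeted partial-load API above. The constructor signature differs between the two sides of this diff (the `keep_ram_copy` argument exists on only one side), so the driver below takes an already-constructed object; the function name `budget_load` is hypothetical.

```python
def budget_load(cached_model, vram_budget_bytes: int) -> int:
    """Hypothetical driver: keep a partially loaded model within a VRAM budget.

    `cached_model` is assumed to expose the API shown in the diff above:
    cur_vram_bytes(), partial_load_to_vram(), and partial_unload_from_vram().
    """
    headroom = vram_budget_bytes - cached_model.cur_vram_bytes()
    if headroom > 0:
        # Load more weights, but never more than the remaining budget.
        return cached_model.partial_load_to_vram(headroom)
    # Over budget: free the excess instead.
    cached_model.partial_unload_from_vram(-headroom)
    return 0
```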
@@ -1,33 +0,0 @@
from contextlib import contextmanager

import torch

from invokeai.backend.util.logging import InvokeAILogger


@contextmanager
def log_operation_vram_usage(operation_name: str):
    """A helper function for tuning working memory requirements for memory-intensive ops.

    Sample usage:

    ```python
    with log_operation_vram_usage("some_operation"):
        some_operation()
    ```
    """
    torch.cuda.synchronize()
    torch.cuda.reset_peak_memory_stats()
    max_allocated_before = torch.cuda.max_memory_allocated()
    max_reserved_before = torch.cuda.max_memory_reserved()
    try:
        yield
    finally:
        torch.cuda.synchronize()
        max_allocated_after = torch.cuda.max_memory_allocated()
        max_reserved_after = torch.cuda.max_memory_reserved()
        logger = InvokeAILogger.get_logger()
        logger.info(
            f">>>{operation_name} Peak VRAM allocated: {(max_allocated_after - max_allocated_before) / 2**20} MB, "
            f"Peak VRAM reserved: {(max_reserved_after - max_reserved_before) / 2**20} MB"
        )
@@ -1,14 +1,15 @@
import gc
import logging
import threading
import time
from functools import wraps
from logging import Logger
from typing import Any, Callable, Dict, List, Optional
from typing import Dict, List, Optional

import psutil
import torch
from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL

from invokeai.backend.flux.ip_adapter.xlabs_ip_adapter_flux import XlabsIpAdapterFlux
from invokeai.backend.flux.modules.autoencoder import AutoEncoder
from invokeai.backend.model_manager import AnyModel, SubModelType
from invokeai.backend.model_manager.load.memory_snapshot import MemorySnapshot
from invokeai.backend.model_manager.load.model_cache.cache_record import CacheRecord
@@ -19,9 +20,6 @@ from invokeai.backend.model_manager.load.model_cache.cached_model.cached_model_o
from invokeai.backend.model_manager.load.model_cache.cached_model.cached_model_with_partial_load import (
    CachedModelWithPartialLoad,
)
from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.torch_module_autocast import (
    apply_custom_layers_to_model,
)
from invokeai.backend.model_manager.load.model_util import calc_model_size_by_data
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.logging import InvokeAILogger
@@ -43,17 +41,6 @@ def get_model_cache_key(model_key: str, submodel_type: Optional[SubModelType] =
    return model_key


def synchronized(method: Callable[..., Any]) -> Callable[..., Any]:
    """A decorator that applies the class's self._lock to the method."""

    @wraps(method)
    def wrapper(self, *args, **kwargs):
        with self._lock:  # Automatically acquire and release the lock.
            return method(self, *args, **kwargs)

    return wrapper

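The decorator above assumes the instance owns a `self._lock`; because the lock is an `RLock` (see the constructor below), synchronized methods can safely call each other. A minimal self-contained sketch of the same locking pattern (the `Counter` class is hypothetical):

```python
import threading
from functools import wraps
from typing import Any, Callable

def synchronized(method: Callable[..., Any]) -> Callable[..., Any]:
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        with self._lock:  # Acquire and release around the whole method body.
            return method(self, *args, **kwargs)
    return wrapper

class Counter:
    def __init__(self):
        self._lock = threading.RLock()  # RLock so synchronized methods can nest.
        self._value = 0

    @synchronized
    def increment(self) -> int:
        self._value += 1
        return self._value
```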
class ModelCache:
    """A cache for managing models in memory.

@@ -91,7 +78,6 @@ class ModelCache:
        self,
        execution_device_working_mem_gb: float,
        enable_partial_loading: bool,
        keep_ram_copy_of_weights: bool,
        max_ram_cache_size_gb: float | None = None,
        max_vram_cache_size_gb: float | None = None,
        execution_device: torch.device | str = "cuda",
@@ -119,7 +105,6 @@ class ModelCache:
        :param logger: InvokeAILogger to use (otherwise creates one)
        """
        self._enable_partial_loading = enable_partial_loading
        self._keep_ram_copy_of_weights = keep_ram_copy_of_weights
        self._execution_device_working_mem_gb = execution_device_working_mem_gb
        self._execution_device: torch.device = torch.device(execution_device)
        self._storage_device: torch.device = torch.device(storage_device)
@@ -136,27 +121,16 @@ class ModelCache:
        self._cached_models: Dict[str, CacheRecord] = {}
        self._cache_stack: List[str] = []

        self._ram_cache_size_bytes = self._calc_ram_available_to_model_cache()

        # A lock applied to all public method calls to make the ModelCache thread-safe.
        # At the time of writing, the ModelCache should only be accessed from two threads:
        # - The graph execution thread
        # - Requests to empty the cache from a separate thread
        self._lock = threading.RLock()

    @property
    @synchronized
    def stats(self) -> Optional[CacheStats]:
        """Return the collected CacheStats object."""
        return self._stats

    @stats.setter
    @synchronized
    def stats(self, stats: CacheStats) -> None:
        """Set the CacheStats object for collecting cache statistics."""
        self._stats = stats

    @synchronized
    def put(self, key: str, model: AnyModel) -> None:
        """Add a model to the cache."""
        if key in self._cached_models:
@@ -168,25 +142,34 @@ class ModelCache:
        size = calc_model_size_by_data(self._logger, model)
        self.make_room(size)

        # Inject custom modules into the model.
        if isinstance(model, torch.nn.Module):
            apply_custom_layers_to_model(model)

        # Partial loading only makes sense on CUDA.
        # - When running on CPU, there is no 'loading' to do.
        # - When running on MPS, memory is shared with the CPU, so the default OS memory management already handles
        #   this well.
        running_with_cuda = self._execution_device.type == "cuda"

        # Specific models that opt out of partial loading.
        partial_loading_opt_out_models = (
            # The following models have multiple entrypoints. Our auto-casting context management is only applied to
            # the forward method, so a partially loaded AutoEncoder could fail if another entrypoint is used. These
            # models can be supported in the future by improving the autocast context management.
            # AutoEncoder has three entrypoints: encode, decode, and forward.
            AutoEncoder,
            # XlabsIpAdapterFlux is a wrapper around two models that are called directly.
            XlabsIpAdapterFlux,
            AutoencoderKL,
        )

        # Wrap the model.
        if isinstance(model, torch.nn.Module) and running_with_cuda and self._enable_partial_loading:
            wrapped_model = CachedModelWithPartialLoad(
                model, self._execution_device, keep_ram_copy=self._keep_ram_copy_of_weights
            )
        if (
            isinstance(model, torch.nn.Module)
            and running_with_cuda
            and self._enable_partial_loading
            and not isinstance(model, partial_loading_opt_out_models)
        ):
            wrapped_model = CachedModelWithPartialLoad(model, self._execution_device)
        else:
            wrapped_model = CachedModelOnlyFullLoad(
                model, self._execution_device, size, keep_ram_copy=self._keep_ram_copy_of_weights
            )
            wrapped_model = CachedModelOnlyFullLoad(model, self._execution_device, size)

        cache_record = CacheRecord(key=key, cached_model=wrapped_model)
        self._cached_models[key] = cache_record
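The multi-entrypoint opt-out comment above is easiest to see with a toy module: if autocasting is attached only to `forward`, a second entrypoint such as `encode` never enters the autocast context. A minimal sketch (the `Toy` class is hypothetical):

```python
import torch

class Toy(torch.nn.Module):
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x * 2

    def encode(self, x: torch.Tensor) -> torch.Tensor:
        # A second entrypoint that does NOT route through forward().
        return x + 1

m = Toy()
old_forward = m.forward

def new_forward(*args, **kwargs):
    # Imagine device autocasting is activated here, as in the diff above.
    return old_forward(*args, **kwargs)

m.forward = new_forward
# m(x) now runs through the patched forward, but m.encode(x) bypasses the patch
# entirely, which is why multi-entrypoint models opt out of partial loading.
```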
@@ -195,7 +178,6 @@ class ModelCache:
            f"Added model {key} (Type: {model.__class__.__name__}, Wrap mode: {wrapped_model.__class__.__name__}, Model size: {size/MB:.2f}MB)"
        )

    @synchronized
    def get(self, key: str, stats_name: Optional[str] = None) -> CacheRecord:
        """Retrieve a model from the cache.

@@ -224,29 +206,24 @@ class ModelCache:
            self.stats.loaded_model_sizes.get(stats_name, 0), cache_entry.cached_model.total_bytes()
        )

        # This moves the entry to the top (right end) of the stack.
        # this moves the entry to the top (right end) of the stack
        self._cache_stack = [k for k in self._cache_stack if k != key]
        self._cache_stack.append(key)

        self._logger.debug(f"Cache hit: {key} (Type: {cache_entry.cached_model.model.__class__.__name__})")

        return cache_entry

    @synchronized
    def lock(self, cache_entry: CacheRecord, working_mem_bytes: Optional[int]) -> None:
        """Lock a model for use and move it into VRAM."""
        if cache_entry.key not in self._cached_models:
            self._logger.info(
                f"Locking model cache entry {cache_entry.key} "
                f"(Type: {cache_entry.cached_model.model.__class__.__name__}), but it has already been dropped from "
                "the RAM cache. This is a sign that the model loading order is non-optimal in the invocation code "
                "(See https://github.com/invoke-ai/InvokeAI/issues/7513)."
            )
        # cache_entry = self._cached_models[key]
    def lock(self, key: str, working_mem_bytes: Optional[int]) -> None:
        """Lock a model for use and move it into VRAM.

        :param working_mem_bytes: The number of bytes of working memory to keep on the GPU while this model is loaded
            on the GPU. If None, self._execution_device_working_mem_gb is used.
        """
        cache_entry = self._cached_models[key]
        cache_entry.lock()

        self._logger.debug(
            f"Locking model {cache_entry.key} (Type: {cache_entry.cached_model.model.__class__.__name__})"
        )
        self._logger.debug(f"Locking model {key} (Type: {cache_entry.cached_model.model.__class__.__name__})")

        if self._execution_device.type == "cpu":
            # Models don't need to be loaded into VRAM if we're running on CPU.
@@ -255,7 +232,7 @@ class ModelCache:
        try:
            self._load_locked_model(cache_entry, working_mem_bytes)
            self._logger.debug(
                f"Finished locking model {cache_entry.key} (Type: {cache_entry.cached_model.model.__class__.__name__})"
                f"Finished locking model {key} (Type: {cache_entry.cached_model.model.__class__.__name__})"
            )
        except torch.cuda.OutOfMemoryError:
            self._logger.warning("Insufficient GPU memory to load model. Aborting")
@@ -267,25 +244,16 @@ class ModelCache:

        self._log_cache_state()

    @synchronized
    def unlock(self, cache_entry: CacheRecord) -> None:
    def unlock(self, key: str) -> None:
        """Unlock a model."""
        if cache_entry.key not in self._cached_models:
            self._logger.info(
                f"Unlocking model cache entry {cache_entry.key} "
                f"(Type: {cache_entry.cached_model.model.__class__.__name__}), but it has already been dropped from "
                "the RAM cache. This is a sign that the model loading order is non-optimal in the invocation code "
                "(See https://github.com/invoke-ai/InvokeAI/issues/7513)."
            )
        # cache_entry = self._cached_models[key]
        cache_entry = self._cached_models[key]
        cache_entry.unlock()
        self._logger.debug(
            f"Unlocked model {cache_entry.key} (Type: {cache_entry.cached_model.model.__class__.__name__})"
        )
        self._logger.debug(f"Unlocked model {key} (Type: {cache_entry.cached_model.model.__class__.__name__})")

    def _load_locked_model(self, cache_entry: CacheRecord, working_mem_bytes: Optional[int] = None) -> None:
        """Helper function for self.lock(). Loads a locked model into VRAM."""
        start_time = time.time()
        vram_available = self._get_vram_available(working_mem_bytes)

        # Calculate model_vram_needed, the amount of additional VRAM that will be used if we fully load the model into
        # VRAM.
@@ -293,7 +261,9 @@ class ModelCache:
        model_total_bytes = cache_entry.cached_model.total_bytes()
        model_vram_needed = model_total_bytes - model_cur_vram_bytes

        vram_available = self._get_vram_available(working_mem_bytes)
        # The amount of VRAM that must be freed to make room for model_vram_needed.
        vram_bytes_to_free = max(0, model_vram_needed - vram_available)

        self._logger.debug(
            f"Before unloading: {self._get_vram_state_str(model_cur_vram_bytes, model_total_bytes, vram_available)}"
        )
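A quick worked example of the bookkeeping above, using entirely hypothetical numbers:

```python
MB = 2**20
model_total_bytes = 6000 * MB      # full model size (hypothetical)
model_cur_vram_bytes = 1500 * MB   # already resident in VRAM
vram_available = 2000 * MB         # headroom reported by _get_vram_available()

model_vram_needed = model_total_bytes - model_cur_vram_bytes          # 4500 MB
vram_bytes_to_free = max(0, model_vram_needed - vram_available)       # 2500 MB
```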
@@ -302,7 +272,7 @@ class ModelCache:
        # 1. If the model can fit entirely in VRAM, then make enough room for it to be loaded fully.
        # 2. If the model can't fit fully into VRAM, then unload all other models and load as much of the model as
        #    possible.
        vram_bytes_freed = self._offload_unlocked_models(model_vram_needed, working_mem_bytes)
        vram_bytes_freed = self._offload_unlocked_models(vram_bytes_to_free)
        self._logger.debug(f"Unloaded models (if necessary): vram_bytes_freed={(vram_bytes_freed/MB):.2f}MB")

        # Check the updated vram_available after offloading.
@@ -311,21 +281,10 @@ class ModelCache:
            f"After unloading: {self._get_vram_state_str(model_cur_vram_bytes, model_total_bytes, vram_available)}"
        )

        if vram_available < 0:
            # There is insufficient VRAM available. As a last resort, try to unload the model being locked from VRAM,
            # as it may still be loaded from a previous use.
            vram_bytes_freed_from_own_model = self._move_model_to_ram(cache_entry, -vram_available)
            vram_available = self._get_vram_available(working_mem_bytes)
            self._logger.debug(
                f"Unloaded {vram_bytes_freed_from_own_model/MB:.2f}MB from the model being locked ({cache_entry.key})."
            )

        # Move as much of the model as possible into VRAM.
        # For testing, only allow 10% of the model to be loaded into VRAM.
        # vram_available = int(model_vram_needed * 0.1)
        # We add 1 MB to the available VRAM to account for small errors in memory tracking (e.g. off-by-one). A fully
        # loaded model is much faster than a 95% loaded model.
        model_bytes_loaded = self._move_model_to_vram(cache_entry, vram_available + MB)
        model_bytes_loaded = self._move_model_to_vram(cache_entry, vram_available)

        model_cur_vram_bytes = cache_entry.cached_model.cur_vram_bytes()
        vram_available = self._get_vram_available(working_mem_bytes)
@@ -360,9 +319,7 @@ class ModelCache:
    def _move_model_to_ram(self, cache_entry: CacheRecord, vram_bytes_to_free: int) -> int:
        try:
            if isinstance(cache_entry.cached_model, CachedModelWithPartialLoad):
                return cache_entry.cached_model.partial_unload_from_vram(
                    vram_bytes_to_free, keep_required_weights_in_vram=cache_entry.is_locked
                )
                return cache_entry.cached_model.partial_unload_from_vram(vram_bytes_to_free)
            elif isinstance(cache_entry.cached_model, CachedModelOnlyFullLoad):  # type: ignore
                return cache_entry.cached_model.full_unload_from_vram()
            else:
@@ -372,7 +329,7 @@ class ModelCache:
            self._delete_cache_entry(cache_entry)
            raise

    def _get_vram_available(self, working_mem_bytes: Optional[int]) -> int:
    def _get_vram_available(self, working_mem_bytes: Optional[int] = None) -> int:
        """Calculate the amount of additional VRAM available for the cache to use (takes into account the working
        memory).
        """
@@ -385,13 +342,9 @@ class ModelCache:
        working_mem_bytes = max(working_mem_bytes or working_mem_bytes_default, working_mem_bytes_default)

        if self._execution_device.type == "cuda":
            # TODO(ryand): It is debatable whether we should use memory_reserved() or memory_allocated() here.
            # memory_reserved() includes memory reserved by the torch CUDA memory allocator that may or may not be
            # re-used for future allocations. For now, we use memory_allocated() to be conservative.
            # vram_reserved = torch.cuda.memory_reserved(self._execution_device)
            vram_allocated = torch.cuda.memory_allocated(self._execution_device)
            vram_reserved = torch.cuda.memory_reserved(self._execution_device)
            vram_free, _vram_total = torch.cuda.mem_get_info(self._execution_device)
            vram_available_to_process = vram_free + vram_allocated
            vram_available_to_process = vram_free + vram_reserved
        elif self._execution_device.type == "mps":
            vram_reserved = torch.mps.driver_allocated_memory()
            # TODO(ryand): Is it accurate that MPS shares memory with the CPU?
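The two sides of this hunk disagree on the definition of per-process VRAM: one counts `memory_allocated()` (bytes actually backing live tensors) and the other `memory_reserved()` (the allocator's pool, a superset of the allocated bytes). A small sketch that reports both definitions, assuming a CUDA device is present (the function name `report_vram` is hypothetical):

```python
import torch

def report_vram(device: torch.device = torch.device("cuda")) -> None:
    vram_free, _vram_total = torch.cuda.mem_get_info(device)
    vram_allocated = torch.cuda.memory_allocated(device)
    vram_reserved = torch.cuda.memory_reserved(device)
    # Conservative definition: free device memory plus what this process has
    # actually allocated to live tensors.
    conservative = vram_free + vram_allocated
    # Optimistic definition: also counts the allocator's cached-but-unused pool.
    optimistic = vram_free + vram_reserved
    print(f"free={vram_free}, allocated={vram_allocated}, reserved={vram_reserved}")
    print(f"available (allocated-based)={conservative}, (reserved-based)={optimistic}")
```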
@@ -406,86 +359,35 @@ class ModelCache:

    def _get_vram_in_use(self) -> int:
        """Get the amount of VRAM currently in use by the cache."""
        if self._execution_device.type == "cuda":
            return torch.cuda.memory_allocated()
        elif self._execution_device.type == "mps":
            return torch.mps.current_allocated_memory()
        else:
            raise ValueError(f"Unsupported execution device type: {self._execution_device.type}")
        # Alternative definition of VRAM in use:
        # return sum(ce.cached_model.cur_vram_bytes() for ce in self._cached_models.values())
        return sum(ce.cached_model.cur_vram_bytes() for ce in self._cached_models.values())

    def _get_ram_available(self) -> int:
        """Get the amount of RAM available for the cache to use, while keeping memory pressure under control."""

    def _calc_ram_available_to_model_cache(self) -> int:
        """Calculate the amount of RAM available for the cache to use."""
        # If self._max_ram_cache_size_gb is set, then it overrides the default logic.
        if self._max_ram_cache_size_gb is not None:
            self._logger.info(f"Using user-defined RAM cache size: {self._max_ram_cache_size_gb} GB.")
            return int(self._max_ram_cache_size_gb * GB)
            ram_total_available_to_cache = int(self._max_ram_cache_size_gb * GB)
            return ram_total_available_to_cache - self._get_ram_in_use()

        # Heuristics for dynamically calculating the RAM cache size, **in order of increasing priority**:
        # 1. As an initial default, use 50% of the total RAM for InvokeAI.
        #    - Assume a 2GB baseline for InvokeAI's non-model RAM usage, and use the rest of the RAM for the model
        #      cache.
        # 2. On a system with a lot of RAM, users probably don't want InvokeAI to eat up too much RAM.
        #    There are diminishing returns to storing more and more models. So, we apply an upper bound. (Keep in mind
        #    that most OSes have some amount of disk caching, which we still benefit from if there is excess memory,
        #    even if we drop models from the cache.)
        #    - On systems without a CUDA device, the upper bound is 32GB.
        #    - On systems with a CUDA device, the upper bound is 1x the amount of VRAM (less the working memory).
        # 3. Absolute minimum of 4GB.
        virtual_memory = psutil.virtual_memory()
        ram_total = virtual_memory.total
        ram_available = virtual_memory.available
        ram_used = ram_total - ram_available

        # NOTE(ryand): We explored dynamically adjusting the RAM cache size based on memory pressure (using psutil),
        # but decided against it for now, for the following reasons:
        # - It was surprisingly difficult to get memory metrics with consistent definitions across OSes. (If you go
        #   down this path again, don't underestimate the amount of complexity here and be sure to test rigorously on
        #   all OSes.)
        # - Making the RAM cache size dynamic opens the door for performance regressions that are hard to diagnose and
        #   hard for users to understand. It is better for users to see that their RAM is maxed out, and then override
        #   the default value if desired.
        # The total size of all the models in the cache will often be larger than the amount of RAM reported by psutil
        # (due to lazy-loading and OS RAM caching behaviour). We could just rely on the psutil values, but it feels
        # like a bad idea to over-fill the model cache. So, for now, we'll try to keep the total size of models in the
        # cache under the total amount of system RAM.
        cache_ram_used = self._get_ram_in_use()
        ram_used = max(cache_ram_used, ram_used)

        # Look up the total VRAM size for the CUDA execution device.
        total_cuda_vram_bytes: int | None = None
        if self._execution_device.type == "cuda":
            _, total_cuda_vram_bytes = torch.cuda.mem_get_info(self._execution_device)

        # Apply heuristic 1.
        # ------------------
        heuristics_applied = [1]
        total_system_ram_bytes = psutil.virtual_memory().total
        # Assumed baseline RAM used by InvokeAI for non-model stuff.
        baseline_ram_used_by_invokeai = 2 * GB
        ram_available_to_model_cache = int(total_system_ram_bytes * 0.5 - baseline_ram_used_by_invokeai)

        # Apply heuristic 2.
        # ------------------
        max_ram_cache_size_bytes = 32 * GB
        if total_cuda_vram_bytes is not None:
            if self._max_vram_cache_size_gb is not None:
                max_ram_cache_size_bytes = int(self._max_vram_cache_size_gb * GB)
            else:
                max_ram_cache_size_bytes = total_cuda_vram_bytes - int(self._execution_device_working_mem_gb * GB)
        if ram_available_to_model_cache > max_ram_cache_size_bytes:
            heuristics_applied.append(2)
            ram_available_to_model_cache = max_ram_cache_size_bytes

        # Apply heuristic 3.
        # ------------------
        if ram_available_to_model_cache < 4 * GB:
            heuristics_applied.append(3)
            ram_available_to_model_cache = 4 * GB

        self._logger.info(
            f"Calculated model RAM cache size: {ram_available_to_model_cache / MB:.2f} MB. Heuristics applied: {heuristics_applied}."
        )
        return ram_available_to_model_cache
        # Aim to keep 10% of RAM free.
        return int(ram_total * 0.9) - ram_used
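A worked pass through the three heuristics above, with hypothetical hardware numbers (64 GB of system RAM, a 24 GB CUDA device, 3 GB of working memory, and no user overrides):

```python
GB = 2**30

total_system_ram_bytes = 64 * GB
total_cuda_vram_bytes = 24 * GB
execution_device_working_mem_gb = 3

# Heuristic 1: 50% of system RAM, minus a 2 GB non-model baseline -> 30 GB.
ram_available_to_model_cache = int(total_system_ram_bytes * 0.5 - 2 * GB)

# Heuristic 2: upper bound of 1x VRAM less working memory -> 21 GB (binding here).
max_ram_cache_size_bytes = total_cuda_vram_bytes - int(execution_device_working_mem_gb * GB)
ram_available_to_model_cache = min(ram_available_to_model_cache, max_ram_cache_size_bytes)

# Heuristic 3: absolute floor of 4 GB (not binding here).
ram_available_to_model_cache = max(ram_available_to_model_cache, 4 * GB)

assert ram_available_to_model_cache == 21 * GB
```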
    def _get_ram_in_use(self) -> int:
        """Get the amount of RAM currently in use."""
        return sum(ce.cached_model.total_bytes() for ce in self._cached_models.values())

    def _get_ram_available(self) -> int:
        """Get the amount of RAM available for the cache to use."""
        return self._ram_cache_size_bytes - self._get_ram_in_use()

    def _capture_memory_snapshot(self) -> Optional[MemorySnapshot]:
        if self._log_memory_usage:
            return MemorySnapshot.capture()
@@ -501,30 +403,24 @@ class ModelCache:
            + f"vram_available={(vram_available/MB):.0f} MB, "
        )

    def _offload_unlocked_models(self, vram_bytes_required: int, working_mem_bytes: Optional[int] = None) -> int:
        """Offload models from the execution_device until vram_bytes_required bytes are available, or all models are
    def _offload_unlocked_models(self, vram_bytes_to_free: int) -> int:
        """Offload models from the execution_device until vram_bytes_to_free bytes are freed, or all models are
        offloaded. Of course, locked models are not offloaded.

        Returns:
            int: The number of bytes freed based on believed model sizes. The actual change in VRAM may be different.
            int: The number of bytes freed.
        """
        self._logger.debug(
            f"Offloading unlocked models with goal of making room for {vram_bytes_required/MB:.2f}MB of VRAM."
        )
        self._logger.debug(f"Offloading unlocked models with goal of freeing {vram_bytes_to_free/MB:.2f}MB of VRAM.")
        vram_bytes_freed = 0
        # TODO(ryand): Give more thought to the offloading policy used here.
        cache_entries_increasing_size = sorted(self._cached_models.values(), key=lambda x: x.cached_model.total_bytes())
        for cache_entry in cache_entries_increasing_size:
            # We do not fully trust the count of bytes freed, so we check again on each iteration.
            vram_available = self._get_vram_available(working_mem_bytes)
            vram_bytes_to_free = vram_bytes_required - vram_available
            if vram_bytes_to_free <= 0:
            if vram_bytes_freed >= vram_bytes_to_free:
                break
            if cache_entry.is_locked:
                # TODO(ryand): In the future, we may want to partially unload locked models, but this requires careful
                # handling of model patches (e.g. LoRA).
                continue
            cache_entry_bytes_freed = self._move_model_to_ram(cache_entry, vram_bytes_to_free)

            cache_entry_bytes_freed = self._move_model_to_ram(cache_entry, vram_bytes_to_free - vram_bytes_freed)
            if cache_entry_bytes_freed > 0:
                self._logger.debug(
                    f"Unloaded {cache_entry.key} from VRAM to free {(cache_entry_bytes_freed/MB):.0f} MB."
@@ -560,7 +456,7 @@ class ModelCache:

        if self._execution_device.type != "cpu":
            vram_in_use_bytes = self._get_vram_in_use()
            vram_available_bytes = self._get_vram_available(None)
            vram_available_bytes = self._get_vram_available()
            vram_size_bytes = vram_in_use_bytes + vram_available_bytes
            vram_in_use_bytes_percent = vram_in_use_bytes / vram_size_bytes if vram_size_bytes > 0 else 0
            vram_available_bytes_percent = vram_available_bytes / vram_size_bytes if vram_size_bytes > 0 else 0
@@ -601,7 +497,6 @@ class ModelCache:

        self._logger.debug(log)

    @synchronized
    def make_room(self, bytes_needed: int) -> None:
        """Make enough room in the cache to accommodate a new model of indicated size.

@@ -0,0 +1,42 @@
from typing import Any, Callable

import torch
from torch.overrides import TorchFunctionMode


def add_autocast_to_module_forward(m: torch.nn.Module, to_device: torch.device):
    """Monkey-patch m.forward(...) with a new forward(...) method that activates device autocasting for its duration."""
    old_forward = m.forward

    def new_forward(*args: Any, **kwargs: Any):
        with TorchFunctionAutocastDeviceContext(to_device):
            return old_forward(*args, **kwargs)

    m.old_forward = old_forward  # type: ignore
    m.forward = new_forward


def remove_autocast_from_module_forward(m: torch.nn.Module):
    """Remove the autocast context from m.forward(...) and restore the old forward method."""
    if not hasattr(m, "old_forward"):
        return
    m.forward = m.old_forward
    del m.old_forward


def _cast_to_device_and_run(
    func: Callable[..., Any], args: tuple[Any, ...], kwargs: dict[str, Any], to_device: torch.device
):
    args_on_device = [a.to(to_device) if isinstance(a, torch.Tensor) else a for a in args]
    kwargs_on_device = {k: v.to(to_device) if isinstance(v, torch.Tensor) else v for k, v in kwargs.items()}
    return func(*args_on_device, **kwargs_on_device)


class TorchFunctionAutocastDeviceContext(TorchFunctionMode):
    def __init__(self, to_device: torch.device):
        self._to_device = to_device

    def __torch_function__(
        self, func: Callable[..., Any], types, args: tuple[Any, ...] = (), kwargs: dict[str, Any] | None = None
    ):
        return _cast_to_device_and_run(func, args, kwargs or {}, self._to_device)
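The added file above is the core of this branch's streaming approach: a `TorchFunctionMode` that moves every tensor operand to the compute device as each op executes, so CPU-resident weights are cast just in time. A minimal self-contained restatement of the idea, guarded so it only runs where CUDA is present (`ToDeviceMode` is an illustrative name, not the branch's class):

```python
import torch
from torch.overrides import TorchFunctionMode

class ToDeviceMode(TorchFunctionMode):
    """Minimal re-statement of the autocast context above, for illustration only."""
    def __init__(self, device: torch.device):
        self._device = device

    def __torch_function__(self, func, types, args=(), kwargs=None):
        kwargs = kwargs or {}
        # The mode is inactive while this handler runs, so these .to() calls
        # and the final func(...) call do not recurse back into the mode.
        args = [a.to(self._device) if isinstance(a, torch.Tensor) else a for a in args]
        kwargs = {k: v.to(self._device) if isinstance(v, torch.Tensor) else v for k, v in kwargs.items()}
        return func(*args, **kwargs)

if torch.cuda.is_available():
    linear = torch.nn.Linear(8, 4)         # weights stay on the CPU
    x = torch.randn(1, 8, device="cuda")   # input lives on the GPU
    with ToDeviceMode(torch.device("cuda")):
        y = linear(x)                      # weight/bias are cast per-op
    assert y.device.type == "cuda"
```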
@@ -1,15 +0,0 @@
from typing import TypeVar

import torch

T = TypeVar("T", torch.Tensor, None, torch.Tensor | None)


def cast_to_device(t: T, to_device: torch.device) -> T:
    """Helper function to cast an optional tensor to a target device."""
    if t is None:
        return t

    if t.device.type != to_device.type:
        return t.to(to_device)
    return t
@@ -1,8 +0,0 @@

This directory contains custom implementations of common torch.nn.Module classes that add support for:
- Streaming weights to the execution device
- Applying sidecar patches at execution time (e.g. sidecar LoRA layers)

Each custom class sub-classes the original module type that it is replacing, so the following properties are preserved:
- `isinstance(m, torch.nn.OriginalModule)` should still work.
- Patching the weights directly (e.g. for LoRA) should still work. (Of course, this is not possible for quantized layers, hence the sidecar support.)
@@ -1,43 +0,0 @@
import torch

from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.cast_to_device import cast_to_device
from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.custom_modules.custom_module_mixin import (
    CustomModuleMixin,
)
from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.custom_modules.utils import (
    add_nullable_tensors,
)


class CustomConv1d(torch.nn.Conv1d, CustomModuleMixin):
    def _autocast_forward_with_patches(self, input: torch.Tensor) -> torch.Tensor:
        weight = cast_to_device(self.weight, input.device)
        bias = cast_to_device(self.bias, input.device)

        # Prepare the original parameters for the patch aggregation.
        orig_params = {"weight": weight, "bias": bias}
        # Filter out None values.
        orig_params = {k: v for k, v in orig_params.items() if v is not None}

        aggregated_param_residuals = self._aggregate_patch_parameters(
            patches_and_weights=self._patches_and_weights,
            orig_params=orig_params,
            device=input.device,
        )

        weight = add_nullable_tensors(weight, aggregated_param_residuals.get("weight", None))
        bias = add_nullable_tensors(bias, aggregated_param_residuals.get("bias", None))
        return self._conv_forward(input, weight, bias)

    def _autocast_forward(self, input: torch.Tensor) -> torch.Tensor:
        weight = cast_to_device(self.weight, input.device)
        bias = cast_to_device(self.bias, input.device)
        return self._conv_forward(input, weight, bias)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        if len(self._patches_and_weights) > 0:
            return self._autocast_forward_with_patches(input)
        elif self._device_autocasting_enabled:
            return self._autocast_forward(input)
        else:
            return super().forward(input)
@@ -1,43 +0,0 @@
import torch

from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.cast_to_device import cast_to_device
from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.custom_modules.custom_module_mixin import (
    CustomModuleMixin,
)
from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.custom_modules.utils import (
    add_nullable_tensors,
)


class CustomConv2d(torch.nn.Conv2d, CustomModuleMixin):
    def _autocast_forward_with_patches(self, input: torch.Tensor) -> torch.Tensor:
        weight = cast_to_device(self.weight, input.device)
        bias = cast_to_device(self.bias, input.device)

        # Prepare the original parameters for the patch aggregation.
        orig_params = {"weight": weight, "bias": bias}
        # Filter out None values.
        orig_params = {k: v for k, v in orig_params.items() if v is not None}

        aggregated_param_residuals = self._aggregate_patch_parameters(
            patches_and_weights=self._patches_and_weights,
            orig_params=orig_params,
            device=input.device,
        )

        weight = add_nullable_tensors(weight, aggregated_param_residuals.get("weight", None))
        bias = add_nullable_tensors(bias, aggregated_param_residuals.get("bias", None))
        return self._conv_forward(input, weight, bias)

    def _autocast_forward(self, input: torch.Tensor) -> torch.Tensor:
        weight = cast_to_device(self.weight, input.device)
        bias = cast_to_device(self.bias, input.device)
        return self._conv_forward(input, weight, bias)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        if len(self._patches_and_weights) > 0:
            return self._autocast_forward_with_patches(input)
        elif self._device_autocasting_enabled:
            return self._autocast_forward(input)
        else:
            return super().forward(input)
@@ -1,29 +0,0 @@
import torch

from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.cast_to_device import cast_to_device
from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.custom_modules.custom_module_mixin import (
    CustomModuleMixin,
)


class CustomEmbedding(torch.nn.Embedding, CustomModuleMixin):
    def _autocast_forward(self, input: torch.Tensor) -> torch.Tensor:
        weight = cast_to_device(self.weight, input.device)
        return torch.nn.functional.embedding(
            input,
            weight,
            self.padding_idx,
            self.max_norm,
            self.norm_type,
            self.scale_grad_by_freq,
            self.sparse,
        )

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        if len(self._patches_and_weights) > 0:
            raise RuntimeError("Embedding layers do not support patches")

        if self._device_autocasting_enabled:
            return self._autocast_forward(input)
        else:
            return super().forward(input)
@@ -1,36 +0,0 @@
import torch

from invokeai.backend.flux.modules.layers import RMSNorm
from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.cast_to_device import cast_to_device
from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.custom_modules.custom_module_mixin import (
    CustomModuleMixin,
)
from invokeai.backend.patches.layers.set_parameter_layer import SetParameterLayer


class CustomFluxRMSNorm(RMSNorm, CustomModuleMixin):
    def _autocast_forward_with_patches(self, x: torch.Tensor) -> torch.Tensor:
        # Currently, CustomFluxRMSNorm layers only support patching with a single SetParameterLayer.
        assert len(self._patches_and_weights) == 1
        patch, _patch_weight = self._patches_and_weights[0]
        assert isinstance(patch, SetParameterLayer)
        assert patch.param_name == "scale"

        scale = cast_to_device(patch.weight, x.device)

        # Apply the patch.
        # NOTE(ryand): Currently, we ignore the patch weight when running as a sidecar. It's not clear how this should
        # be handled.
        return torch.nn.functional.rms_norm(x, scale.shape, scale, eps=1e-6)

    def _autocast_forward(self, x: torch.Tensor) -> torch.Tensor:
        scale = cast_to_device(self.scale, x.device)
        return torch.nn.functional.rms_norm(x, scale.shape, scale, eps=1e-6)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if len(self._patches_and_weights) > 0:
            return self._autocast_forward_with_patches(x)
        elif self._device_autocasting_enabled:
            return self._autocast_forward(x)
        else:
            return super().forward(x)
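For reference, both forward paths above compute RMS normalization: x / sqrt(mean(x**2) + eps) * scale, with eps = 1e-6. A small numerical check of that identity (assumes a torch version that provides torch.nn.functional.rms_norm, which the code above itself relies on):

```python
import torch

x = torch.randn(2, 8)
scale = torch.ones(8)
eps = 1e-6

# Manual RMSNorm over the last dimension.
manual = x / torch.sqrt(x.pow(2).mean(dim=-1, keepdim=True) + eps) * scale
fast = torch.nn.functional.rms_norm(x, scale.shape, scale, eps=eps)
assert torch.allclose(manual, fast, atol=1e-5)
```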
@@ -1,22 +0,0 @@
import torch

from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.cast_to_device import cast_to_device
from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.custom_modules.custom_module_mixin import (
    CustomModuleMixin,
)


class CustomGroupNorm(torch.nn.GroupNorm, CustomModuleMixin):
    def _autocast_forward(self, input: torch.Tensor) -> torch.Tensor:
        weight = cast_to_device(self.weight, input.device)
        bias = cast_to_device(self.bias, input.device)
        return torch.nn.functional.group_norm(input, self.num_groups, weight, bias, self.eps)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        if len(self._patches_and_weights) > 0:
            raise RuntimeError("GroupNorm layers do not support patches")

        if self._device_autocasting_enabled:
            return self._autocast_forward(input)
        else:
            return super().forward(input)
@@ -1,44 +0,0 @@
import bitsandbytes as bnb
import torch

from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.cast_to_device import cast_to_device
from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.custom_modules.custom_linear import (
    autocast_linear_forward_sidecar_patches,
)
from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.custom_modules.custom_module_mixin import (
    CustomModuleMixin,
)
from invokeai.backend.quantization.bnb_llm_int8 import InvokeLinear8bitLt


class CustomInvokeLinear8bitLt(InvokeLinear8bitLt, CustomModuleMixin):
    def _autocast_forward_with_patches(self, x: torch.Tensor) -> torch.Tensor:
        return autocast_linear_forward_sidecar_patches(self, x, self._patches_and_weights)

    def _autocast_forward(self, x: torch.Tensor) -> torch.Tensor:
        matmul_state = bnb.MatmulLtState()
        matmul_state.threshold = self.state.threshold
        matmul_state.has_fp16_weights = self.state.has_fp16_weights
        matmul_state.use_pool = self.state.use_pool
        matmul_state.is_training = self.training
        # The underlying InvokeInt8Params weight must already be quantized.
        assert self.weight.CB is not None
        matmul_state.CB = cast_to_device(self.weight.CB, x.device)
        matmul_state.SCB = cast_to_device(self.weight.SCB, x.device)

        # Weights are cast automatically as Int8Params, but the bias has to be cast manually.
        if self.bias is not None and self.bias.dtype != x.dtype:
            self.bias.data = self.bias.data.to(x.dtype)

        # NOTE(ryand): The second parameter should not be needed at all given our expected inference configuration, but
        # its dtype field must be accessible, even though it's not used. We pass in self.weight even though it could be
        # on the wrong device.
        return bnb.matmul(x, self.weight, bias=cast_to_device(self.bias, x.device), state=matmul_state)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if len(self._patches_and_weights) > 0:
            return self._autocast_forward_with_patches(x)
        elif self._device_autocasting_enabled:
            return self._autocast_forward(x)
        else:
            return super().forward(x)
@@ -1,62 +0,0 @@
import copy

import bitsandbytes as bnb
import torch

from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.cast_to_device import cast_to_device
from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.custom_modules.custom_linear import (
    autocast_linear_forward_sidecar_patches,
)
from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.custom_modules.custom_module_mixin import (
    CustomModuleMixin,
)
from invokeai.backend.quantization.bnb_nf4 import InvokeLinearNF4


class CustomInvokeLinearNF4(InvokeLinearNF4, CustomModuleMixin):
    def _autocast_forward_with_patches(self, x: torch.Tensor) -> torch.Tensor:
        return autocast_linear_forward_sidecar_patches(self, x, self._patches_and_weights)

    def _autocast_forward(self, x: torch.Tensor) -> torch.Tensor:
        bnb.nn.modules.fix_4bit_weight_quant_state_from_module(self)

        # Weights are cast automatically as Int8Params, but the bias has to be cast manually.
        if self.bias is not None and self.bias.dtype != x.dtype:
            self.bias.data = self.bias.data.to(x.dtype)

        if not self.compute_type_is_set:
            self.set_compute_type(x)
            self.compute_type_is_set = True

        inp_dtype = x.dtype
        if self.compute_dtype is not None:
            x = x.to(self.compute_dtype)

        bias = None if self.bias is None else self.bias.to(self.compute_dtype)

        # HACK(ryand): Casting self.weight to the device also casts the self.weight.quant_state in-place (i.e. it
        # does not follow the tensor semantics of returning a new copy when converting to a different device). This
        # means that quant_state elements that started on the CPU would be left on the GPU, which we don't want. To
        # avoid this side effect we make a shallow copy of the original quant_state so that we can restore it. Fixing
        # this properly would require more invasive changes to the bitsandbytes library.

        # Make a shallow copy of the quant_state so that we can undo the in-place modification that occurs when casting
        # to a new device.
        old_quant_state = copy.copy(self.weight.quant_state)
        weight = cast_to_device(self.weight, x.device)
        self.weight.quant_state = old_quant_state

        # For some reason, the quant_state.to(...) implementation fails to cast the quant_state.code field. We do this
        # manually here.
        weight.quant_state.code = cast_to_device(weight.quant_state.code, x.device)

        bias = cast_to_device(self.bias, x.device)
        return bnb.matmul_4bit(x, weight.t(), bias=bias, quant_state=weight.quant_state).to(inp_dtype)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if len(self._patches_and_weights) > 0:
            return self._autocast_forward_with_patches(x)
        elif self._device_autocasting_enabled:
            return self._autocast_forward(x)
        else:
            return super().forward(x)
@@ -1,84 +0,0 @@
import copy

import torch

from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.cast_to_device import cast_to_device
from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.custom_modules.custom_module_mixin import (
    CustomModuleMixin,
)
from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.patches.layers.flux_control_lora_layer import FluxControlLoRALayer
from invokeai.backend.patches.layers.lora_layer import LoRALayer


def linear_lora_forward(input: torch.Tensor, lora_layer: LoRALayer, lora_weight: float) -> torch.Tensor:
    """An optimized implementation of the residual calculation for a sidecar linear LoRALayer."""
    x = torch.nn.functional.linear(input, lora_layer.down)
    if lora_layer.mid is not None:
        x = torch.nn.functional.linear(x, lora_layer.mid)
    x = torch.nn.functional.linear(x, lora_layer.up, bias=lora_layer.bias)
    x *= lora_weight * lora_layer.scale()
    return x


def autocast_linear_forward_sidecar_patches(
    orig_module: torch.nn.Linear, input: torch.Tensor, patches_and_weights: list[tuple[BaseLayerPatch, float]]
) -> torch.Tensor:
    """Run a linear layer with sidecar patches. Compatible with both quantized and non-quantized Linear layers."""
    # First, apply the original linear layer.
    # NOTE: We slice the input to match the original weight shape in order to work with FluxControlLoRAs, which
    # change the linear layer's in_features.
    orig_input = input
    input = orig_input[..., : orig_module.in_features]
    output = orig_module._autocast_forward(input)

    # Then, apply layers for which we have optimized implementations.
    unprocessed_patches_and_weights: list[tuple[BaseLayerPatch, float]] = []
    for patch, patch_weight in patches_and_weights:
        # Shallow-copy the patch so that we can cast it to the target device without modifying the original patch.
        patch = copy.copy(patch)
        patch.to(input.device)

        if isinstance(patch, FluxControlLoRALayer):
            # Note that we use the original input here, not the sliced input.
            output += linear_lora_forward(orig_input, patch, patch_weight)
        elif isinstance(patch, LoRALayer):
            output += linear_lora_forward(input, patch, patch_weight)
        else:
            unprocessed_patches_and_weights.append((patch, patch_weight))

    # Finally, apply any remaining patches.
    if len(unprocessed_patches_and_weights) > 0:
        # Prepare the original parameters for the patch aggregation.
        orig_params = {"weight": orig_module.weight, "bias": orig_module.bias}
        # Filter out None values.
        orig_params = {k: v for k, v in orig_params.items() if v is not None}

        aggregated_param_residuals = orig_module._aggregate_patch_parameters(
            unprocessed_patches_and_weights, orig_params=orig_params, device=input.device
        )
        output += torch.nn.functional.linear(
            input, aggregated_param_residuals["weight"], aggregated_param_residuals.get("bias", None)
        )

    return output


class CustomLinear(torch.nn.Linear, CustomModuleMixin):
    def _autocast_forward_with_patches(self, input: torch.Tensor) -> torch.Tensor:
        return autocast_linear_forward_sidecar_patches(self, input, self._patches_and_weights)

    def _autocast_forward(self, input: torch.Tensor) -> torch.Tensor:
        weight = cast_to_device(self.weight, input.device)
        bias = cast_to_device(self.bias, input.device)
        return torch.nn.functional.linear(input, weight, bias)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        if len(self._patches_and_weights) > 0:
            return self._autocast_forward_with_patches(input)
        elif self._device_autocasting_enabled:
            return self._autocast_forward(input)
        else:
            return super().forward(input)
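The `linear_lora_forward` helper above exploits the low-rank structure: applying `down` then `up` to the activations is mathematically the same as adding `up @ down` to the weight matrix, but much cheaper when the rank is small. A small numerical check of that equivalence (hypothetical shapes, no `mid` matrix; `scale` stands in for `lora_weight * lora_layer.scale()`):

```python
import torch

torch.manual_seed(0)
in_f, out_f, rank = 16, 8, 4
x = torch.randn(2, in_f)
weight = torch.randn(out_f, in_f)
down = torch.randn(rank, in_f)
up = torch.randn(out_f, rank)
scale = 0.5

# Residual computed activation-side, as in linear_lora_forward().
residual = torch.nn.functional.linear(torch.nn.functional.linear(x, down), up) * scale
y_sidecar = torch.nn.functional.linear(x, weight) + residual

# Equivalent weight-side patch: W' = W + scale * (up @ down).
y_patched = torch.nn.functional.linear(x, weight + scale * (up @ down))

assert torch.allclose(y_sidecar, y_patched, atol=1e-5)
```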
@@ -1,79 +0,0 @@
import copy

import torch

from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
from invokeai.backend.patches.layers.param_shape_utils import get_param_shape
from invokeai.backend.quantization.gguf.ggml_tensor import GGMLTensor


class CustomModuleMixin:
    """A mixin class for custom modules that enables device autocasting of module parameters."""

    def __init__(self):
        self._device_autocasting_enabled = False
        self._patches_and_weights: list[tuple[BaseLayerPatch, float]] = []

    def set_device_autocasting_enabled(self, enabled: bool):
        """Pass True to enable autocasting of module parameters to the same device as the input tensor. Pass False to
        disable autocasting, which results in slightly faster execution speed when we know that device autocasting is
        not needed.
        """
        self._device_autocasting_enabled = enabled

    def is_device_autocasting_enabled(self) -> bool:
        """Check if device autocasting is enabled for the module."""
        return self._device_autocasting_enabled

    def add_patch(self, patch: BaseLayerPatch, patch_weight: float):
        """Add a patch to the module."""
        self._patches_and_weights.append((patch, patch_weight))

    def clear_patches(self):
        """Clear all patches from the module."""
        self._patches_and_weights = []

    def get_num_patches(self) -> int:
        """Get the number of patches in the module."""
        return len(self._patches_and_weights)

    def _aggregate_patch_parameters(
        self,
        patches_and_weights: list[tuple[BaseLayerPatch, float]],
        orig_params: dict[str, torch.Tensor],
        device: torch.device | None = None,
    ):
        """Helper function that aggregates the parameters from all patches into a single dict."""
        # HACK(ryand): If the original parameters are in a quantized format whose weights can't be accessed, we replace
        # them with dummy tensors on the 'meta' device. This allows patch layers to access the shapes of the original
        # parameters. But, of course, any sub-layers that need to access the actual values of the parameters will fail.
        for param_name in orig_params.keys():
            param = orig_params[param_name]
            if type(param) is torch.nn.Parameter and type(param.data) is torch.Tensor:
                pass
            elif type(param) is GGMLTensor:
                # Move to device and dequantize here. Doing it in the patch layer can result in redundant casts /
                # dequantizations.
                orig_params[param_name] = param.to(device=device).get_dequantized_tensor()
            else:
                orig_params[param_name] = torch.empty(get_param_shape(param), device="meta")

        params: dict[str, torch.Tensor] = {}

        for patch, patch_weight in patches_and_weights:
            if device is not None:
                # Shallow-copy the patch so that we can cast it to the target device without modifying the original
                # patch.
                patch = copy.copy(patch)
                patch.to(device)

            # TODO(ryand): `self` could be a quantized module. Depending on what the patch is doing with the original
            # parameters, this might fail or return incorrect results.
            layer_params = patch.get_parameters(orig_params, weight=patch_weight)

            for param_name, param_weight in layer_params.items():
                if param_name not in params:
                    params[param_name] = param_weight
                else:
                    params[param_name] += param_weight

        return params
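The aggregation loop above simply sums per-parameter residuals across all patches. A stripped-down sketch of that contract, with a hypothetical `ShiftPatch` standing in for `BaseLayerPatch`:

```python
import torch

class ShiftPatch:
    """Hypothetical patch: contributes a constant residual to 'weight'."""
    def __init__(self, delta: torch.Tensor):
        self.delta = delta

    def get_parameters(self, orig_params: dict[str, torch.Tensor], weight: float) -> dict[str, torch.Tensor]:
        return {"weight": self.delta * weight}

def aggregate(patches_and_weights, orig_params):
    # Same accumulation rule as _aggregate_patch_parameters(): sum residuals per name.
    params: dict[str, torch.Tensor] = {}
    for patch, patch_weight in patches_and_weights:
        for name, residual in patch.get_parameters(orig_params, weight=patch_weight).items():
            params[name] = params.get(name, 0) + residual
    return params

orig = {"weight": torch.zeros(2, 2)}
patches = [(ShiftPatch(torch.ones(2, 2)), 0.5), (ShiftPatch(torch.ones(2, 2)), 1.0)]
assert torch.equal(aggregate(patches, orig)["weight"], torch.full((2, 2), 1.5))
```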
@@ -1,30 +0,0 @@
from typing import overload

import torch


@overload
def add_nullable_tensors(a: None, b: None) -> None: ...


@overload
def add_nullable_tensors(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor: ...


@overload
def add_nullable_tensors(a: torch.Tensor, b: None) -> torch.Tensor: ...


@overload
def add_nullable_tensors(a: None, b: torch.Tensor) -> torch.Tensor: ...


def add_nullable_tensors(a: torch.Tensor | None, b: torch.Tensor | None) -> torch.Tensor | None:
    if a is None and b is None:
        return None
    elif a is None:
        return b
    elif b is None:
        return a
    else:
        return a + b
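The overloads above exist purely for type checking, so callers get a non-optional return type whenever at least one argument is a tensor. A quick illustration:

import torch

a = torch.ones(2)
assert add_nullable_tensors(None, None) is None
assert add_nullable_tensors(a, None) is a  # passthrough when one side is None
assert torch.equal(add_nullable_tensors(a, a), a * 2)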
@@ -1,105 +0,0 @@
from typing import TypeVar

import torch

from invokeai.backend.flux.modules.layers import RMSNorm
from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.custom_modules.custom_conv1d import (
    CustomConv1d,
)
from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.custom_modules.custom_conv2d import (
    CustomConv2d,
)
from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.custom_modules.custom_embedding import (
    CustomEmbedding,
)
from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.custom_modules.custom_flux_rms_norm import (
    CustomFluxRMSNorm,
)
from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.custom_modules.custom_group_norm import (
    CustomGroupNorm,
)
from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.custom_modules.custom_linear import (
    CustomLinear,
)
from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.custom_modules.custom_module_mixin import (
    CustomModuleMixin,
)

AUTOCAST_MODULE_TYPE_MAPPING: dict[type[torch.nn.Module], type[torch.nn.Module]] = {
    torch.nn.Linear: CustomLinear,
    torch.nn.Conv1d: CustomConv1d,
    torch.nn.Conv2d: CustomConv2d,
    torch.nn.GroupNorm: CustomGroupNorm,
    torch.nn.Embedding: CustomEmbedding,
    RMSNorm: CustomFluxRMSNorm,
}

try:
    # These dependencies are not expected to be present on MacOS.
    from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.custom_modules.custom_invoke_linear_8_bit_lt import (
        CustomInvokeLinear8bitLt,
    )
    from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.custom_modules.custom_invoke_linear_nf4 import (
        CustomInvokeLinearNF4,
    )
    from invokeai.backend.quantization.bnb_llm_int8 import InvokeLinear8bitLt
    from invokeai.backend.quantization.bnb_nf4 import InvokeLinearNF4

    AUTOCAST_MODULE_TYPE_MAPPING[InvokeLinear8bitLt] = CustomInvokeLinear8bitLt
    AUTOCAST_MODULE_TYPE_MAPPING[InvokeLinearNF4] = CustomInvokeLinearNF4
except ImportError:
    pass


AUTOCAST_MODULE_TYPE_MAPPING_INVERSE = {v: k for k, v in AUTOCAST_MODULE_TYPE_MAPPING.items()}


T = TypeVar("T", bound=torch.nn.Module)


def wrap_custom_layer(module_to_wrap: torch.nn.Module, custom_layer_type: type[T]) -> T:
    # HACK(ryand): We use custom initialization logic so that we can initialize a new custom layer instance from an
    # existing layer instance without calling __init__() on the original layer class. We achieve this by copying
    # the attributes from the original layer instance to the new instance.
    custom_layer = custom_layer_type.__new__(custom_layer_type)
    # Note that we share the __dict__.
    # TODO(ryand): In the future, we may want to do a shallow copy of the __dict__.
    custom_layer.__dict__ = module_to_wrap.__dict__

    # Initialize the CustomModuleMixin fields.
    CustomModuleMixin.__init__(custom_layer)  # type: ignore
    return custom_layer


def unwrap_custom_layer(custom_layer: torch.nn.Module, original_layer_type: type[torch.nn.Module]):
    # HACK(ryand): We use custom initialization logic so that we can initialize a new custom layer instance from an
    # existing layer instance without calling __init__() on the original layer class. We achieve this by copying
    # the attributes from the original layer instance to the new instance.
    original_layer = original_layer_type.__new__(original_layer_type)
    # Note that we share the __dict__.
    # TODO(ryand): In the future, we may want to do a shallow copy of the __dict__ and strip out the CustomModuleMixin
    # fields.
    original_layer.__dict__ = custom_layer.__dict__
    return original_layer


def apply_custom_layers_to_model(module: torch.nn.Module, device_autocasting_enabled: bool = False):
    for name, submodule in module.named_children():
        override_type = AUTOCAST_MODULE_TYPE_MAPPING.get(type(submodule), None)
        if override_type is not None:
            custom_layer = wrap_custom_layer(submodule, override_type)
            # TODO(ryand): In the future, we should manage this flag on a per-module basis.
            custom_layer.set_device_autocasting_enabled(device_autocasting_enabled)
            setattr(module, name, custom_layer)
        else:
            # Recursively apply to submodules.
            apply_custom_layers_to_model(submodule, device_autocasting_enabled)


def remove_custom_layers_from_model(module: torch.nn.Module):
    for name, submodule in module.named_children():
        override_type = AUTOCAST_MODULE_TYPE_MAPPING_INVERSE.get(type(submodule), None)
        if override_type is not None:
            setattr(module, name, unwrap_custom_layer(submodule, override_type))
        else:
            remove_custom_layers_from_model(submodule)
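A round-trip sketch of the helpers above. Because wrap_custom_layer and unwrap_custom_layer share the module's __dict__ rather than copying parameters, both directions are cheap and leave the weights untouched:

import torch

model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.Conv2d(3, 3, kernel_size=1))

apply_custom_layers_to_model(model, device_autocasting_enabled=True)
assert type(model[0]) is CustomLinear  # swapped in place via setattr

remove_custom_layers_from_model(model)
assert type(model[0]) is torch.nn.Linear  # original types restored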
@@ -1,20 +0,0 @@
import itertools

import torch


def get_effective_device(model: torch.nn.Module) -> torch.device:
    """A utility to infer the 'effective' device of a model.

    This utility handles the case where a model is partially loaded onto the GPU, so it is safer than just calling
    `next(iter(model.parameters())).device`.

    In the worst case, this utility has to check all model parameters, so if you already know the intended model
    device, it is better to avoid calling this function.
    """
    # If all parameters are on the CPU, return the CPU device. Otherwise, return the first non-CPU device.
    for p in itertools.chain(model.parameters(), model.buffers()):
        if p.device.type != "cpu":
            return p.device

    return torch.device("cpu")
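For example, with a model that is only partially on the GPU (assumes a CUDA device is available):

import torch

model = torch.nn.Sequential(torch.nn.Linear(2, 2), torch.nn.Linear(2, 2))
model[1].to("cuda")  # only the second layer has been moved off the CPU

assert get_effective_device(model).type == "cuda"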
@@ -80,19 +80,19 @@ class FluxVAELoader(ModelLoader):
            raise ValueError("Only VAECheckpointConfig models are currently supported here.")
        model_path = Path(config.path)

        with accelerate.init_empty_weights():
            with SilenceWarnings():
                model = AutoEncoder(ae_params[config.config_path])

        sd = load_file(model_path)
        model.load_state_dict(sd, assign=True)
        # VAE is broken in float16, which mps defaults to
        if self._torch_dtype == torch.float16:
            try:
                vae_dtype = torch.tensor([1.0], dtype=torch.bfloat16, device=self._torch_device).dtype
            except TypeError:
                vae_dtype = torch.float32
        else:
            vae_dtype = self._torch_dtype
        model.to(vae_dtype)

        return model
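The try/except above is a runtime capability probe: constructing a bfloat16 tensor raises TypeError on backends without bfloat16 support (notably some mps builds), in which case the VAE falls back to float32. The same logic in isolation, as a sketch (the helper name is illustrative, not part of this diff):

import torch

def pick_vae_dtype(requested: torch.dtype, device: torch.device) -> torch.dtype:
    # float16 is known-broken for this VAE, so probe for bfloat16 and fall back to float32.
    if requested != torch.float16:
        return requested
    try:
        return torch.tensor([1.0], dtype=torch.bfloat16, device=device).dtype
    except TypeError:
        return torch.float32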
@@ -183,9 +183,7 @@ class T5EncoderCheckpointModel(ModelLoader):
            case SubModelType.Tokenizer2 | SubModelType.Tokenizer3:
                return T5Tokenizer.from_pretrained(Path(config.path) / "tokenizer_2", max_length=512)
            case SubModelType.TextEncoder2 | SubModelType.TextEncoder3:
                return T5EncoderModel.from_pretrained(
                    Path(config.path) / "text_encoder_2", torch_dtype="auto", low_cpu_mem_usage=True
                )
                return T5EncoderModel.from_pretrained(Path(config.path) / "text_encoder_2", torch_dtype="auto")

        raise ValueError(
            f"Only Tokenizer and TextEncoder submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}"
@@ -219,18 +217,17 @@ class FluxCheckpointModel(ModelLoader):
        assert isinstance(config, MainCheckpointConfig)
        model_path = Path(config.path)

        with accelerate.init_empty_weights():
            with SilenceWarnings():
                model = Flux(params[config.config_path])

        sd = load_file(model_path)
        if "model.diffusion_model.double_blocks.0.img_attn.norm.key_norm.scale" in sd:
            sd = convert_bundle_to_flux_transformer_checkpoint(sd)
        new_sd_size = sum([ten.nelement() * torch.bfloat16.itemsize for ten in sd.values()])
        self._ram_cache.make_room(new_sd_size)
        for k in sd.keys():
            # We need to cast to bfloat16 due to it being the only currently supported dtype for inference
            sd[k] = sd[k].to(torch.bfloat16)
        model.load_state_dict(sd, assign=True)
        return model

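The make_room sizing above runs before the cast loop, so the cache can evict enough memory for the bfloat16 copy of the state dict up front; the byte count assumes every tensor ends up in bfloat16 storage. The same computation in isolation (the helper name is illustrative, not part of this diff):

import torch

def bf16_state_dict_nbytes(sd: dict[str, torch.Tensor]) -> int:
    # nelement() * itemsize gives bytes per tensor; torch.bfloat16.itemsize == 2.
    return sum(t.nelement() * torch.bfloat16.itemsize for t in sd.values())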
@@ -261,11 +258,11 @@ class FluxGGUFCheckpointModel(ModelLoader):
        assert isinstance(config, MainGGUFCheckpointConfig)
        model_path = Path(config.path)

        with accelerate.init_empty_weights():
            with SilenceWarnings():
                model = Flux(params[config.config_path])

        # HACK(ryand): We shouldn't be hard-coding the compute_dtype here.
        sd = gguf_sd_loader(model_path, compute_dtype=torch.bfloat16)

        # HACK(ryand): There are some broken GGUF models in circulation that have the wrong shape for img_in.weight.
        # We override the shape here to fix the issue.
Some files were not shown because too many files have changed in this diff.