Compare commits

143 Commits

feat/contr...v3.0.1rc1
SHA1:

6bdcc32414, 4f39c81dec, 3376968cbb, 0420d75d2b, 3bd9c27a79, b6522cf2cf, 13ac5c6899, 05070304ff,
af8fc6ff82, f86d0d1b69, e6741cee75, 575ebaeb75, 385483ff8e, c7f883d22a, 58ff5d3f5b, f060e321eb,
dc8c3d8073, 819136c345, 989b68c772, a6347a1d3c, a00d1e87e4, c7d24081e2, 17900e5140, 6fa42cb10c,
4bea846199, 3dccc4d61e, bf0587da5f, 58c0bee325, b8f43f444a, da76f6fee4, c4f064bbf3, 0ce8472562,
3e206d4d6a, ce7fa96dbc, a705461c04, fda7e0a71a, 513b223ef6, db05445103, 30c3b7a6fc, 9e9dce44b4,
6fd8543e69, db48f3230b, 397604a094, f5139b174a, 050e5091db, 2c5b539d3a, 85ad5ef204, 5beb11f4e2,
844d37c642, b3723d1ccf, bd43751323, e32cd794f7, 761fc4beb8, 531bc40d3f, 676051edb9, de65b82569,
934f9afd7e, 1c01a31ee8, c5389b3298, fdbab5ffa9, a6e544ebd5, 75b0507434, 59c2556e6b, 4fe889bbf8,
cbcd416b70, 6fa244a343, e5a660930c, 61291ea105, 840205496a, 016797c890, 00e69d5d12, 8e90f9024d,
751c4407e4, 6c46304eb8, 0eb31c5710, 6295e56d96, 5202610160, 8d1b8179af, 3bdb059eb7, b0ebd148fa,
9f94d0e52a, 9c180da58a, 57d833035d, c145681488, 3eaf8c3b2f, d9527bf445, 032e9c8165, dbc3d42afc,
d5998ad3ef, a4c8d86faa, f4da66aa0f, 7f5a89f567, 2db9b3b2ae, 77107dfcbc, 11e6ecc1bf, 7d337dccc2,
91e903c8ab, efa615a8fd, cf10852ee3, 437532f2f9, 4194a0ed99, 7ce5b6504f, aea8ad5670, 97f4475fdf,
4f9c728db0, 7ea477abef, d42c394ab7, 61fa960a18, 1969afd038, 2b65e40896, d6bf6513ef, 14659277e7,
cbb90cbdbb, 9c59083406, 86b62cfccc, e766ddbcf4, 374b4a1b12, 0cf7a10c5c, 1c44a0feba, 66cdeba8a1,
d5a75eb833, 8eab96c441, 4754a94102, 5c6f417471, 0beec08d38, 02618a701d, f2a6f0cf21, 07a90c0198,
28031ead70, 4b334be7d0, af4579b4d4, 35acb5de76, 225f608556, 00d3cd4aed, 5e59edfaf1, fdc444ed61,
075f9b3a7a, b1d7c9b306, 5607794dbb, c5147d0f57, 6452d0fc28, 5468d9a9fc, 75863e7181
.github/workflows/close-inactive-issues.yml (vendored)

@@ -1,11 +1,11 @@
 name: Close inactive issues
 on:
   schedule:
-    - cron: "00 6 * * *"
+    - cron: "00 4 * * *"

 env:
-  DAYS_BEFORE_ISSUE_STALE: 14
-  DAYS_BEFORE_ISSUE_CLOSE: 28
+  DAYS_BEFORE_ISSUE_STALE: 30
+  DAYS_BEFORE_ISSUE_CLOSE: 14

 jobs:
   close-issues:
@@ -14,7 +14,7 @@ jobs:
       issues: write
       pull-requests: write
     steps:
-      - uses: actions/stale@v5
+      - uses: actions/stale@v8
        with:
          days-before-issue-stale: ${{ env.DAYS_BEFORE_ISSUE_STALE }}
          days-before-issue-close: ${{ env.DAYS_BEFORE_ISSUE_CLOSE }}
@@ -23,5 +23,6 @@ jobs:
          close-issue-message: "Due to inactivity, this issue was automatically closed. If you are still experiencing the issue, please recreate the issue."
          days-before-pr-stale: -1
          days-before-pr-close: -1
+         exempt-issue-labels: "Active Issue"
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          operations-per-run: 500
BIN  docs/assets/nodes/groupsallscale.png (new file, 490 KiB)
BIN  docs/assets/nodes/groupsconditioning.png (new file, 335 KiB)
BIN  docs/assets/nodes/groupscontrol.png (new file, 217 KiB)
BIN  docs/assets/nodes/groupsimgvae.png (new file, 244 KiB)
BIN  docs/assets/nodes/groupsiterate.png (new file, 948 KiB)
BIN  docs/assets/nodes/groupslora.png (new file, 292 KiB)
BIN  docs/assets/nodes/groupsmultigenseeding.png (new file, 420 KiB)
BIN  docs/assets/nodes/groupsnoise.png (new file, 179 KiB)
BIN  docs/assets/nodes/groupsrandseed.png (new file, 216 KiB)
BIN  docs/assets/nodes/nodescontrol.png (new file, 439 KiB)
BIN  docs/assets/nodes/nodesi2i.png (new file, 563 KiB)
BIN  docs/assets/nodes/nodest2i.png (new file, 353 KiB)
@@ -136,19 +136,16 @@ command-line options by giving the `--help` argument:

 ```
 (.venv) > invokeai-web --help
-usage: InvokeAI [-h] [--host HOST] [--port PORT] [--allow_origins [ALLOW_ORIGINS ...]] [--allow_credentials | --no-allow_credentials]
-                [--allow_methods [ALLOW_METHODS ...]] [--allow_headers [ALLOW_HEADERS ...]] [--esrgan | --no-esrgan]
-                [--internet_available | --no-internet_available] [--log_tokenization | --no-log_tokenization]
-                [--nsfw_checker | --no-nsfw_checker] [--patchmatch | --no-patchmatch] [--restore | --no-restore]
-                [--always_use_cpu | --no-always_use_cpu] [--free_gpu_mem | --no-free_gpu_mem] [--max_cache_size MAX_CACHE_SIZE]
-                [--max_vram_cache_size MAX_VRAM_CACHE_SIZE] [--precision {auto,float16,float32,autocast}]
-                [--sequential_guidance | --no-sequential_guidance] [--xformers_enabled | --no-xformers_enabled]
-                [--tiled_decode | --no-tiled_decode] [--root ROOT] [--autoimport_dir AUTOIMPORT_DIR] [--lora_dir LORA_DIR]
-                [--embedding_dir EMBEDDING_DIR] [--controlnet_dir CONTROLNET_DIR] [--conf_path CONF_PATH] [--models_dir MODELS_DIR]
-                [--legacy_conf_dir LEGACY_CONF_DIR] [--db_dir DB_DIR] [--outdir OUTDIR] [--from_file FROM_FILE]
-                [--use_memory_db | --no-use_memory_db] [--model MODEL] [--log_handlers [LOG_HANDLERS ...]]
-                [--log_format {plain,color,syslog,legacy}] [--log_level {debug,info,warning,error,critical}]
-...
+usage: InvokeAI [-h] [--host HOST] [--port PORT] [--allow_origins [ALLOW_ORIGINS ...]] [--allow_credentials | --no-allow_credentials] [--allow_methods [ALLOW_METHODS ...]]
+                [--allow_headers [ALLOW_HEADERS ...]] [--esrgan | --no-esrgan] [--internet_available | --no-internet_available] [--log_tokenization | --no-log_tokenization]
+                [--nsfw_checker | --no-nsfw_checker] [--invisible_watermark | --no-invisible_watermark] [--patchmatch | --no-patchmatch] [--restore | --no-restore]
+                [--always_use_cpu | --no-always_use_cpu] [--free_gpu_mem | --no-free_gpu_mem] [--max_loaded_models MAX_LOADED_MODELS] [--max_cache_size MAX_CACHE_SIZE]
+                [--max_vram_cache_size MAX_VRAM_CACHE_SIZE] [--gpu_mem_reserved GPU_MEM_RESERVED] [--precision {auto,float16,float32,autocast}]
+                [--sequential_guidance | --no-sequential_guidance] [--xformers_enabled | --no-xformers_enabled] [--tiled_decode | --no-tiled_decode] [--root ROOT]
+                [--autoimport_dir AUTOIMPORT_DIR] [--lora_dir LORA_DIR] [--embedding_dir EMBEDDING_DIR] [--controlnet_dir CONTROLNET_DIR] [--conf_path CONF_PATH]
+                [--models_dir MODELS_DIR] [--legacy_conf_dir LEGACY_CONF_DIR] [--db_dir DB_DIR] [--outdir OUTDIR] [--from_file FROM_FILE]
+                [--use_memory_db | --no-use_memory_db] [--model MODEL] [--log_handlers [LOG_HANDLERS ...]] [--log_format {plain,color,syslog,legacy}]
+                [--log_level {debug,info,warning,error,critical}] [--version | --no-version]
 ```

 ## The Configuration Settings
@@ -179,6 +176,7 @@ These configuration settings allow you to enable and disable various InvokeAI fe

 | `internet_available` | `true` | When a resource is not available locally, try to fetch it via the internet |
 | `log_tokenization` | `false` | Before each text2image generation, print a color-coded representation of the prompt to the console; this can help understand why a prompt is not working as expected |
 | `nsfw_checker` | `true` | Activate the NSFW checker to blur out risque images |
+| `invisible_watermark` | `true` | Write an invisible watermark 'InvokeAI' into generated images for use by AI image detectors |
 | `patchmatch` | `true` | Activate the "patchmatch" algorithm for improved inpainting |
 | `restore` | `true` | Activate the facial restoration features (DEPRECATED; restoration features will be removed in 3.0.0) |
@@ -61,11 +61,13 @@ A noise scheduler (eg. DPM++ 2M Karras) schedules the subtraction of noise from

 | ImageInverseLerp | Inverse linear interpolation of all pixels of an image |
 | ImageLerp | Linear interpolation of all pixels of an image |
 | ImageMultiply | Multiplies two images together using `PIL.ImageChops.Multiply()` |
+| ImageNSFWBlurInvocation | Detects and blurs images that may contain sexually explicit content |
 | ImagePaste | Pastes an image into another image |
 | ImageProcessor | Base class for invocations that reprocess images for ControlNet |
 | ImageResize | Resizes an image to specific dimensions |
 | ImageScale | Scales an image by a factor |
 | ImageToLatents | Scales latents by a given factor |
+| ImageWatermarkInvocation | Adds an invisible watermark to images |
 | InfillColor | Infills transparent areas of an image with a solid color |
 | InfillPatchMatch | Infills transparent areas of an image using the PatchMatch algorithm |
 | InfillTile | Infills transparent areas of an image with tiles of the image |
@@ -116,49 +118,49 @@ There are several node grouping concepts that can be examined with a narrow focu

 As described, an initial noise tensor is necessary for the latent diffusion process. As a result, all non-image *ToLatents nodes require a noise node input.

-<img width="654" alt="groupsnoise" src="https://github.com/ymgenesis/InvokeAI/assets/25252829/2e8d297e-ad55-4d27-bc93-c119dad2a2c5">
+![groupsnoise](../assets/nodes/groupsnoise.png)

 ### Conditioning

 As described, conditioning is necessary for the latent diffusion process, whether empty or not. As a result, all non-image *ToLatents nodes require positive and negative conditioning inputs. Conditioning is reliant on a CLIP tokenizer provided by the Model Loader node.

-<img width="1024" alt="groupsconditioning" src="https://github.com/ymgenesis/InvokeAI/assets/25252829/f8f7ad8a-8d9c-418e-b5ad-1437b774b27e">
+![groupsconditioning](../assets/nodes/groupsconditioning.png)

 ### Image Space & VAE

 The ImageToLatents node doesn't require a noise node input, but requires a VAE input to convert the image from image space into latent space. In reverse, the LatentsToImage node requires a VAE input to convert from latent space back into image space.

-<img width="637" alt="groupsimgvae" src="https://github.com/ymgenesis/InvokeAI/assets/25252829/dd99969c-e0a8-4f78-9b17-3ffe179cef9a">
+![groupsimgvae](../assets/nodes/groupsimgvae.png)

 ### Defined & Random Seeds

 It is common to want to use both the same seed (for continuity) and random seeds (for variance). To define a seed, simply enter it into the 'Seed' field on a noise node. Conversely, the RandomInt node generates a random integer between 'Low' and 'High', and can be used as input to the 'Seed' edge point on a noise node to randomize your seed.

-<img width="922" alt="groupsrandseed" src="https://github.com/ymgenesis/InvokeAI/assets/25252829/af55bc20-60f6-438e-aba5-3ec871443710">
+![groupsrandseed](../assets/nodes/groupsrandseed.png)

 ### Control

 Control means to guide the diffusion process to adhere to a defined input or structure. Control can be provided as input to non-image *ToLatents nodes from ControlNet nodes. ControlNet nodes usually require an image processor which converts an input image for use with ControlNet.

-<img width="805" alt="groupscontrol" src="https://github.com/ymgenesis/InvokeAI/assets/25252829/cc9c5de7-23a7-46c8-bbad-1f3609d999a6">
+![groupscontrol](../assets/nodes/groupscontrol.png)

 ### LoRA

 The Lora Loader node lets you load a LoRA (say that ten times fast) and pass it as output to both the Prompt (Compel) and non-image *ToLatents nodes. A model's CLIP tokenizer is passed through the LoRA into Prompt (Compel), where it affects conditioning. A model's U-Net is also passed through the LoRA into a non-image *ToLatents node, where it affects noise prediction.

-<img width="993" alt="groupslora" src="https://github.com/ymgenesis/InvokeAI/assets/25252829/630962b0-d914-4505-b3ea-ccae9b0269da">
+![groupslora](../assets/nodes/groupslora.png)

 ### Scaling

 Use the ImageScale, ScaleLatents, and Upscale nodes to upscale images and/or latent images. The chosen method differs across contexts. However, be aware that latents are already noisy and compressed at their original resolution; scaling an image could produce more detailed results.

-<img width="644" alt="groupsallscale" src="https://github.com/ymgenesis/InvokeAI/assets/25252829/99314f05-dd9f-4b6d-b378-31de55346a13">
+![groupsallscale](../assets/nodes/groupsallscale.png)

 ### Iteration + Multiple Images as Input

 Iteration is a common concept in any processing, and means to repeat a process with given input. In nodes, you're able to use the Iterate node to iterate through collections usually gathered by the Collect node. The Iterate node has many potential uses, from processing a collection of images one after another, to varying seeds across multiple image generations and more. This screenshot demonstrates how to collect several images and pass them out one at a time.

-<img width="788" alt="groupsiterate" src="https://github.com/ymgenesis/InvokeAI/assets/25252829/4af5ca27-82c9-4018-8c5b-024d3ee0a121">
+![groupsiterate](../assets/nodes/groupsiterate.png)

 ### Multiple Image Generation + Random Seeds

@@ -166,7 +168,7 @@ Multiple image generation in the node editor is done using the RandomRange node.

 To control seeds across generations takes some care. The first row in the screenshot will generate multiple images with different seeds, but using the same RandomRange parameters across invocations will result in the same group of random seeds being used across the images, producing repeatable results. In the second row, adding the RandomInt node as input to RandomRange's 'Seed' edge point will ensure that seeds are varied across all images across invocations, producing varied results.

-<img width="1027" alt="groupsmultigenseeding" src="https://github.com/ymgenesis/InvokeAI/assets/25252829/518d1b2b-fed1-416b-a052-ab06552521b3">
+![groupsmultigenseeding](../assets/nodes/groupsmultigenseeding.png)
## Examples

@@ -174,7 +176,7 @@ With our knowledge of node grouping and the diffusion process, let’s break dow

 ### Basic text-to-image Node Graph

-<img width="875" alt="nodest2i" src="https://github.com/ymgenesis/InvokeAI/assets/25252829/17c67720-c376-4db8-94f0-5e00381a61ee">
+![nodest2i](../assets/nodes/nodest2i.png)

 - Model Loader: A necessity to generating images (as we’ve read above). We choose our model from the dropdown. It outputs a U-Net, CLIP tokenizer, and VAE.
 - Prompt (Compel): Another necessity. Two prompt nodes are created. One will output positive conditioning (what you want, ‘dog’), one will output negative (what you don’t want, ‘cat’). They both input the CLIP tokenizer that the Model Loader node outputs.

@@ -184,7 +186,7 @@ With our knowledge of node grouping and the diffusion process, let’s break dow

 ### Basic image-to-image Node Graph

-<img width="998" alt="nodesi2i" src="https://github.com/ymgenesis/InvokeAI/assets/25252829/3f2c95d5-cee7-4415-9b79-b46ee60a92fe">
+![nodesi2i](../assets/nodes/nodesi2i.png)

 - Model Loader: Choose a model from the dropdown.
 - Prompt (Compel): Two prompt nodes. One positive (dog), one negative (dog). Same CLIP inputs from the Model Loader node as before.

@@ -195,7 +197,7 @@ With our knowledge of node grouping and the diffusion process, let’s break dow

 ### Basic ControlNet Node Graph

-<img width="703" alt="nodescontrol" src="https://github.com/ymgenesis/InvokeAI/assets/25252829/b02ded86-ceb4-44a2-9910-e19ad184d471">
+![nodescontrol](../assets/nodes/nodescontrol.png)

 - Model Loader
 - Prompt (Compel)
@@ -16,21 +16,24 @@ Output Example:

 ---

-## **Seamless Tiling**
+## **Invisible Watermark**

-The seamless tiling mode causes generated images to seamlessly tile
-with itself creating repetitive wallpaper-like patterns. To use it,
-activate the Seamless Tiling option in the Web GUI and then select
-whether to tile on the X (horizontal) and/or Y (vertical) axes. Tiling
-will then be active for the next set of generations.
+In keeping with the principles for responsible AI generation, and to
+help AI researchers avoid synthetic images contaminating their
+training sets, InvokeAI adds an invisible watermark to each of the
+final images it generates. The watermark consists of the text
+"InvokeAI" and can be viewed using the
+[invisible-watermarks](https://github.com/ShieldMnt/invisible-watermark)
+tool.

-A nice prompt to test seamless tiling with is:
+Watermarking is controlled using the `invisible-watermark` setting in
+`invokeai.yaml`. To turn it off, add the following line under the `Features`
+category.

 ```
-pond garden with lotus by claude monet"
+invisible_watermark: false
 ```

 ---
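To confirm that a generated image actually carries the watermark, the payload can be read back with the same library. This is a minimal sketch, assuming the `invisible-watermark` package (imported as `imwatermark`) and OpenCV are installed, and that the payload is the 8-character "InvokeAI" string (64 bits); the file name is a placeholder.

```python
import cv2
from imwatermark import WatermarkDecoder

bgr = cv2.imread("output.png")            # placeholder path to a generated image
decoder = WatermarkDecoder("bytes", 64)   # 8 characters x 8 bits each
payload = decoder.decode(bgr, "dwtDct")   # DWT+DCT, the library's default method
print(payload.decode("utf-8"))            # expected: "InvokeAI"
```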
## **Weighted Prompts**

@@ -39,34 +42,10 @@ priority to them, by adding `:<percent>` to the end of the section you wish to u

 example consider this prompt:

 ```bash
-tabby cat:0.25 white duck:0.75 hybrid
+(tabby cat):0.25 (white duck):0.75 hybrid
 ```

 This will tell the sampler to invest 25% of its effort on the tabby cat aspect of the image and 75%
 on the white duck aspect (surprisingly, this example actually works). The prompt weights can use any
 combination of integers and floating point numbers, and they do not need to add up to 1.

-## **Thresholding and Perlin Noise Initialization Options**
-
-Under the Noise section of the Web UI, you will find two options named
-Perlin Noise and Noise Threshold. [Perlin
-noise](https://en.wikipedia.org/wiki/Perlin_noise) is a type of
-structured noise used to simulate terrain and other natural
-textures. The slider controls the percentage of perlin noise that will
-be mixed into the image at the beginning of generation. Adding a little
-perlin noise to a generation will alter the image substantially.
-
-The noise threshold limits the range of the latent values during
-sampling and helps combat the oversharpening seen with higher CFG
-scale values.
-
-For better intuition into what these options do in practice:
-
-![here](../assets/truncation_comparison.jpg)
-
-In generating this graphic, perlin noise at initialization was
-programmatically varied going across on the diagram by values 0.0,
-0.1, 0.2, 0.4, 0.5, 0.6, 0.8, 0.9, 1.0; and the threshold was varied
-going down from 0, 1, 2, 3, 4, 5, 10, 20, 100. The other options are
-fixed using the prompt "a portrait of a beautiful young lady" a CFG of
-20, 100 steps, and a seed of 1950357039.
@@ -1,9 +1,15 @@
+import typing
 from enum import Enum

 from fastapi import Body
 from fastapi.routing import APIRouter
+from pathlib import Path
 from pydantic import BaseModel, Field

 from invokeai.backend.image_util.patchmatch import PatchMatch
+from invokeai.backend.image_util.safety_checker import SafetyChecker
+from invokeai.backend.image_util.invisible_watermark import InvisibleWatermark
+from invokeai.app.invocations.upscale import ESRGAN_MODELS

 from invokeai.version import __version__

 from ..dependencies import ApiDependencies
@@ -16,6 +22,10 @@ class LogLevel(int, Enum):
     Warning = logging.WARNING
     Error = logging.ERROR
     Critical = logging.CRITICAL

+class Upscaler(BaseModel):
+    upscaling_method: str = Field(description="Name of upscaling method")
+    upscaling_models: list[str] = Field(description="List of upscaling models for this method")

 app_router = APIRouter(prefix="/v1/app", tags=["app"])

@@ -30,6 +40,9 @@ class AppConfig(BaseModel):
     """App Config Response"""

     infill_methods: list[str] = Field(description="List of available infill methods")
+    upscaling_methods: list[Upscaler] = Field(description="List of upscaling methods")
+    nsfw_methods: list[str] = Field(description="List of NSFW checking methods")
+    watermarking_methods: list[str] = Field(description="List of invisible watermark methods")


 @app_router.get(
@@ -46,7 +59,30 @@ async def get_config() -> AppConfig:
     infill_methods = ['tile']
     if PatchMatch.patchmatch_available():
         infill_methods.append('patchmatch')
-    return AppConfig(infill_methods=infill_methods)

+    upscaling_models = []
+    for model in typing.get_args(ESRGAN_MODELS):
+        upscaling_models.append(str(Path(model).stem))
+    upscaler = Upscaler(
+        upscaling_method = 'esrgan',
+        upscaling_models = upscaling_models
+    )
+
+    nsfw_methods = []
+    if SafetyChecker.safety_checker_available():
+        nsfw_methods.append('nsfw_checker')
+
+    watermarking_methods = []
+    if InvisibleWatermark.invisible_watermark_available():
+        watermarking_methods.append('invisible_watermark')
+
+    return AppConfig(
+        infill_methods=infill_methods,
+        upscaling_methods=[upscaler],
+        nsfw_methods=nsfw_methods,
+        watermarking_methods=watermarking_methods,
+    )

 @app_router.get(
     "/logging",
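A quick way to see the effect of this change is to query the endpoint. The sketch below is a hedged client-side example: the host, port, and `/api` prefix are assumptions here; only the `/v1/app/config` route and the response fields come from the diff above.

```python
import requests

# Assumed local server address; adjust to your deployment.
resp = requests.get("http://127.0.0.1:9090/api/v1/app/config")
resp.raise_for_status()
config = resp.json()

print(config["infill_methods"])        # e.g. ["tile", "patchmatch"]
print(config["upscaling_methods"])     # e.g. [{"upscaling_method": "esrgan", ...}]
print(config["nsfw_methods"])          # e.g. ["nsfw_checker"]
print(config["watermarking_methods"])  # e.g. ["invisible_watermark"]
```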
@@ -298,7 +298,7 @@ async def search_for_models(
 )->List[pathlib.Path]:
     if not search_path.is_dir():
         raise HTTPException(status_code=404, detail=f"The search path '{search_path}' does not exist or is not directory")
-    return ApiDependencies.invoker.services.model_manager.search_for_models([search_path])
+    return ApiDependencies.invoker.services.model_manager.search_for_models(search_path)

 @models_router.get(
     "/ckpt_confs",
@@ -203,7 +203,10 @@ def invoke_api():
             return find_port(port=port + 1)
         else:
             return port

+    from invokeai.backend.install.check_root import check_invokeai_root
+    check_invokeai_root(app_config)  # note, may exit with an exception if root not set up
+
     port = find_port(app_config.port)
     if port != app_config.port:
         logger.warn(f"Port {app_config.port} in use, using port {port}")
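For context, the recursive call above comes from a small port-probing helper. The sketch below is an assumption about its shape (a plain bind test); only the `find_port(port=port + 1)` recursion is taken from the diff.

```python
import socket

def find_port(port: int) -> int:
    """Return `port` if it is free, otherwise probe upward one port at a time."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        try:
            s.bind(("127.0.0.1", port))
        except OSError:
            # Port in use: try the next candidate, as in the diff above.
            return find_port(port=port + 1)
        else:
            return port
```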
BIN  (modified image file) Before: 33 KiB, After: 33 KiB
@@ -95,7 +95,7 @@ class CompelInvocation(BaseInvocation):
         def _lora_loader():
             for lora in self.clip.loras:
                 lora_info = context.services.model_manager.get_model(
-                    **lora.dict(exclude={"weight"}))
+                    **lora.dict(exclude={"weight"}), context=context)
                 yield (lora_info.context.model, lora.weight)
                 del lora_info
             return
@@ -171,16 +171,16 @@ class CompelInvocation(BaseInvocation):
 class SDXLPromptInvocationBase:
     def run_clip_raw(self, context, clip_field, prompt, get_pooled):
         tokenizer_info = context.services.model_manager.get_model(
-            **clip_field.tokenizer.dict(),
+            **clip_field.tokenizer.dict(), context=context,
         )
         text_encoder_info = context.services.model_manager.get_model(
-            **clip_field.text_encoder.dict(),
+            **clip_field.text_encoder.dict(), context=context,
         )

         def _lora_loader():
             for lora in clip_field.loras:
                 lora_info = context.services.model_manager.get_model(
-                    **lora.dict(exclude={"weight"}))
+                    **lora.dict(exclude={"weight"}), context=context)
                 yield (lora_info.context.model, lora.weight)
                 del lora_info
             return
@@ -196,6 +196,7 @@ class SDXLPromptInvocationBase:
                         model_name=name,
                         base_model=clip_field.text_encoder.base_model,
                         model_type=ModelType.TextualInversion,
+                        context=context,
                     ).context.model
                 )
             except ModelNotFoundException:
@@ -240,16 +241,16 @@ class SDXLPromptInvocationBase:

     def run_clip_compel(self, context, clip_field, prompt, get_pooled):
         tokenizer_info = context.services.model_manager.get_model(
-            **clip_field.tokenizer.dict(),
+            **clip_field.tokenizer.dict(), context=context,
         )
         text_encoder_info = context.services.model_manager.get_model(
-            **clip_field.text_encoder.dict(),
+            **clip_field.text_encoder.dict(), context=context,
        )

         def _lora_loader():
             for lora in clip_field.loras:
                 lora_info = context.services.model_manager.get_model(
-                    **lora.dict(exclude={"weight"}))
+                    **lora.dict(exclude={"weight"}), context=context)
                 yield (lora_info.context.model, lora.weight)
                 del lora_info
             return
@@ -265,6 +266,7 @@ class SDXLPromptInvocationBase:
                         model_name=name,
                         base_model=clip_field.text_encoder.base_model,
                         model_type=ModelType.TextualInversion,
+                        context=context,
                     ).context.model
                 )
             except ModelNotFoundException:
@@ -20,7 +20,7 @@ from ...backend.model_management import BaseModelType, ModelType
 from ..models.image import ImageCategory, ImageField, ResourceOrigin
 from .baseinvocation import (BaseInvocation, BaseInvocationOutput,
                              InvocationConfig, InvocationContext)
-from .image import ImageOutput, PILInvocationConfig
+from ..models.image import ImageOutput, PILInvocationConfig

 CONTROLNET_DEFAULT_MODELS = [
     ###########################################
@@ -4,61 +4,21 @@ from typing import Literal, Optional

 import numpy
 from PIL import Image, ImageFilter, ImageOps, ImageChops
-from pydantic import BaseModel, Field
+from pydantic import Field
 from pathlib import Path
 from typing import Union

-from ..models.image import ImageCategory, ImageField, ResourceOrigin
+from invokeai.app.invocations.metadata import CoreMetadata
+from ..models.image import (
+    ImageCategory, ImageField, ResourceOrigin,
+    PILInvocationConfig, ImageOutput, MaskOutput,
+)
 from .baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
     InvocationContext,
     InvocationConfig,
 )

-
-class PILInvocationConfig(BaseModel):
-    """Helper class to provide all PIL invocations with additional config"""
-
-    class Config(InvocationConfig):
-        schema_extra = {
-            "ui": {
-                "tags": ["PIL", "image"],
-            },
-        }
-
-
-class ImageOutput(BaseInvocationOutput):
-    """Base class for invocations that output an image"""
-
-    # fmt: off
-    type: Literal["image_output"] = "image_output"
-    image: ImageField = Field(default=None, description="The output image")
-    width: int = Field(description="The width of the image in pixels")
-    height: int = Field(description="The height of the image in pixels")
-    # fmt: on
-
-    class Config:
-        schema_extra = {"required": ["type", "image", "width", "height"]}
-
-
-class MaskOutput(BaseInvocationOutput):
-    """Base class for invocations that output a mask"""
-
-    # fmt: off
-    type: Literal["mask"] = "mask"
-    mask: ImageField = Field(default=None, description="The output mask")
-    width: int = Field(description="The width of the mask in pixels")
-    height: int = Field(description="The height of the mask in pixels")
-    # fmt: on
-
-    class Config:
-        schema_extra = {
-            "required": [
-                "type",
-                "mask",
-            ]
-        }
-
+from invokeai.backend.image_util.safety_checker import SafetyChecker
+from invokeai.backend.image_util.invisible_watermark import InvisibleWatermark

 class LoadImageInvocation(BaseInvocation):
     """Load an image and provide it as output."""
@@ -397,7 +357,6 @@ class ImageConvertInvocation(BaseInvocation, PILInvocationConfig):
             height=image_dto.height,
         )

-
 class ImageBlurInvocation(BaseInvocation, PILInvocationConfig):
     """Blurs an image"""

@@ -602,7 +561,6 @@ class ImageLerpInvocation(BaseInvocation, PILInvocationConfig):
             height=image_dto.height,
         )

-
 class ImageInverseLerpInvocation(BaseInvocation, PILInvocationConfig):
     """Inverse linear interpolation of all pixels of an image"""

@@ -650,3 +608,97 @@ class ImageInverseLerpInvocation(BaseInvocation, PILInvocationConfig):
             width=image_dto.width,
             height=image_dto.height,
         )

+class ImageNSFWBlurInvocation(BaseInvocation, PILInvocationConfig):
+    """Add blur to NSFW-flagged images"""
+
+    # fmt: off
+    type: Literal["img_nsfw"] = "img_nsfw"
+
+    # Inputs
+    image: Optional[ImageField] = Field(default=None, description="The image to check")
+    metadata: Optional[CoreMetadata] = Field(default=None, description="Optional core metadata to be written to the image")
+    # fmt: on
+
+    class Config(InvocationConfig):
+        schema_extra = {
+            "ui": {
+                "title": "Blur NSFW Images",
+                "tags": ["image", "nsfw", "checker"]
+            },
+        }
+
+    def invoke(self, context: InvocationContext) -> ImageOutput:
+        image = context.services.images.get_pil_image(self.image.image_name)
+
+        logger = context.services.logger
+        logger.debug("Running NSFW checker")
+        if SafetyChecker.has_nsfw_concept(image):
+            logger.info("A potentially NSFW image has been detected. Image will be blurred.")
+            blurry_image = image.filter(filter=ImageFilter.GaussianBlur(radius=32))
+            caution = self._get_caution_img()
+            blurry_image.paste(caution, (0, 0), caution)
+            image = blurry_image
+
+        image_dto = context.services.images.create(
+            image=image,
+            image_origin=ResourceOrigin.INTERNAL,
+            image_category=ImageCategory.GENERAL,
+            node_id=self.id,
+            session_id=context.graph_execution_state_id,
+            is_intermediate=self.is_intermediate,
+            metadata=self.metadata.dict() if self.metadata else None,
+        )
+
+        return ImageOutput(
+            image=ImageField(image_name=image_dto.image_name),
+            width=image_dto.width,
+            height=image_dto.height,
+        )
+
+    def _get_caution_img(self) -> Image:
+        import invokeai.app.assets.images as image_assets
+        caution = Image.open(Path(image_assets.__path__[0]) / 'caution.png')
+        return caution.resize((caution.width // 2, caution.height // 2))
+
+class ImageWatermarkInvocation(BaseInvocation, PILInvocationConfig):
+    """Add an invisible watermark to an image"""
+
+    # fmt: off
+    type: Literal["img_watermark"] = "img_watermark"
+
+    # Inputs
+    image: Optional[ImageField] = Field(default=None, description="The image to check")
+    text: str = Field(default='InvokeAI', description="Watermark text")
+    metadata: Optional[CoreMetadata] = Field(default=None, description="Optional core metadata to be written to the image")
+    # fmt: on
+
+    class Config(InvocationConfig):
+        schema_extra = {
+            "ui": {
+                "title": "Add Invisible Watermark",
+                "tags": ["image", "watermark", "invisible"]
+            },
+        }
+
+    def invoke(self, context: InvocationContext) -> ImageOutput:
+        image = context.services.images.get_pil_image(self.image.image_name)
+        new_image = InvisibleWatermark.add_watermark(image, self.text)
+        image_dto = context.services.images.create(
+            image=new_image,
+            image_origin=ResourceOrigin.INTERNAL,
+            image_category=ImageCategory.GENERAL,
+            node_id=self.id,
+            session_id=context.graph_execution_state_id,
+            is_intermediate=self.is_intermediate,
+            metadata=self.metadata.dict() if self.metadata else None,
+        )
+
+        return ImageOutput(
+            image=ImageField(image_name=image_dto.image_name),
+            width=image_dto.width,
+            height=image_dto.height,
+        )
@@ -501,7 +501,7 @@ class LatentsToImageInvocation(BaseInvocation):
     vae: VaeField = Field(default=None, description="Vae submodel")
     tiled: bool = Field(
         default=False,
-        description="Decode latents by overlaping tiles(less memory consumption)")
+        description="Decode latents by overlapping tiles(less memory consumption)")
     fp32: bool = Field(DEFAULT_PRECISION=='float32', description="Decode in full precision")
     metadata: Optional[CoreMetadata] = Field(default=None, description="Optional core metadata to be written to the image")
@@ -2,16 +2,18 @@ from typing import Literal, Optional, Union

 from pydantic import BaseModel, Field

-from invokeai.app.invocations.baseinvocation import (BaseInvocation,
-                                                     BaseInvocationOutput, InvocationConfig,
-                                                     InvocationContext)
+from invokeai.app.invocations.baseinvocation import (
+    BaseInvocation,
+    BaseInvocationOutput,
+    InvocationConfig,
+    InvocationContext,
+)
 from invokeai.app.invocations.controlnet_image_processors import ControlField
-from invokeai.app.invocations.model import (LoRAModelField, MainModelField,
-                                            VAEModelField)
+from invokeai.app.invocations.model import LoRAModelField, MainModelField, VAEModelField

 class LoRAMetadataField(BaseModel):
     """LoRA metadata for an image generated in InvokeAI."""

     lora: LoRAModelField = Field(description="The LoRA model")
     weight: float = Field(description="The weight of the LoRA model")

@@ -19,7 +21,9 @@ class LoRAMetadataField(BaseModel):
 class CoreMetadata(BaseModel):
     """Core generation metadata for an image generated in InvokeAI."""

-    generation_mode: str = Field(description="The generation mode that output this image",)
+    generation_mode: str = Field(
+        description="The generation mode that output this image",
+    )
     positive_prompt: str = Field(description="The positive prompt parameter")
     negative_prompt: str = Field(description="The negative prompt parameter")
     width: int = Field(description="The width parameter")
@@ -29,10 +33,20 @@ class CoreMetadata(BaseModel):
     cfg_scale: float = Field(description="The classifier-free guidance scale parameter")
     steps: int = Field(description="The number of steps used for inference")
     scheduler: str = Field(description="The scheduler used for inference")
-    clip_skip: int = Field(description="The number of skipped CLIP layers",)
+    clip_skip: int = Field(
+        description="The number of skipped CLIP layers",
+    )
     model: MainModelField = Field(description="The main model used for inference")
-    controlnets: list[ControlField]= Field(description="The ControlNets used for inference")
+    controlnets: list[ControlField] = Field(
+        description="The ControlNets used for inference"
+    )
     loras: list[LoRAMetadataField] = Field(description="The LoRAs used for inference")
+    vae: Union[VAEModelField, None] = Field(
+        default=None,
+        description="The VAE used for decoding, if the main model's default was not used",
+    )

     # Latents-to-Latents
     strength: Union[float, None] = Field(
         default=None,
         description="The strength used for latents-to-latents",
@@ -40,9 +54,34 @@ class CoreMetadata(BaseModel):
     init_image: Union[str, None] = Field(
         default=None, description="The name of the initial image"
     )
-    vae: Union[VAEModelField, None] = Field(
-        default=None,
-        description="The VAE used for decoding, if the main model's default was not used",
-    )

+    # SDXL
+    positive_style_prompt: Union[str, None] = Field(
+        default=None, description="The positive style prompt parameter"
+    )
+    negative_style_prompt: Union[str, None] = Field(
+        default=None, description="The negative style prompt parameter"
+    )
+
+    # SDXL Refiner
+    refiner_model: Union[MainModelField, None] = Field(
+        default=None, description="The SDXL Refiner model used"
+    )
+    refiner_cfg_scale: Union[float, None] = Field(
+        default=None,
+        description="The classifier-free guidance scale parameter used for the refiner",
+    )
+    refiner_steps: Union[int, None] = Field(
+        default=None, description="The number of steps used for the refiner"
+    )
+    refiner_scheduler: Union[str, None] = Field(
+        default=None, description="The scheduler used for the refiner"
+    )
+    refiner_aesthetic_store: Union[float, None] = Field(
+        default=None, description="The aesthetic score used for the refiner"
+    )
+    refiner_start: Union[float, None] = Field(
+        default=None, description="The start value used for refiner denoising"
+    )

@@ -71,7 +110,9 @@ class MetadataAccumulatorInvocation(BaseInvocation):

     type: Literal["metadata_accumulator"] = "metadata_accumulator"

-    generation_mode: str = Field(description="The generation mode that output this image",)
+    generation_mode: str = Field(
+        description="The generation mode that output this image",
+    )
     positive_prompt: str = Field(description="The positive prompt parameter")
     negative_prompt: str = Field(description="The negative prompt parameter")
     width: int = Field(description="The width parameter")
@@ -81,9 +122,13 @@ class MetadataAccumulatorInvocation(BaseInvocation):
     cfg_scale: float = Field(description="The classifier-free guidance scale parameter")
     steps: int = Field(description="The number of steps used for inference")
     scheduler: str = Field(description="The scheduler used for inference")
-    clip_skip: int = Field(description="The number of skipped CLIP layers",)
+    clip_skip: int = Field(
+        description="The number of skipped CLIP layers",
+    )
     model: MainModelField = Field(description="The main model used for inference")
-    controlnets: list[ControlField]= Field(description="The ControlNets used for inference")
+    controlnets: list[ControlField] = Field(
+        description="The ControlNets used for inference"
+    )
     loras: list[LoRAMetadataField] = Field(description="The LoRAs used for inference")
     strength: Union[float, None] = Field(
         default=None,
@@ -97,36 +142,44 @@ class MetadataAccumulatorInvocation(BaseInvocation):
         description="The VAE used for decoding, if the main model's default was not used",
     )

+    # SDXL
+    positive_style_prompt: Union[str, None] = Field(
+        default=None, description="The positive style prompt parameter"
+    )
+    negative_style_prompt: Union[str, None] = Field(
+        default=None, description="The negative style prompt parameter"
+    )
+
+    # SDXL Refiner
+    refiner_model: Union[MainModelField, None] = Field(
+        default=None, description="The SDXL Refiner model used"
+    )
+    refiner_cfg_scale: Union[float, None] = Field(
+        default=None,
+        description="The classifier-free guidance scale parameter used for the refiner",
+    )
+    refiner_steps: Union[int, None] = Field(
+        default=None, description="The number of steps used for the refiner"
+    )
+    refiner_scheduler: Union[str, None] = Field(
+        default=None, description="The scheduler used for the refiner"
+    )
+    refiner_aesthetic_store: Union[float, None] = Field(
+        default=None, description="The aesthetic score used for the refiner"
+    )
+    refiner_start: Union[float, None] = Field(
+        default=None, description="The start value used for refiner denoising"
+    )

     class Config(InvocationConfig):
         schema_extra = {
             "ui": {
                 "title": "Metadata Accumulator",
-                "tags": ["image", "metadata", "generation"]
+                "tags": ["image", "metadata", "generation"],
             },
         }

     def invoke(self, context: InvocationContext) -> MetadataAccumulatorOutput:
         """Collects and outputs a CoreMetadata object"""

-        return MetadataAccumulatorOutput(
-            metadata=CoreMetadata(
-                generation_mode=self.generation_mode,
-                positive_prompt=self.positive_prompt,
-                negative_prompt=self.negative_prompt,
-                width=self.width,
-                height=self.height,
-                seed=self.seed,
-                rand_device=self.rand_device,
-                cfg_scale=self.cfg_scale,
-                steps=self.steps,
-                scheduler=self.scheduler,
-                model=self.model,
-                strength=self.strength,
-                init_image=self.init_image,
-                vae=self.vae,
-                controlnets=self.controlnets,
-                loras=self.loras,
-                clip_skip=self.clip_skip,
-            )
-        )
+        return MetadataAccumulatorOutput(metadata=CoreMetadata(**self.dict()))
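The simplified `invoke()` works because the accumulator's field names mirror `CoreMetadata`'s, and pydantic v1 ignores unknown keyword arguments on init by default, so extra fields such as `type` are simply dropped. A toy sketch of the pattern, using placeholder models rather than the real classes:

```python
from pydantic import BaseModel

class Meta(BaseModel):
    width: int
    height: int

class Accumulator(BaseModel):
    type: str = "metadata_accumulator"  # extra key, silently ignored by Meta
    width: int = 512
    height: int = 768

acc = Accumulator()
meta = Meta(**acc.dict())  # copies every field whose name matches
print(meta)                # width=512 height=768
```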
@@ -119,8 +119,8 @@ class NoiseInvocation(BaseInvocation):

     @validator("seed", pre=True)
     def modulo_seed(cls, v):
-        """Returns the seed modulo SEED_MAX to ensure it is within the valid range."""
-        return v % SEED_MAX
+        """Returns the seed modulo (SEED_MAX + 1) to ensure it is within the valid range."""
+        return v % (SEED_MAX + 1)

     def invoke(self, context: InvocationContext) -> NoiseOutput:
         noise = get_noise(
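A worked check of the off-by-one fix, assuming `SEED_MAX` is the largest valid seed (for example `2**32 - 1`): the old expression could never return `SEED_MAX` itself and silently remapped it to 0, while the new one maps every value in `[0, SEED_MAX]` to itself.

```python
SEED_MAX = 2**32 - 1  # assumed value of the constant

# Old behavior: a perfectly valid seed gets remapped to 0.
assert SEED_MAX % SEED_MAX == 0

# New behavior: the full range [0, SEED_MAX] is preserved.
assert SEED_MAX % (SEED_MAX + 1) == SEED_MAX
assert (SEED_MAX + 1) % (SEED_MAX + 1) == 0  # first out-of-range value wraps to 0
```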
@@ -138,7 +138,7 @@ class SDXLRefinerModelLoaderInvocation(BaseInvocation):
             "ui": {
                 "title": "SDXL Refiner Model Loader",
                 "tags": ["model", "loader", "sdxl_refiner"],
-                "type_hints": {"model": "model"},
+                "type_hints": {"model": "refiner_model"},
             },
         }

@@ -295,7 +295,7 @@ class SDXLTextToLatentsInvocation(BaseInvocation):

         unet_info = context.services.model_manager.get_model(
-            **self.unet.unet.dict()
+            **self.unet.unet.dict(), context=context
         )
         do_classifier_free_guidance = True
         cross_attention_kwargs = None
@@ -463,8 +463,8 @@ class SDXLLatentsToLatentsInvocation(BaseInvocation):
     unet: UNetField = Field(default=None, description="UNet submodel")
     latents: Optional[LatentsField] = Field(description="Initial latents")

-    denoising_start: float = Field(default=0.0, ge=0, lt=1, description="")
-    denoising_end: float = Field(default=1.0, gt=0, le=1, description="")
+    denoising_start: float = Field(default=0.0, ge=0, le=1, description="")
+    denoising_end: float = Field(default=1.0, ge=0, le=1, description="")

     #control: Union[ControlField, list[ControlField]] = Field(default=None, description="The control to use")
     #seamless: bool = Field(default=False, description="Whether or not to generate an image that can tile without seams", )
@@ -549,13 +549,13 @@ class SDXLLatentsToLatentsInvocation(BaseInvocation):
         num_inference_steps = num_inference_steps - t_start

         # apply noise(if provided)
-        if self.noise is not None:
+        if self.noise is not None and timesteps.shape[0] > 0:
             noise = context.services.latents.get(self.noise.latents_name)
             latents = scheduler.add_noise(latents, noise, timesteps[:1])
             del noise

         unet_info = context.services.model_manager.get_model(
-            **self.unet.unet.dict()
+            **self.unet.unet.dict(), context=context,
         )
         do_classifier_free_guidance = True
         cross_attention_kwargs = None
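The added `timesteps.shape[0] > 0` guard matters when the denoising window leaves no steps to run: `timesteps[:1]` on an empty tensor is itself empty, so `scheduler.add_noise` would be handed a zero-length timestep tensor. A toy illustration of the condition, independent of the real scheduler:

```python
import torch

timesteps = torch.tensor([], dtype=torch.long)  # hypothetical empty schedule
print(timesteps[:1])           # tensor([], dtype=torch.int64), still empty
print(timesteps.shape[0] > 0)  # False, so the noise-adding branch is skipped
```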
@@ -1,9 +1,80 @@
 from enum import Enum
-from typing import Optional, Tuple
+from typing import Optional, Tuple, Literal
 from pydantic import BaseModel, Field

 from invokeai.app.util.metaenum import MetaEnum
+from ..invocations.baseinvocation import (
+    BaseInvocationOutput,
+    InvocationConfig,
+)

+class ImageField(BaseModel):
+    """An image field used for passing image objects between invocations"""
+
+    image_name: Optional[str] = Field(default=None, description="The name of the image")
+
+    class Config:
+        schema_extra = {"required": ["image_name"]}
+
+
+class ColorField(BaseModel):
+    r: int = Field(ge=0, le=255, description="The red component")
+    g: int = Field(ge=0, le=255, description="The green component")
+    b: int = Field(ge=0, le=255, description="The blue component")
+    a: int = Field(ge=0, le=255, description="The alpha component")
+
+    def tuple(self) -> Tuple[int, int, int, int]:
+        return (self.r, self.g, self.b, self.a)
+
+
+class ProgressImage(BaseModel):
+    """The progress image sent intermittently during processing"""
+
+    width: int = Field(description="The effective width of the image in pixels")
+    height: int = Field(description="The effective height of the image in pixels")
+    dataURL: str = Field(description="The image data as a b64 data URL")
+
+
+class PILInvocationConfig(BaseModel):
+    """Helper class to provide all PIL invocations with additional config"""
+
+    class Config(InvocationConfig):
+        schema_extra = {
+            "ui": {
+                "tags": ["PIL", "image"],
+            },
+        }
+
+
+class ImageOutput(BaseInvocationOutput):
+    """Base class for invocations that output an image"""
+
+    # fmt: off
+    type: Literal["image_output"] = "image_output"
+    image: ImageField = Field(default=None, description="The output image")
+    width: int = Field(description="The width of the image in pixels")
+    height: int = Field(description="The height of the image in pixels")
+    # fmt: on
+
+    class Config:
+        schema_extra = {"required": ["type", "image", "width", "height"]}
+
+
+class MaskOutput(BaseInvocationOutput):
+    """Base class for invocations that output a mask"""
+
+    # fmt: off
+    type: Literal["mask"] = "mask"
+    mask: ImageField = Field(default=None, description="The output mask")
+    width: int = Field(description="The width of the mask in pixels")
+    height: int = Field(description="The height of the mask in pixels")
+    # fmt: on
+
+    class Config:
+        schema_extra = {
+            "required": [
+                "type",
+                "mask",
+            ]
+        }
+

 class ResourceOrigin(str, Enum, metaclass=MetaEnum):
     """The origin of a resource (eg image).
@@ -63,28 +134,3 @@ class InvalidImageCategoryException(ValueError):
         super().__init__(message)

-
-class ImageField(BaseModel):
-    """An image field used for passing image objects between invocations"""
-
-    image_name: Optional[str] = Field(default=None, description="The name of the image")
-
-    class Config:
-        schema_extra = {"required": ["image_name"]}
-
-
-class ColorField(BaseModel):
-    r: int = Field(ge=0, le=255, description="The red component")
-    g: int = Field(ge=0, le=255, description="The green component")
-    b: int = Field(ge=0, le=255, description="The blue component")
-    a: int = Field(ge=0, le=255, description="The alpha component")
-
-    def tuple(self) -> Tuple[int, int, int, int]:
-        return (self.r, self.g, self.b, self.a)
-
-
-class ProgressImage(BaseModel):
-    """The progress image sent intermittently during processing"""
-
-    width: int = Field(description="The effective width of the image in pixels")
-    height: int = Field(description="The effective height of the image in pixels")
-    dataURL: str = Field(description="The image data as a b64 data URL")
@@ -28,7 +28,6 @@ InvokeAI:
     always_use_cpu: false
     free_gpu_mem: false
   Features:
-    nsfw_checker: true
     restore: true
     esrgan: true
     patchmatch: true
@@ -92,18 +91,18 @@ Typical usage at the top level file:

   from invokeai.app.services.config import InvokeAIAppConfig

-  # get global configuration and print its nsfw_checker value
+  # get global configuration and print its cache size
   conf = InvokeAIAppConfig.get_config()
   conf.parse_args()
-  print(conf.nsfw_checker)
+  print(conf.max_cache_size)

 Typical usage in a backend module:

   from invokeai.app.services.config import InvokeAIAppConfig

-  # get global configuration and print its nsfw_checker value
+  # get global configuration and print its cache size value
   conf = InvokeAIAppConfig.get_config()
-  print(conf.nsfw_checker)
+  print(conf.max_cache_size)


 Computed properties:
@@ -277,7 +276,7 @@ class InvokeAISettings(BaseSettings):
     @classmethod
     def _excluded_from_yaml(self)->List[str]:
         # combination of deprecated parameters and internal ones that shouldn't be exposed as invokeai.yaml options
-        return ['type','initconf', 'gpu_mem_reserved', 'max_loaded_models', 'version', 'from_file', 'model', 'restore', 'root']
+        return ['type','initconf', 'gpu_mem_reserved', 'max_loaded_models', 'version', 'from_file', 'model', 'restore', 'root', 'nsfw_checker']

     class Config:
         env_file_encoding = 'utf-8'
@@ -364,7 +363,6 @@ setting environment variables INVOKEAI_<setting>.
     esrgan : bool = Field(default=True, description="Enable/disable upscaling code", category='Features')
     internet_available : bool = Field(default=True, description="If true, attempt to download models on the fly; otherwise only use local models", category='Features')
     log_tokenization : bool = Field(default=False, description="Enable logging of parsed prompt tokens.", category='Features')
-    nsfw_checker : bool = Field(default=True, description="Enable/disable the NSFW checker", category='Features')
     patchmatch : bool = Field(default=True, description="Enable/disable patchmatch inpaint code", category='Features')
     restore : bool = Field(default=True, description="Enable/disable face restoration code (DEPRECATED)", category='DEPRECATED')

@@ -374,6 +372,7 @@ setting environment variables INVOKEAI_<setting>.
     max_cache_size : float = Field(default=6.0, gt=0, description="Maximum memory amount used by model cache for rapid switching", category='Memory/Performance')
     max_vram_cache_size : float = Field(default=2.75, ge=0, description="Amount of VRAM reserved for model storage", category='Memory/Performance')
     gpu_mem_reserved : float = Field(default=2.75, ge=0, description="DEPRECATED: use max_vram_cache_size. Amount of VRAM reserved for model storage", category='DEPRECATED')
+    nsfw_checker : bool = Field(default=True, description="DEPRECATED: use Web settings to enable/disable", category='DEPRECATED')
     precision : Literal[tuple(['auto','float16','float32','autocast'])] = Field(default='auto',description='Floating point precision', category='Memory/Performance')
     sequential_guidance : bool = Field(default=False, description="Whether to calculate guidance in serial instead of in parallel, lowering memory requirements", category='Memory/Performance')
     xformers_enabled : bool = Field(default=True, description="Enable/disable memory-efficient attention", category='Memory/Performance')
@@ -525,6 +524,16 @@ setting environment variables INVOKEAI_<setting>.
         """Return true if patchmatch true"""
         return self.patchmatch

+    @property
+    def nsfw_checker(self)->bool:
+        """NSFW node is always active and disabled from Web UI"""
+        return True
+
+    @property
+    def invisible_watermark(self)->bool:
+        """Invisible watermark node is always active and disabled from Web UI"""
+        return True
+
     @staticmethod
     def find_root()->Path:
         '''
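A short usage sketch of the deprecation pattern above: the `nsfw_checker` Field survives (under `category='DEPRECATED'`) so existing `invokeai.yaml` files still parse, while the property, being a data descriptor, takes precedence over the stored value, so callers always see `True`. The `get_config()` call mirrors the module docstring earlier in this diff.

```python
from invokeai.app.services.config import InvokeAIAppConfig

conf = InvokeAIAppConfig.get_config()
print(conf.nsfw_checker)         # True: the property shadows the deprecated field
print(conf.invisible_watermark)  # True: likewise always on
```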
@@ -1,4 +1,5 @@
 from ..invocations.latent import LatentsToImageInvocation, TextToLatentsInvocation
+from ..invocations.image import ImageNSFWBlurInvocation
 from ..invocations.noise import NoiseInvocation
 from ..invocations.compel import CompelInvocation
 from ..invocations.params import ParamIntInvocation
@@ -24,6 +25,7 @@ def create_text_to_image() -> LibraryGraph:
             '5': CompelInvocation(id='5'),
             '6': TextToLatentsInvocation(id='6'),
             '7': LatentsToImageInvocation(id='7'),
+            '8': ImageNSFWBlurInvocation(id='8'),
         },
         edges=[
             Edge(source=EdgeConnection(node_id='width', field='a'), destination=EdgeConnection(node_id='3', field='width')),
@@ -33,6 +35,7 @@ def create_text_to_image() -> LibraryGraph:
             Edge(source=EdgeConnection(node_id='6', field='latents'), destination=EdgeConnection(node_id='7', field='latents')),
             Edge(source=EdgeConnection(node_id='4', field='conditioning'), destination=EdgeConnection(node_id='6', field='positive_conditioning')),
             Edge(source=EdgeConnection(node_id='5', field='conditioning'), destination=EdgeConnection(node_id='6', field='negative_conditioning')),
+            Edge(source=EdgeConnection(node_id='7', field='image'), destination=EdgeConnection(node_id='8', field='image')),
         ]
     ),
     exposed_inputs=[
@@ -43,7 +46,7 @@ def create_text_to_image() -> LibraryGraph:
         ExposedNodeInput(node_path='seed', field='a', alias='seed'),
     ],
     exposed_outputs=[
-        ExposedNodeOutput(node_path='7', field='image', alias='image')
+        ExposedNodeOutput(node_path='8', field='image', alias='image')
     ])
@@ -3,7 +3,13 @@
 from typing import Any, Optional
 from invokeai.app.models.image import ProgressImage
 from invokeai.app.util.misc import get_timestamp
-from invokeai.app.services.model_manager_service import BaseModelType, ModelType, SubModelType, ModelInfo
+from invokeai.app.services.model_manager_service import (
+    BaseModelType,
+    ModelType,
+    SubModelType,
+    ModelInfo,
+)


 class EventServiceBase:
     session_event: str = "session_event"
@@ -38,7 +44,9 @@ class EventServiceBase:
                 graph_execution_state_id=graph_execution_state_id,
                 node=node,
                 source_node_id=source_node_id,
-                progress_image=progress_image.dict() if progress_image is not None else None,
+                progress_image=progress_image.dict()
+                if progress_image is not None
+                else None,
                 step=step,
                 total_steps=total_steps,
             ),
@@ -67,6 +75,7 @@ class EventServiceBase:
         graph_execution_state_id: str,
         node: dict,
         source_node_id: str,
+        error_type: str,
         error: str,
     ) -> None:
         """Emitted when an invocation has completed"""
@@ -76,6 +85,7 @@ class EventServiceBase:
                 graph_execution_state_id=graph_execution_state_id,
                 node=node,
                 source_node_id=source_node_id,
+                error_type=error_type,
                 error=error,
             ),
         )
@@ -102,13 +112,13 @@ class EventServiceBase:
             ),
         )

-    def emit_model_load_started (
-        self,
-        graph_execution_state_id: str,
-        model_name: str,
-        base_model: BaseModelType,
-        model_type: ModelType,
-        submodel: SubModelType,
+    def emit_model_load_started(
+        self,
+        graph_execution_state_id: str,
+        model_name: str,
+        base_model: BaseModelType,
+        model_type: ModelType,
+        submodel: SubModelType,
     ) -> None:
         """Emitted when a model is requested"""
         self.__emit_session_event(
@@ -123,13 +133,13 @@ class EventServiceBase:
         )

     def emit_model_load_completed(
-        self,
-        graph_execution_state_id: str,
-        model_name: str,
-        base_model: BaseModelType,
-        model_type: ModelType,
-        submodel: SubModelType,
-        model_info: ModelInfo,
+        self,
+        graph_execution_state_id: str,
+        model_name: str,
+        base_model: BaseModelType,
+        model_type: ModelType,
+        submodel: SubModelType,
+        model_info: ModelInfo,
     ) -> None:
         """Emitted when a model is correctly loaded (returns model info)"""
         self.__emit_session_event(
@@ -145,3 +155,37 @@ class EventServiceBase:
             precision=str(model_info.precision),
         ),
     )

+    def emit_session_retrieval_error(
+        self,
+        graph_execution_state_id: str,
+        error_type: str,
+        error: str,
+    ) -> None:
+        """Emitted when session retrieval fails"""
+        self.__emit_session_event(
+            event_name="session_retrieval_error",
+            payload=dict(
+                graph_execution_state_id=graph_execution_state_id,
+                error_type=error_type,
+                error=error,
+            ),
+        )
+
+    def emit_invocation_retrieval_error(
+        self,
+        graph_execution_state_id: str,
+        node_id: str,
+        error_type: str,
+        error: str,
+    ) -> None:
+        """Emitted when invocation retrieval fails"""
+        self.__emit_session_event(
+            event_name="invocation_retrieval_error",
+            payload=dict(
+                graph_execution_state_id=graph_execution_state_id,
+                node_id=node_id,
+                error_type=error_type,
+                error=error,
+            ),
+        )
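The retrieval-error events above let the processor report failures that happen before an invocation even starts, and emit_invocation_error now carries the exception class name in error_type. A minimal sketch of how a caller populates these fields (the events, state, invocation, and source_node_id names are illustrative placeholders, not part of this diff):

    import traceback

    try:
        outputs = invocation.invoke(context)
    except Exception as e:
        events.emit_invocation_error(
            graph_execution_state_id=state.id,
            node=invocation.dict(),
            source_node_id=source_node_id,
            error_type=e.__class__.__name__,  # e.g. "ValueError"
            error=traceback.format_exc(),     # full traceback string
        )
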
@@ -216,16 +216,13 @@ class ImageService(ImageServiceABC):
                 metadata=metadata,
                 session_id=session_id,
             )

             if board_id is not None:
                 self._services.board_image_records.add_image_to_board(
                     board_id=board_id, image_name=image_name
                 )

             self._services.image_files.save(
                 image_name=image_name, image=image, metadata=metadata, graph=graph
             )

             image_dto = self.get_dto(image_name)

             return image_dto
@@ -236,7 +233,7 @@ class ImageService(ImageServiceABC):
             self._services.logger.error("Failed to save image file")
             raise
         except Exception as e:
-            self._services.logger.error("Problem saving image record and file")
+            self._services.logger.error(f"Problem saving image record and file: {str(e)}")
             raise e

     def update(

@@ -600,7 +600,7 @@ class ModelManagerService(ModelManagerServiceBase):
         """
         Return list of all models found in the designated directory.
         """
-        search = FindModels(directory,self.logger)
+        search = FindModels([directory], self.logger)
         return search.list_models()

     def sync_to_config(self):

@@ -39,21 +39,41 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
             try:
                 queue_item: InvocationQueueItem = self.__invoker.services.queue.get()
             except Exception as e:
-                logger.debug("Exception while getting from queue: %s" % e)
+                self.__invoker.services.logger.error("Exception while getting from queue:\n%s" % e)

             if not queue_item:  # Probably stopping
                 # do not hammer the queue
                 time.sleep(0.5)
                 continue

-            graph_execution_state = (
-                self.__invoker.services.graph_execution_manager.get(
-                    queue_item.graph_execution_state_id
-                )
-            )
-            invocation = graph_execution_state.execution_graph.get_node(
-                queue_item.invocation_id
-            )
+            try:
+                graph_execution_state = (
+                    self.__invoker.services.graph_execution_manager.get(
+                        queue_item.graph_execution_state_id
+                    )
+                )
+            except Exception as e:
+                self.__invoker.services.logger.error("Exception while retrieving session:\n%s" % e)
+                self.__invoker.services.events.emit_session_retrieval_error(
+                    graph_execution_state_id=queue_item.graph_execution_state_id,
+                    error_type=e.__class__.__name__,
+                    error=traceback.format_exc(),
+                )
+                continue
+
+            try:
+                invocation = graph_execution_state.execution_graph.get_node(
+                    queue_item.invocation_id
+                )
+            except Exception as e:
+                self.__invoker.services.logger.error("Exception while retrieving invocation:\n%s" % e)
+                self.__invoker.services.events.emit_invocation_retrieval_error(
+                    graph_execution_state_id=queue_item.graph_execution_state_id,
+                    node_id=queue_item.invocation_id,
+                    error_type=e.__class__.__name__,
+                    error=traceback.format_exc(),
+                )
+                continue

             # get the source node id to provide to clients (the prepared node id is not as useful)
             source_node_id = graph_execution_state.prepared_source_mapping[invocation.id]
@@ -114,11 +134,13 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
                         graph_execution_state
                     )

+                    self.__invoker.services.logger.error("Error while invoking:\n%s" % e)
                     # Send error event
                     self.__invoker.services.events.emit_invocation_error(
                         graph_execution_state_id=graph_execution_state.id,
                         node=invocation.dict(),
                         source_node_id=source_node_id,
+                        error_type=e.__class__.__name__,
                         error=error,
                     )

@@ -136,11 +158,12 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
                 try:
                     self.__invoker.invoke(graph_execution_state, invoke_all=True)
                 except Exception as e:
-                    logger.error("Error while invoking: %s" % e)
+                    self.__invoker.services.logger.error("Error while invoking:\n%s" % e)
                     self.__invoker.services.events.emit_invocation_error(
                         graph_execution_state_id=graph_execution_state.id,
                         node=invocation.dict(),
                         source_node_id=source_node_id,
+                        error_type=e.__class__.__name__,
                         error=traceback.format_exc()
                     )
             elif is_complete:

@@ -14,8 +14,9 @@ def get_datetime_from_iso_timestamp(iso_timestamp: str) -> datetime.datetime:
     return datetime.datetime.fromisoformat(iso_timestamp)


-SEED_MAX = np.iinfo(np.int32).max
+SEED_MAX = np.iinfo(np.uint32).max


 def get_random_seed():
-    return np.random.randint(0, SEED_MAX)
+    rng = np.random.default_rng(seed=0)
+    return int(rng.integers(0, SEED_MAX))

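The seed ceiling moves from the int32 maximum to the uint32 maximum, matching the frontend's NUMPY_RAND_MAX change further down in this diff. A quick sanity check of the new bound as a hedged sketch; note that default_rng(seed=0) as written above yields the same value on every call, so a long-lived generator (or seed=None) is what you would want in practice:

    import numpy as np

    assert np.iinfo(np.int32).max == 2_147_483_647    # old SEED_MAX
    assert np.iinfo(np.uint32).max == 4_294_967_295   # new SEED_MAX

    SEED_MAX = np.iinfo(np.uint32).max
    rng = np.random.default_rng()           # unseeded generator for real randomness
    seed = int(rng.integers(0, SEED_MAX))
    assert 0 <= seed < SEED_MAX
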
@@ -12,4 +12,4 @@ from .model_management import (
     ModelManager, ModelCache, BaseModelType,
     ModelType, SubModelType, ModelInfo
 )
-from .safety_checker import SafetyChecker
+from .model_management.models import SilenceWarnings

@@ -28,7 +28,6 @@ from diffusers.schedulers import SchedulerMixin as Scheduler
 import invokeai.backend.util.logging as logger
 from ..image_util import configure_model_padding
 from ..util.util import rand_perlin_2d
-from ..safety_checker import SafetyChecker
 from ..stable_diffusion.diffusers_pipeline import StableDiffusionGeneratorPipeline
 from ..stable_diffusion.schedulers import SCHEDULER_MAP

@@ -52,7 +51,6 @@ class InvokeAIGeneratorBasicParams:
     v_symmetry_time_pct: Optional[float]=None
     variation_amount: float = 0.0
     with_variations: list=field(default_factory=list)
-    safety_checker: Optional[SafetyChecker]=None

 @dataclass
 class InvokeAIGeneratorOutput:
@@ -240,7 +238,6 @@ class Generator:
         self.seed = None
         self.latent_channels = model.unet.config.in_channels
         self.downsampling_factor = downsampling  # BUG: should come from model or config
-        self.safety_checker = None
         self.perlin = 0.0
         self.threshold = 0
         self.variation_amount = 0
@@ -277,12 +274,10 @@ class Generator:
         perlin=0.0,
         h_symmetry_time_pct=None,
         v_symmetry_time_pct=None,
-        safety_checker: SafetyChecker=None,
         free_gpu_mem: bool = False,
         **kwargs,
     ):
         scope = nullcontext
-        self.safety_checker = safety_checker
         self.free_gpu_mem = free_gpu_mem
         attention_maps_images = []
         attention_maps_callback = lambda saver: attention_maps_images.append(
@@ -329,9 +324,6 @@ class Generator:
             # Pass on the seed in case a layer beneath us needs to generate noise on its own.
             image = make_image(x_T, seed)

-            if self.safety_checker is not None:
-                image = self.safety_checker.check(image)
-
             results.append([image, seed, attention_maps_images])

             if image_callback is not None:

34  invokeai/backend/image_util/invisible_watermark.py  (new file)
@@ -0,0 +1,34 @@
"""
This module defines a singleton object, "invisible_watermark", that
wraps the invisible watermark model. It respects the global "invisible_watermark"
configuration variable, which allows the watermarking to be suppressed.
"""
import numpy as np
import cv2
from PIL import Image
from imwatermark import WatermarkEncoder
from invokeai.app.services.config import InvokeAIAppConfig
import invokeai.backend.util.logging as logger

config = InvokeAIAppConfig.get_config()


class InvisibleWatermark:
    """
    Wrapper around InvisibleWatermark module.
    """

    @classmethod
    def invisible_watermark_available(self) -> bool:
        return config.invisible_watermark

    @classmethod
    def add_watermark(self, image: Image, watermark_text: str) -> Image:
        if not self.invisible_watermark_available():
            return image
        logger.debug(f'Applying invisible watermark "{watermark_text}"')
        bgr = cv2.cvtColor(np.array(image.convert("RGB")), cv2.COLOR_RGB2BGR)
        encoder = WatermarkEncoder()
        encoder.set_watermark('bytes', watermark_text.encode('utf-8'))
        bgr_encoded = encoder.encode(bgr, 'dwtDct')
        return Image.fromarray(
            cv2.cvtColor(bgr_encoded, cv2.COLOR_BGR2RGB)
        ).convert("RGBA")
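A minimal usage sketch of the new wrapper; it assumes invisible_watermark is enabled in the app config and the imwatermark package is installed, and the watermark text is illustrative:

    from PIL import Image
    from invokeai.backend.image_util.invisible_watermark import InvisibleWatermark

    img = Image.open("output.png")
    if InvisibleWatermark.invisible_watermark_available():
        # embeds the text invisibly via the DWT-DCT encoder; returns RGBA
        img = InvisibleWatermark.add_watermark(img, "InvokeAI")
    img.save("output-watermarked.png")
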
63  invokeai/backend/image_util/safety_checker.py  (new file)
@@ -0,0 +1,63 @@
"""
This module defines a singleton object, "safety_checker", that
wraps the safety_checker model. It respects the global "nsfw_checker"
configuration variable, which allows the checker to be suppressed.
"""
import numpy as np
from PIL import Image
from invokeai.backend import SilenceWarnings
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.util.devices import choose_torch_device
import invokeai.backend.util.logging as logger

config = InvokeAIAppConfig.get_config()

CHECKER_PATH = 'core/convert/stable-diffusion-safety-checker'


class SafetyChecker:
    """
    Wrapper around SafetyChecker model.
    """
    safety_checker = None
    feature_extractor = None
    tried_load: bool = False

    @classmethod
    def _load_safety_checker(self):
        if self.tried_load:
            return

        if config.nsfw_checker:
            try:
                from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
                from transformers import AutoFeatureExtractor
                self.safety_checker = StableDiffusionSafetyChecker.from_pretrained(
                    config.models_path / CHECKER_PATH
                )
                self.feature_extractor = AutoFeatureExtractor.from_pretrained(
                    config.models_path / CHECKER_PATH)
                logger.info('NSFW checker initialized')
            except Exception as e:
                logger.warning(f'Could not load NSFW checker: {str(e)}')
        else:
            logger.info('NSFW checker loading disabled')
        self.tried_load = True

    @classmethod
    def safety_checker_available(self) -> bool:
        self._load_safety_checker()
        return self.safety_checker is not None

    @classmethod
    def has_nsfw_concept(self, image: Image) -> bool:
        if not self.safety_checker_available():
            return False

        device = choose_torch_device()
        features = self.feature_extractor([image], return_tensors="pt")
        features.to(device)
        self.safety_checker.to(device)
        x_image = np.array(image).astype(np.float32) / 255.0
        x_image = x_image[None].transpose(0, 3, 1, 2)
        with SilenceWarnings():
            checked_image, has_nsfw_concept = self.safety_checker(images=x_image, clip_input=features.pixel_values)
        return has_nsfw_concept[0]
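A minimal usage sketch of the class-level API (assuming nsfw_checker is enabled in the config and the converted checker weights are present under models/core/convert/):

    from PIL import Image
    from invokeai.backend.image_util.safety_checker import SafetyChecker

    img = Image.open("candidate.png")
    if SafetyChecker.has_nsfw_concept(img):
        # the caller decides what to do; the node graph change above
        # routes decoded images through ImageNSFWBlurInvocation
        print("flagged by the NSFW checker")
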
33  invokeai/backend/install/check_root.py  (new file)
@@ -0,0 +1,33 @@
"""
Check that the invokeai_root is correctly configured and exit if not.
"""
import sys
from invokeai.app.services.config import (
    InvokeAIAppConfig,
)

def check_invokeai_root(config: InvokeAIAppConfig):
    try:
        assert config.model_conf_path.exists(), f'{config.model_conf_path} not found'
        assert config.db_path.parent.exists(), f'{config.db_path.parent} not found'
        assert config.models_path.exists(), f'{config.models_path} not found'
        for model in [
                'CLIP-ViT-bigG-14-laion2B-39B-b160k',
                'bert-base-uncased',
                'clip-vit-large-patch14',
                'sd-vae-ft-mse',
                'stable-diffusion-2-clip',
                'stable-diffusion-safety-checker']:
            path = config.models_path / f'core/convert/{model}'
            assert path.exists(), f'{path} is missing'
    except Exception as e:
        print()
        print(f'An exception has occurred: {str(e)}')
        print('== STARTUP ABORTED ==')
        print('** One or more necessary files are missing from your InvokeAI root directory **')
        print('** Please rerun the configuration script to fix this problem. **')
        print('** From the launcher, select option [7]. **')
        print('** From the command line, activate the virtual environment and run "invokeai-configure --yes --skip-sd-weights" **')
        input('Press any key to continue...')
        sys.exit(0)
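A sketch of how a startup path might call the new check; the surrounding code is an assumption, since the diff only adds the helper itself:

    from invokeai.app.services.config import InvokeAIAppConfig
    from invokeai.backend.install.check_root import check_invokeai_root

    config = InvokeAIAppConfig.get_config()
    # exits via sys.exit(0) with a remediation message if any core model is missing
    check_invokeai_root(config)
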
@@ -32,6 +32,7 @@ from omegaconf import OmegaConf
 from tqdm import tqdm
 from transformers import (
     CLIPTextModel,
+    CLIPTextConfig,
     CLIPTokenizer,
     AutoFeatureExtractor,
     BertTokenizerFast,
@@ -55,6 +56,7 @@ from invokeai.frontend.install.widgets import (
 from invokeai.backend.install.legacy_arg_parsing import legacy_parser
 from invokeai.backend.install.model_install_backend import (
     hf_download_from_pretrained,
     hf_download_with_resume,
     InstallSelections,
     ModelInstall,
 )

@@ -204,6 +206,15 @@ def download_conversion_models():
     pipeline = CLIPTextModel.from_pretrained(repo_id, subfolder="text_encoder", **kwargs)
     pipeline.save_pretrained(target_dir / 'stable-diffusion-2-clip' / 'text_encoder', safe_serialization=True)

+    # sd-xl - tokenizer_2
+    repo_id = "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k"
+    _, model_name = repo_id.split('/')
+    pipeline = CLIPTokenizer.from_pretrained(repo_id, **kwargs)
+    pipeline.save_pretrained(target_dir / model_name, safe_serialization=True)
+
+    pipeline = CLIPTextConfig.from_pretrained(repo_id, **kwargs)
+    pipeline.save_pretrained(target_dir / model_name, safe_serialization=True)
+
     # VAE
     logger.info('Downloading stable diffusion VAE')
     vae = AutoencoderKL.from_pretrained('stabilityai/sd-vae-ft-mse', **kwargs)

@@ -287,47 +298,6 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
             color="CONTROL",
         )

-        self.nextrely += 1
-        self.add_widget_intelligent(
-            npyscreen.TitleFixedText,
-            name="== BASIC OPTIONS ==",
-            begin_entry_at=0,
-            editable=False,
-            color="CONTROL",
-            scroll_exit=True,
-        )
-        self.nextrely -= 1
-        self.add_widget_intelligent(
-            npyscreen.FixedText,
-            value="Select an output directory for images:",
-            editable=False,
-            color="CONTROL",
-        )
-        self.outdir = self.add_widget_intelligent(
-            npyscreen.TitleFilename,
-            name="(<tab> autocompletes, ctrl-N advances):",
-            value=str(default_output_dir()),
-            select_dir=True,
-            must_exist=False,
-            use_two_lines=False,
-            labelColor="GOOD",
-            begin_entry_at=40,
-            scroll_exit=True,
-        )
-        self.nextrely += 1
-        self.add_widget_intelligent(
-            npyscreen.FixedText,
-            value="Activate the NSFW checker to blur images showing potential sexual imagery:",
-            editable=False,
-            color="CONTROL",
-        )
-        self.nsfw_checker = self.add_widget_intelligent(
-            npyscreen.Checkbox,
-            name="NSFW checker",
-            value=old_opts.nsfw_checker,
-            relx=5,
-            scroll_exit=True,
-        )
         self.nextrely += 1
         label = """HuggingFace access token (OPTIONAL) for automatic model downloads. See https://huggingface.co/settings/tokens."""
         for line in textwrap.wrap(label,width=window_width-6):

@@ -347,15 +317,6 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
             scroll_exit=True,
         )
         self.nextrely += 1
-        self.add_widget_intelligent(
-            npyscreen.TitleFixedText,
-            name="== ADVANCED OPTIONS ==",
-            begin_entry_at=0,
-            editable=False,
-            color="CONTROL",
-            scroll_exit=True,
-        )
-        self.nextrely -= 1
         self.add_widget_intelligent(
             npyscreen.TitleFixedText,
             name="GPU Management",

@@ -415,6 +376,18 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
             editable=False,
             color="CONTROL",
         )
+        self.outdir = self.add_widget_intelligent(
+            FileBox,
+            name="Output directory for images (<tab> autocompletes, ctrl-N advances):",
+            value=str(default_output_dir()),
+            select_dir=True,
+            must_exist=False,
+            use_two_lines=False,
+            labelColor="GOOD",
+            begin_entry_at=40,
+            max_height=3,
+            scroll_exit=True,
+        )
         self.autoimport_dirs = {}
         self.autoimport_dirs['autoimport_dir'] = self.add_widget_intelligent(
             FileBox,

@@ -506,7 +479,6 @@ https://huggingface.co/spaces/CompVis/stable-diffusion-license

         for attr in [
             "outdir",
-            "nsfw_checker",
             "free_gpu_mem",
             "max_cache_size",
             "xformers_enabled",
@@ -542,7 +514,7 @@ class EditOptApplication(npyscreen.NPSAppManaged):
             "MAIN",
             editOptsForm,
             name="InvokeAI Startup Options",
-            cycle_widgets=True,
+            cycle_widgets=False,
         )
         if not (self.program_opts.skip_sd_weights or self.program_opts.default_only):
             self.model_select = self.addForm(
@@ -550,7 +522,7 @@ class EditOptApplication(npyscreen.NPSAppManaged):
             addModelsForm,
             name="Install Stable Diffusion Models",
             multipage=True,
-            cycle_widgets=True,
+            cycle_widgets=False,
         )

     def new_opts(self):

@@ -564,8 +536,6 @@ def edit_opts(program_opts: Namespace, invokeai_opts: Namespace) -> argparse.Nam

 def default_startup_options(init_file: Path) -> Namespace:
     opts = InvokeAIAppConfig.get_config()
-    if not init_file.exists():
-        opts.nsfw_checker = True
     return opts

 def default_user_selections(program_opts: Namespace) -> InstallSelections:
@@ -588,7 +558,7 @@ def default_user_selections(program_opts: Namespace) -> InstallSelections:

 # -------------------------------------
 def initialize_rootdir(root: Path, yes_to_all: bool = False):
-    logger.info("** INITIALIZING INVOKEAI RUNTIME DIRECTORY **")
+    logger.info("Initializing InvokeAI runtime directory")
     for name in (
         "models",
         "databases",
@@ -689,7 +659,6 @@ def migrate_init_file(legacy_format:Path):

     # a few places where the field names have changed and we have to
     # manually add in the new names/values
-    new.nsfw_checker = old.safety_checker
     new.xformers_enabled = old.xformers
     new.conf_path = old.conf
     new.root = legacy_format.parent.resolve()
@@ -819,15 +788,14 @@ def main():
         sys.exit(0)

     if opt.skip_support_models:
-        logger.info("SKIPPING SUPPORT MODEL DOWNLOADS PER USER REQUEST")
+        logger.info("Skipping support models at user's request")
     else:
-        logger.info("CHECKING/UPDATING SUPPORT MODELS")
+        logger.info("Installing support models")
         download_support_models()

     if opt.skip_sd_weights:
-        logger.warning("SKIPPING DIFFUSION WEIGHTS DOWNLOAD PER USER REQUEST")
+        logger.warning("Skipping diffusion weights download per user request")
     elif models_to_download:
-        logger.info("DOWNLOADING DIFFUSION WEIGHTS")
         process_and_execute(opt, models_to_download)

     postscript(errors=errors)

@@ -58,7 +58,15 @@ LEGACY_CONFIGS = {
             SchedulerPredictionType.Epsilon: 'v2-inpainting-inference.yaml',
             SchedulerPredictionType.VPrediction: 'v2-inpainting-inference-v.yaml',
         }
-    }
+    },
+
+    BaseModelType.StableDiffusionXL: {
+        ModelVariantType.Normal: 'sd_xl_base.yaml',
+    },
+
+    BaseModelType.StableDiffusionXLRefiner: {
+        ModelVariantType.Normal: 'sd_xl_refiner.yaml',
+    },
 }

 @dataclass

@@ -141,16 +149,17 @@ class ModelInstall(object):
         for i in installed:
             print(f"{i['model_name']}\t{i['base_model']}\t{i['path']}")

-    def starter_models(self)->Set[str]:
+    # logic here a little reversed to maintain backward compatibility
+    def starter_models(self, all_models: bool=False)->Set[str]:
         models = set()
         for key, value in self.datasets.items():
             name,base,model_type = ModelManager.parse_key(key)
-            if model_type==ModelType.Main:
+            if all_models or model_type==ModelType.Main:
                 models.add(key)
         return models

     def recommended_models(self)->Set[str]:
-        starters = self.starter_models()
+        starters = self.starter_models(all_models=True)
         return set([x for x in starters if self.datasets[x].get('recommended',False)])

     def default_model(self)->str:

@@ -329,6 +338,7 @@ class ModelInstall(object):
             description = str(description),
             model_format = info.format,
         )
+        legacy_conf = None
         if info.model_type == ModelType.Main:
             attributes.update(dict(variant = info.variant_type,))
             if info.format=="checkpoint":
@@ -343,11 +353,17 @@ class ModelInstall(object):
                 except KeyError:
                     legacy_conf = Path(self.config.legacy_conf_dir, 'v1-inference.yaml') # best guess

-                attributes.update(
-                    dict(
-                        config = str(legacy_conf)
-                    )
-                )
+        if info.model_type == ModelType.ControlNet and info.format=="checkpoint":
+            possible_conf = path.with_suffix('.yaml')
+            if possible_conf.exists():
+                legacy_conf = str(self.relative_to_root(possible_conf))
+
+        if legacy_conf:
+            attributes.update(
+                dict(
+                    config = str(legacy_conf)
+                )
+            )
         return attributes

     def relative_to_root(self, path: Path)->Path:

@@ -474,7 +474,7 @@ class ModelPatcher:

     @staticmethod
     def _lora_forward_hook(
-        applied_loras: List[Tuple[LoraModel, float]],
+        applied_loras: List[Tuple[LoRAModel, float]],
         layer_name: str,
     ):

@@ -519,7 +519,7 @@ class ModelPatcher:
     def apply_lora(
         cls,
         model: torch.nn.Module,
-        loras: List[Tuple[LoraModel, float]],
+        loras: List[Tuple[LoRAModel, float]],
         prefix: str,
     ):
         original_weights = dict()

@@ -673,6 +673,7 @@ class ModelManager(object):

         self.models[model_key] = model_config
+        self.commit()

         return AddModelResult(
             name = model_name,
             model_type = model_type,
@@ -840,7 +841,7 @@ class ModelManager(object):
         Returns the preamble for the config file.
         """
         return textwrap.dedent(
-            """\
+            """
             # This file describes the alternative machine learning models
             # available to InvokeAI script.
             #

@@ -253,10 +253,13 @@ class PipelineCheckpointProbe(CheckpointProbeBase):
             return BaseModelType.StableDiffusion1
         if key_name in state_dict and state_dict[key_name].shape[-1] == 1024:
             return BaseModelType.StableDiffusion2
         # TODO: Verify that this is correct! Need an XL checkpoint file for this.
         key_name = 'model.diffusion_model.input_blocks.4.1.transformer_blocks.0.attn2.to_k.weight'
         if key_name in state_dict and state_dict[key_name].shape[-1] == 2048:
             return BaseModelType.StableDiffusionXL
-        raise InvalidModelException("Cannot determine base type")
+        elif key_name in state_dict and state_dict[key_name].shape[-1] == 1280:
+            return BaseModelType.StableDiffusionXLRefiner
+        else:
+            raise InvalidModelException("Cannot determine base type")

     def get_scheduler_prediction_type(self)->SchedulerPredictionType:
         type = self.get_base_type()

@@ -98,6 +98,6 @@ class FindModels(ModelSearch):

     def list_models(self) -> List[Path]:
         self.search()
-        return self.models_found
+        return list(self.models_found)

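Together with the FindModels([directory], self.logger) fix above, model search now takes a list of root directories and returns a plain list of paths. A minimal sketch; the module path in the import is an assumption:

    from pathlib import Path
    import invokeai.backend.util.logging as logger
    from invokeai.backend.model_management.model_search import FindModels  # assumed location

    search = FindModels([Path('/opt/invokeai/models')], logger)
    for model_path in search.list_models():  # list(self.models_found) after search()
        print(model_path)
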
@@ -1,7 +1,8 @@
 import os
 import torch
 from enum import Enum
-from typing import Optional
+from pathlib import Path
+from typing import Optional, Literal
 from .base import (
     ModelBase,
     ModelConfigBase,
@@ -15,6 +16,7 @@ from .base import (
     InvalidModelException,
     ModelNotFoundException,
 )
+from invokeai.app.services.config import InvokeAIAppConfig

 class ControlNetModelFormat(str, Enum):
     Checkpoint = "checkpoint"
@@ -24,8 +26,12 @@ class ControlNetModel(ModelBase):
     #model_class: Type
     #model_size: int

-    class Config(ModelConfigBase):
-        model_format: ControlNetModelFormat
+    class DiffusersConfig(ModelConfigBase):
+        model_format: Literal[ControlNetModelFormat.Diffusers]
+
+    class CheckpointConfig(ModelConfigBase):
+        model_format: Literal[ControlNetModelFormat.Checkpoint]
+        config: str

     def __init__(self, model_path: str, base_model: BaseModelType, model_type: ModelType):
         assert model_type == ModelType.ControlNet

@@ -99,13 +105,51 @@ class ControlNetModel(ModelBase):

     @classmethod
     def convert_if_required(
         cls,
         model_path: str,
         output_path: str,
-        config: ModelConfigBase,  # empty config or config of parent model
+        config: ModelConfigBase,
         base_model: BaseModelType,
     ) -> str:
-        if cls.detect_format(model_path) != ControlNetModelFormat.Diffusers:
-            raise NotImplementedError("Checkpoint controlnet models currently unsupported")
-        else:
-            return model_path
+        if cls.detect_format(model_path) == ControlNetModelFormat.Checkpoint:
+            return _convert_controlnet_ckpt_and_cache(
+                model_path = model_path,
+                model_config = config.config,
+                output_path = output_path,
+                base_model = base_model,
+            )
+        else:
+            return model_path
+
+def _convert_controlnet_ckpt_and_cache(
+    model_path: str,
+    output_path: str,
+    base_model: BaseModelType,
+    model_config: ControlNetModel.CheckpointConfig,
+) -> str:
+    """
+    Convert the controlnet from checkpoint format to diffusers format,
+    cache it to disk, and return Path to converted
+    file. If already on disk then just returns Path.
+    """
+    app_config = InvokeAIAppConfig.get_config()
+    weights = app_config.root_path / model_path
+    output_path = Path(output_path)
+
+    # return cached version if it exists
+    if output_path.exists():
+        return output_path
+
+    # to avoid circular import errors
+    from ..convert_ckpt_to_diffusers import convert_controlnet_to_diffusers
+    convert_controlnet_to_diffusers(
+        weights,
+        output_path,
+        original_config_file = app_config.root_path / model_config,
+        image_size = 512,
+        scan_needed = True,
+        from_safetensors = weights.suffix == ".safetensors"
+    )
+    return output_path

@@ -10,6 +10,7 @@ from .base import (
     SubModelType,
     classproperty,
     InvalidModelException,
+    ModelNotFoundException,
 )
 # TODO: naming
 from ..lora import LoRAModel as LoRAModelRaw

@@ -1,5 +1,6 @@
 import os
 import json
+import invokeai.backend.util.logging as logger
 from enum import Enum
 from pydantic import Field
 from typing import Literal, Optional
@@ -48,7 +49,7 @@ class StableDiffusionXLModel(DiffusersModel):
         if model_format == StableDiffusionXLModelFormat.Checkpoint:
             if ckpt_config_path:
                 ckpt_config = OmegaConf.load(ckpt_config_path)
-                ckpt_config["model"]["params"]["unet_config"]["params"]["in_channels"]
+                in_channels = ckpt_config["model"]["params"]["unet_config"]["params"]["in_channels"]

             else:
                 checkpoint = read_checkpoint_meta(path)

@@ -108,7 +109,20 @@ class StableDiffusionXLModel(DiffusersModel):
         config: ModelConfigBase,
         base_model: BaseModelType,
     ) -> str:
+        # The convert script adapted from the diffusers package uses
+        # strings for the base model type. To avoid making too many
+        # source code changes, we simply translate here
+        model_base_to_model_type = {BaseModelType.StableDiffusionXL: 'SDXL',
+                                    BaseModelType.StableDiffusionXLRefiner: 'SDXL-Refiner',
+                                    }
         if isinstance(config, cls.CheckpointConfig):
-            raise NotImplementedError('conversion of SDXL checkpoint models to diffusers format is not yet supported')
+            from invokeai.backend.model_management.models.stable_diffusion import _convert_ckpt_and_cache
+            return _convert_ckpt_and_cache(
+                version=base_model,
+                model_config=config,
+                output_path=output_path,
+                model_type=model_base_to_model_type[base_model],
+                use_safetensors=False,  # corrupts sdxl models for some reason
+            )
         else:
             return model_path

@@ -15,9 +15,12 @@ from .base import (
     classproperty,
     InvalidModelException,
 )
+from .sdxl import StableDiffusionXLModel
 import invokeai.backend.util.logging as logger
+from invokeai.app.services.config import InvokeAIAppConfig
+from omegaconf import OmegaConf


 class StableDiffusion1ModelFormat(str, Enum):
     Checkpoint = "checkpoint"
     Diffusers = "diffusers"

@@ -235,42 +238,17 @@ class StableDiffusion2Model(DiffusersModel):
         else:
             return model_path

-def _select_ckpt_config(version: BaseModelType, variant: ModelVariantType):
-    ckpt_configs = {
-        BaseModelType.StableDiffusion1: {
-            ModelVariantType.Normal: "v1-inference.yaml",
-            ModelVariantType.Inpaint: "v1-inpainting-inference.yaml",
-        },
-        BaseModelType.StableDiffusion2: {
-            ModelVariantType.Normal: "v2-inference-v.yaml",  # best guess, as we can't differentiate with base(512)
-            ModelVariantType.Inpaint: "v2-inpainting-inference.yaml",
-            ModelVariantType.Depth: "v2-midas-inference.yaml",
-        },
-        # note that these .yaml files don't yet exist!
-        BaseModelType.StableDiffusionXL: {
-            ModelVariantType.Normal: "xl-inference-v.yaml",
-            ModelVariantType.Inpaint: "xl-inpainting-inference.yaml",
-            ModelVariantType.Depth: "xl-midas-inference.yaml",
-        }
-    }
-
-    app_config = InvokeAIAppConfig.get_config()
-    try:
-        config_path = app_config.legacy_conf_path / ckpt_configs[version][variant]
-        if config_path.is_relative_to(app_config.root_path):
-            config_path = config_path.relative_to(app_config.root_path)
-        return str(config_path)
-
-    except:
-        return None
-
-
 # TODO: rework
 # Note that convert_ckpt_to_diffuses does not currently support conversion of SDXL models
 # pass precision - currently defaulting to fp16
 def _convert_ckpt_and_cache(
-    version: BaseModelType,
-    model_config: Union[StableDiffusion1Model.CheckpointConfig, StableDiffusion2Model.CheckpointConfig],
-    output_path: str,
+    version: BaseModelType,
+    model_config: Union[StableDiffusion1Model.CheckpointConfig,
+                        StableDiffusion2Model.CheckpointConfig,
+                        StableDiffusionXLModel.CheckpointConfig,
+                        ],
+    output_path: str,
+    use_save_model: bool=False,
+    **kwargs,
 ) -> str:
     """
     Convert the checkpoint model indicated in mconfig into a

|
||||
|
||||
# to avoid circular import errors
|
||||
from ..convert_ckpt_to_diffusers import convert_ckpt_to_diffusers
|
||||
from ...util.devices import choose_torch_device, torch_dtype
|
||||
|
||||
logger.info(f'Converting {weights} to diffusers format')
|
||||
with SilenceWarnings():
|
||||
convert_ckpt_to_diffusers(
|
||||
weights,
|
||||
@@ -298,5 +279,43 @@ def _convert_ckpt_and_cache(
|
||||
original_config_file=config_file,
|
||||
extract_ema=True,
|
||||
scan_needed=True,
|
||||
from_safetensors = weights.suffix == ".safetensors",
|
||||
precision = torch_dtype(choose_torch_device()),
|
||||
**kwargs,
|
||||
)
|
||||
return output_path
|
||||
|
||||
def _select_ckpt_config(version: BaseModelType, variant: ModelVariantType):
|
||||
ckpt_configs = {
|
||||
BaseModelType.StableDiffusion1: {
|
||||
ModelVariantType.Normal: "v1-inference.yaml",
|
||||
ModelVariantType.Inpaint: "v1-inpainting-inference.yaml",
|
||||
},
|
||||
BaseModelType.StableDiffusion2: {
|
||||
ModelVariantType.Normal: "v2-inference-v.yaml", # best guess, as we can't differentiate with base(512)
|
||||
ModelVariantType.Inpaint: "v2-inpainting-inference.yaml",
|
||||
ModelVariantType.Depth: "v2-midas-inference.yaml",
|
||||
},
|
||||
BaseModelType.StableDiffusionXL: {
|
||||
ModelVariantType.Normal: "sd_xl_base.yaml",
|
||||
ModelVariantType.Inpaint: None,
|
||||
ModelVariantType.Depth: None,
|
||||
},
|
||||
BaseModelType.StableDiffusionXLRefiner: {
|
||||
ModelVariantType.Normal: "sd_xl_refiner.yaml",
|
||||
ModelVariantType.Inpaint: None,
|
||||
ModelVariantType.Depth: None,
|
||||
},
|
||||
}
|
||||
|
||||
app_config = InvokeAIAppConfig.get_config()
|
||||
try:
|
||||
config_path = app_config.legacy_conf_path / ckpt_configs[version][variant]
|
||||
if config_path.is_relative_to(app_config.root_path):
|
||||
config_path = config_path.relative_to(app_config.root_path)
|
||||
return str(config_path)
|
||||
|
||||
except:
|
||||
return None
|
||||
|
||||
|
||||
|
||||
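A quick worked trace of the relocated selector with the new SDXL rows; the concrete paths depend on the install root, so they are illustrative:

    # _select_ckpt_config(BaseModelType.StableDiffusionXL, ModelVariantType.Normal)
    #   -> ckpt_configs[version][variant] == "sd_xl_base.yaml"
    #   -> app_config.legacy_conf_path / "sd_xl_base.yaml"
    #   -> e.g. "configs/stable-diffusion/sd_xl_base.yaml" relative to the root
    #
    # _select_ckpt_config(BaseModelType.StableDiffusionXL, ModelVariantType.Inpaint)
    #   -> ckpt_configs[version][variant] is None; the `/` join raises,
    #      and the bare `except:` converts that into a returned None
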
@@ -1,77 +0,0 @@ (file deleted)
'''
SafetyChecker class - checks images against the StabilityAI NSFW filter
and blurs images that contain potential NSFW content.
'''
import diffusers
import numpy as np
import torch
import traceback
from diffusers.pipelines.stable_diffusion.safety_checker import (
    StableDiffusionSafetyChecker,
)
from pathlib import Path
from PIL import Image, ImageFilter
from transformers import AutoFeatureExtractor

import invokeai.assets.web as web_assets
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
from .util import CPU_DEVICE

config = InvokeAIAppConfig.get_config()

class SafetyChecker(object):
    CAUTION_IMG = "caution.png"

    def __init__(self, device: torch.device):
        path = Path(web_assets.__path__[0]) / self.CAUTION_IMG
        caution = Image.open(path)
        self.caution_img = caution.resize((caution.width // 2, caution.height // 2))
        self.device = device

        try:
            safety_model_id = config.models_path / 'core/convert/stable-diffusion-safety-checker'
            feature_extractor_id = config.models_path / 'core/convert/stable-diffusion-safety-checker-extractor'
            self.safety_checker = StableDiffusionSafetyChecker.from_pretrained(safety_model_id)
            self.safety_feature_extractor = AutoFeatureExtractor.from_pretrained(feature_extractor_id)
        except Exception:
            logger.error(
                "An error was encountered while installing the safety checker:"
            )
            print(traceback.format_exc())

    def check(self, image: Image.Image):
        """
        Check provided image against the StabilityAI safety checker and return

        """

        self.safety_checker.to(self.device)
        features = self.safety_feature_extractor([image], return_tensors="pt")
        features.to(self.device)

        # unfortunately checker requires the numpy version, so we have to convert back
        x_image = np.array(image).astype(np.float32) / 255.0
        x_image = x_image[None].transpose(0, 3, 1, 2)

        diffusers.logging.set_verbosity_error()
        checked_image, has_nsfw_concept = self.safety_checker(
            images=x_image, clip_input=features.pixel_values
        )
        self.safety_checker.to(CPU_DEVICE)  # offload
        if has_nsfw_concept[0]:
            logger.warning(
                "An image with potential non-safe content has been detected. A blurred image will be returned."
            )
            return self.blur(image)
        else:
            return image

    def blur(self, input):
        blurry = input.filter(filter=ImageFilter.GaussianBlur(radius=32))
        try:
            if caution := self.caution_img:
                blurry.paste(caution, (0, 0), caution)
        except FileNotFoundError:
            pass
        return blurry
@@ -1,7 +1,7 @@
 # Copyright (c) 2023 Lincoln D. Stein and The InvokeAI Development Team

 """
-invokeai.util.logging
+invokeai.backend.util.logging

 Logging class for InvokeAI that produces console messages

@@ -16,13 +16,13 @@ sd-2/main/stable-diffusion-2-inpainting:
   description: Stable Diffusion version 2.0 inpainting model (5.21 GB)
   repo_id: stabilityai/stable-diffusion-2-inpainting
   recommended: False
-sdxl/main/stable-diffusion-xl-base-0-9:
-  description: Stable Diffusion XL base model (12 GB; access token required)
-  repo_id: stabilityai/stable-diffusion-xl-base-0.9
+sdxl/main/stable-diffusion-xl-base-1-0:
+  description: Stable Diffusion XL base model (12 GB)
+  repo_id: stabilityai/stable-diffusion-xl-base-1.0
   recommended: False
-sdxl-refiner/main/stable-diffusion-xl-refiner-0-9:
-  description: Stable Diffusion XL refiner model (12 GB; access token required)
-  repo_id: stabilityai/stable-diffusion-xl-refiner-0.9
+sdxl-refiner/main/stable-diffusion-xl-refiner-1-0:
+  description: Stable Diffusion XL refiner model (12 GB)
+  repo_id: stabilityai/stable-diffusion-xl-refiner-1.0
   recommended: False
 sd-1/main/Analog-Diffusion:
   description: An SD-1.5 model trained on diverse analog photographs (2.13 GB)

98  invokeai/configs/stable-diffusion/sd_xl_base.yaml  (new file)
@@ -0,0 +1,98 @@
model:
  target: sgm.models.diffusion.DiffusionEngine
  params:
    scale_factor: 0.13025
    disable_first_stage_autocast: True

    denoiser_config:
      target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
      params:
        num_idx: 1000

        weighting_config:
          target: sgm.modules.diffusionmodules.denoiser_weighting.EpsWeighting
        scaling_config:
          target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling
        discretization_config:
          target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization

    network_config:
      target: sgm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        adm_in_channels: 2816
        num_classes: sequential
        use_checkpoint: True
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [4, 2]
        num_res_blocks: 2
        channel_mult: [1, 2, 4]
        num_head_channels: 64
        use_spatial_transformer: True
        use_linear_in_transformer: True
        transformer_depth: [1, 2, 10]  # note: the first is unused (due to attn_res starting at 2) 32, 16, 8 --> 64, 32, 16
        context_dim: 2048
        spatial_transformer_attn_type: softmax-xformers
        legacy: False

    conditioner_config:
      target: sgm.modules.GeneralConditioner
      params:
        emb_models:
          # crossattn cond
          - is_trainable: False
            input_key: txt
            target: sgm.modules.encoders.modules.FrozenCLIPEmbedder
            params:
              layer: hidden
              layer_idx: 11
          # crossattn and vector cond
          - is_trainable: False
            input_key: txt
            target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder2
            params:
              arch: ViT-bigG-14
              version: laion2b_s39b_b160k
              freeze: True
              layer: penultimate
              always_return_pooled: True
              legacy: False
          # vector cond
          - is_trainable: False
            input_key: original_size_as_tuple
            target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
            params:
              outdim: 256  # multiplied by two
          # vector cond
          - is_trainable: False
            input_key: crop_coords_top_left
            target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
            params:
              outdim: 256  # multiplied by two
          # vector cond
          - is_trainable: False
            input_key: target_size_as_tuple
            target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
            params:
              outdim: 256  # multiplied by two

    first_stage_config:
      target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          attn_type: vanilla-xformers
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult: [1, 2, 4, 4]
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
91  invokeai/configs/stable-diffusion/sd_xl_refiner.yaml  (new file)
@@ -0,0 +1,91 @@
model:
  target: sgm.models.diffusion.DiffusionEngine
  params:
    scale_factor: 0.13025
    disable_first_stage_autocast: True

    denoiser_config:
      target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
      params:
        num_idx: 1000

        weighting_config:
          target: sgm.modules.diffusionmodules.denoiser_weighting.EpsWeighting
        scaling_config:
          target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling
        discretization_config:
          target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization

    network_config:
      target: sgm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        adm_in_channels: 2560
        num_classes: sequential
        use_checkpoint: True
        in_channels: 4
        out_channels: 4
        model_channels: 384
        attention_resolutions: [4, 2]
        num_res_blocks: 2
        channel_mult: [1, 2, 4, 4]
        num_head_channels: 64
        use_spatial_transformer: True
        use_linear_in_transformer: True
        transformer_depth: 4
        context_dim: [1280, 1280, 1280, 1280]  # 1280
        spatial_transformer_attn_type: softmax-xformers
        legacy: False

    conditioner_config:
      target: sgm.modules.GeneralConditioner
      params:
        emb_models:
          # crossattn and vector cond
          - is_trainable: False
            input_key: txt
            target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder2
            params:
              arch: ViT-bigG-14
              version: laion2b_s39b_b160k
              legacy: False
              freeze: True
              layer: penultimate
              always_return_pooled: True
          # vector cond
          - is_trainable: False
            input_key: original_size_as_tuple
            target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
            params:
              outdim: 256  # multiplied by two
          # vector cond
          - is_trainable: False
            input_key: crop_coords_top_left
            target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
            params:
              outdim: 256  # multiplied by two
          # vector cond
          - is_trainable: False
            input_key: aesthetic_score
            target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
            params:
              outdim: 256  # multiplied by one

    first_stage_config:
      target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          attn_type: vanilla-xformers
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult: [1, 2, 4, 4]
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
@@ -553,7 +553,7 @@ class AddModelApplication(npyscreen.NPSAppManaged):
     def onStart(self):
         npyscreen.setTheme(npyscreen.Themes.DefaultTheme)
         self.main_form = self.addForm(
-            "MAIN", addModelsForm, name="Install Stable Diffusion Models", cycle_widgets=True,
+            "MAIN", addModelsForm, name="Install Stable Diffusion Models", cycle_widgets=False,
         )

 class StderrToMessage():

@@ -5,6 +5,10 @@ patches/
 stats.html
 index.html
 .yarn/
+.yalc/
+*.scss
+src/services/api/
+src/services/fixtures/*
 docs/
 static/
 src/theme/css/overlayscrollbars.css

169  invokeai/frontend/web/dist/assets/App-06ea4e5e.js  (vendored)
169  invokeai/frontend/web/dist/assets/App-69e5ea36.js  (vendored, new file)
1    invokeai/frontend/web/dist/assets/MantineProvider-8184f020.js  (vendored, new file)
125  invokeai/frontend/web/dist/assets/index-89941396.js  (vendored, new file)
125  invokeai/frontend/web/dist/assets/index-e2437518.js  (vendored)
2    invokeai/frontend/web/dist/index.html  (vendored)
@@ -12,7 +12,7 @@
       margin: 0;
     }
   </style>
-  <script type="module" crossorigin src="./assets/index-e2437518.js"></script>
+  <script type="module" crossorigin src="./assets/index-89941396.js"></script>
 </head>

 <body dir="ltr">

16  invokeai/frontend/web/dist/locales/en.json  (vendored)
@@ -23,6 +23,7 @@
     "menu": "Menu"
   },
   "common": {
+    "communityLabel": "Community",
     "hotkeysLabel": "Hotkeys",
     "darkMode": "Dark Mode",
     "lightMode": "Light Mode",
@@ -102,8 +103,7 @@
     "openInNewTab": "Open in New Tab",
     "dontAskMeAgain": "Don't ask me again",
     "areYouSure": "Are you sure?",
-    "imagePrompt": "Image Prompt",
-    "clearNodes": "Are you sure you want to clear all nodes?"
+    "imagePrompt": "Image Prompt"
   },
   "gallery": {
     "generations": "Generations",
@@ -615,6 +615,11 @@
     "initialImageNotSetDesc": "Could not load initial image",
     "nodesSaved": "Nodes Saved",
     "nodesLoaded": "Nodes Loaded",
+    "nodesNotValidGraph": "Not a valid InvokeAI Node Graph",
+    "nodesNotValidJSON": "Not a valid JSON",
+    "nodesCorruptedGraph": "Cannot load. Graph seems to be corrupted.",
+    "nodesUnrecognizedTypes": "Cannot load. Graph has unrecognized types",
+    "nodesBrokenConnections": "Cannot load. Some connections are broken.",
     "nodesLoadedFailed": "Failed To Load Nodes",
     "nodesCleared": "Nodes Cleared"
   },
@@ -700,9 +705,10 @@
   },
   "nodes": {
     "reloadSchema": "Reload Schema",
     "saveNodes": "Save Nodes",
     "loadNodes": "Load Nodes",
     "clearNodes": "Clear Nodes",
     "saveGraph": "Save Graph",
     "loadGraph": "Load Graph (saved from Node Editor) (Do not copy-paste metadata)",
     "clearGraph": "Clear Graph",
+    "clearGraphDesc": "Are you sure you want to clear all nodes?",
     "zoomInNodes": "Zoom In",
     "zoomOutNodes": "Zoom Out",
     "fitViewportNodes": "Fit View",

@@ -53,11 +53,11 @@
     ]
   },
   "dependencies": {
-    "@chakra-ui/anatomy": "^2.1.1",
+    "@chakra-ui/anatomy": "^2.2.0",
     "@chakra-ui/icons": "^2.0.19",
-    "@chakra-ui/react": "^2.7.1",
+    "@chakra-ui/react": "^2.8.0",
     "@chakra-ui/styled-system": "^2.9.1",
-    "@chakra-ui/theme-tools": "^2.0.18",
+    "@chakra-ui/theme-tools": "^2.1.0",
     "@dagrejs/graphlib": "^2.1.13",
     "@dnd-kit/core": "^6.0.8",
     "@dnd-kit/modifiers": "^6.0.1",
@@ -69,6 +69,7 @@
     "@mantine/core": "^6.0.14",
     "@mantine/form": "^6.0.15",
     "@mantine/hooks": "^6.0.14",
+    "@nanostores/react": "^0.7.1",
     "@reduxjs/toolkit": "^1.9.5",
     "@roarr/browser-log-writer": "^1.1.5",
     "chakra-ui-contextmenu": "^1.0.5",

@@ -23,6 +23,7 @@
     "menu": "Menu"
   },
   "common": {
+    "communityLabel": "Community",
     "hotkeysLabel": "Hotkeys",
     "darkMode": "Dark Mode",
     "lightMode": "Light Mode",
@@ -102,8 +103,7 @@
     "openInNewTab": "Open in New Tab",
     "dontAskMeAgain": "Don't ask me again",
     "areYouSure": "Are you sure?",
-    "imagePrompt": "Image Prompt",
-    "clearNodes": "Are you sure you want to clear all nodes?"
+    "imagePrompt": "Image Prompt"
   },
   "gallery": {
     "generations": "Generations",
@@ -615,6 +615,11 @@
     "initialImageNotSetDesc": "Could not load initial image",
     "nodesSaved": "Nodes Saved",
     "nodesLoaded": "Nodes Loaded",
+    "nodesNotValidGraph": "Not a valid InvokeAI Node Graph",
+    "nodesNotValidJSON": "Not a valid JSON",
+    "nodesCorruptedGraph": "Cannot load. Graph seems to be corrupted.",
+    "nodesUnrecognizedTypes": "Cannot load. Graph has unrecognized types",
+    "nodesBrokenConnections": "Cannot load. Some connections are broken.",
     "nodesLoadedFailed": "Failed To Load Nodes",
     "nodesCleared": "Nodes Cleared"
   },
@@ -700,9 +705,10 @@
   },
   "nodes": {
     "reloadSchema": "Reload Schema",
     "saveNodes": "Save Nodes",
     "loadNodes": "Load Nodes",
     "clearNodes": "Clear Nodes",
     "saveGraph": "Save Graph",
     "loadGraph": "Load Graph (saved from Node Editor) (Do not copy-paste metadata)",
     "clearGraph": "Clear Graph",
+    "clearGraphDesc": "Are you sure you want to clear all nodes?",
     "zoomInNodes": "Zoom In",
     "zoomOutNodes": "Zoom Out",
     "fitViewportNodes": "Fit View",

@@ -10,7 +10,7 @@ async function main() {
   );
   const types = await openapiTS(OPENAPI_URL, {
     exportType: true,
-    transform: (schemaObject, metadata) => {
+    transform: (schemaObject) => {
       if ('format' in schemaObject && schemaObject.format === 'binary') {
         return schemaObject.nullable ? 'Blob | null' : 'Blob';
       }

@@ -14,6 +14,7 @@ import FloatingParametersPanelButtons from 'features/ui/components/FloatingParam
 import InvokeTabs from 'features/ui/components/InvokeTabs';
 import ParametersDrawer from 'features/ui/components/ParametersDrawer';
 import i18n from 'i18n';
+import { size } from 'lodash-es';
 import { ReactNode, memo, useEffect } from 'react';
 import UpdateImageBoardModal from '../../features/gallery/components/Boards/UpdateImageBoardModal';
 import GlobalHotkeys from './GlobalHotkeys';
@@ -29,8 +30,7 @@ interface Props {
 const App = ({ config = DEFAULT_CONFIG, headerComponent }: Props) => {
   const language = useAppSelector(languageSelector);

-  const log = useLogger();
-
+  const logger = useLogger();
   const dispatch = useAppDispatch();

   useEffect(() => {
@@ -38,9 +38,11 @@ const App = ({ config = DEFAULT_CONFIG, headerComponent }: Props) => {
   }, [language]);

   useEffect(() => {
-    log.info({ namespace: 'App', data: config }, 'Received config');
-    dispatch(configChanged(config));
-  }, [dispatch, config, log]);
+    if (size(config)) {
+      logger.info({ namespace: 'App', config }, 'Received config');
+      dispatch(configChanged(config));
+    }
+  }, [dispatch, config, logger]);

   useEffect(() => {
     dispatch(appStarted());

@@ -27,7 +27,7 @@ const STYLES: ChakraProps['sx'] = {

 const DragPreview = (props: OverlayDragImageProps) => {
   if (!props.dragData) {
-    return;
+    return null;
   }

   if (props.dragData.payloadType === 'IMAGE_DTO') {
@@ -39,7 +39,6 @@ const ImageDndContext = (props: ImageDndContextProps) => {
   const handleDragEnd = useCallback(
     (event: DragEndEvent) => {
-      console.log('dragEnd', event.active.data.current);
       const activeData = event.active.data.current;
       const overData = event.over?.data.current;
       if (!activeDragData || !overData) {
         return;

@@ -11,7 +11,7 @@ import {
   useDraggable as useOriginalDraggable,
   useDroppable as useOriginalDroppable,
 } from '@dnd-kit/core';
-import { BoardId } from 'features/gallery/store/gallerySlice';
+import { BoardId } from 'features/gallery/store/types';
 import { ImageDTO } from 'services/api/types';

 type BaseDropData = {

@@ -1,28 +1,10 @@
-import { useToast, UseToastOptions } from '@chakra-ui/react';
+import { useToast } from '@chakra-ui/react';
 import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
 import { toastQueueSelector } from 'features/system/store/systemSelectors';
 import { addToast, clearToastQueue } from 'features/system/store/systemSlice';
+import { MakeToastArg, makeToast } from 'features/system/util/makeToast';
 import { useCallback, useEffect } from 'react';

-export type MakeToastArg = string | UseToastOptions;
-
-/**
- * Makes a toast from a string or a UseToastOptions object.
- * If a string is passed, the toast will have the status 'info' and will be closable with a duration of 2500ms.
- */
-export const makeToast = (arg: MakeToastArg): UseToastOptions => {
-  if (typeof arg === 'string') {
-    return {
-      title: arg,
-      status: 'info',
-      isClosable: true,
-      duration: 2500,
-    };
-  }
-
-  return { status: 'info', isClosable: true, duration: 2500, ...arg };
-};
-
 /**
  * Logical component. Watches the toast queue and makes toasts when the queue is not empty.
  * @returns null

@@ -1,66 +1,2 @@
// zod needs the array to be `as const` to infer the type correctly

import { SchedulerParam } from 'features/parameters/types/parameterSchemas';

// this is the source of the `SchedulerParam` type, which is generated by zod
export const SCHEDULER_NAMES_AS_CONST = [
'euler',
'deis',
'ddim',
'ddpm',
'dpmpp_2s',
'dpmpp_2m',
'dpmpp_2m_sde',
'dpmpp_sde',
'heun',
'kdpm_2',
'lms',
'pndm',
'unipc',
'euler_k',
'dpmpp_2s_k',
'dpmpp_2m_k',
'dpmpp_2m_sde_k',
'dpmpp_sde_k',
'heun_k',
'lms_k',
'euler_a',
'kdpm_2_a',
] as const;

export const DEFAULT_SCHEDULER_NAME = 'euler';

export const SCHEDULER_NAMES: SchedulerParam[] = [...SCHEDULER_NAMES_AS_CONST];

export const SCHEDULER_LABEL_MAP: Record<SchedulerParam, string> = {
euler: 'Euler',
deis: 'DEIS',
ddim: 'DDIM',
ddpm: 'DDPM',
dpmpp_sde: 'DPM++ SDE',
dpmpp_2s: 'DPM++ 2S',
dpmpp_2m: 'DPM++ 2M',
dpmpp_2m_sde: 'DPM++ 2M SDE',
heun: 'Heun',
kdpm_2: 'KDPM 2',
lms: 'LMS',
pndm: 'PNDM',
unipc: 'UniPC',
euler_k: 'Euler Karras',
dpmpp_sde_k: 'DPM++ SDE Karras',
dpmpp_2s_k: 'DPM++ 2S Karras',
dpmpp_2m_k: 'DPM++ 2M Karras',
dpmpp_2m_sde_k: 'DPM++ 2M SDE Karras',
heun_k: 'Heun Karras',
lms_k: 'LMS Karras',
euler_a: 'Euler Ancestral',
kdpm_2_a: 'KDPM 2 Ancestral',
};

export type Scheduler = (typeof SCHEDULER_NAMES)[number];

export const NUMPY_RAND_MIN = 0;

export const NUMPY_RAND_MAX = 2147483647;

export const NODE_MIN_WIDTH = 250;
export const NUMPY_RAND_MAX = 4294967295;
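A short sketch of how these constants compose, for example to build labelled options for a scheduler dropdown; the import path is assumed from this file's layout:

```ts
// Import path assumed; adjust to wherever these constants live.
import { SCHEDULER_NAMES, SCHEDULER_LABEL_MAP } from 'app/constants';

// Pair each scheduler name with its human-readable label.
const schedulerOptions = SCHEDULER_NAMES.map((name) => ({
  value: name,
  label: SCHEDULER_LABEL_MAP[name],
}));
// => [{ value: 'euler', label: 'Euler' }, { value: 'deis', label: 'DEIS' }, ...]
```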
invokeai/frontend/web/src/app/logging/logger.ts (new file, 46 lines)
@@ -0,0 +1,46 @@
import { createLogWriter } from '@roarr/browser-log-writer';
import { atom } from 'nanostores';
import { Logger, ROARR, Roarr } from 'roarr';

ROARR.write = createLogWriter();

export const BASE_CONTEXT = {};
export const log = Roarr.child(BASE_CONTEXT);

export const $logger = atom<Logger>(Roarr.child(BASE_CONTEXT));

type LoggerNamespace =
| 'images'
| 'models'
| 'config'
| 'canvas'
| 'txt2img'
| 'img2img'
| 'nodes'
| 'system'
| 'socketio'
| 'session';

export const logger = (namespace: LoggerNamespace) =>
$logger.get().child({ namespace });

export const VALID_LOG_LEVELS = [
'trace',
'debug',
'info',
'warn',
'error',
'fatal',
] as const;

export type InvokeLogLevel = (typeof VALID_LOG_LEVELS)[number];

// Translate human-readable log levels to numbers, used for log filtering
export const LOG_LEVEL_MAP: Record<InvokeLogLevel, number> = {
trace: 10,
debug: 20,
info: 30,
warn: 40,
error: 50,
fatal: 60,
};
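The new module hands each call site a namespaced child of whatever logger currently sits in the `$logger` atom. A minimal sketch, with an illustrative context object and message:

```ts
import { logger, LOG_LEVEL_MAP } from 'app/logging/logger';

const log = logger('images');

// Roarr loggers take an optional context object first, then the message.
log.debug({ imageName: 'abc123.png' }, 'Image cached');

// The numeric map enables threshold comparisons when filtering output.
const loud = LOG_LEVEL_MAP['warn'] >= LOG_LEVEL_MAP['info']; // true
```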
@@ -1,48 +1,19 @@
import { useStore } from '@nanostores/react';
import { createSelector } from '@reduxjs/toolkit';
import { createLogWriter } from '@roarr/browser-log-writer';
import { useAppSelector } from 'app/store/storeHooks';
import { systemSelector } from 'features/system/store/systemSelectors';
import { isEqual } from 'lodash-es';
import { useEffect } from 'react';
import { LogLevelName, ROARR, Roarr } from 'roarr';
import { createLogWriter } from '@roarr/browser-log-writer';

// Base logging context includes only the package name
const baseContext = { package: '@invoke-ai/invoke-ai-ui' };

// Create browser log writer
ROARR.write = createLogWriter();

// Module-scoped logger - can be imported and used anywhere
export let log = Roarr.child(baseContext);

// Translate human-readable log levels to numbers, used for log filtering
export const LOG_LEVEL_MAP: Record<LogLevelName, number> = {
trace: 10,
debug: 20,
info: 30,
warn: 40,
error: 50,
fatal: 60,
};

export const VALID_LOG_LEVELS = [
'trace',
'debug',
'info',
'warn',
'error',
'fatal',
] as const;

export type InvokeLogLevel = (typeof VALID_LOG_LEVELS)[number];
import { ROARR, Roarr } from 'roarr';
import { $logger, BASE_CONTEXT, LOG_LEVEL_MAP } from './logger';

const selector = createSelector(
systemSelector,
(system) => {
const { app_version, consoleLogLevel, shouldLogToConsole } = system;
const { consoleLogLevel, shouldLogToConsole } = system;

return {
version: app_version,
consoleLogLevel,
shouldLogToConsole,
};

@@ -55,8 +26,7 @@ const selector = createSelector(
);

export const useLogger = () => {
const { version, consoleLogLevel, shouldLogToConsole } =
useAppSelector(selector);
const { consoleLogLevel, shouldLogToConsole } = useAppSelector(selector);

// The provided Roarr browser log writer uses localStorage to config logging to console
useEffect(() => {

@@ -78,17 +48,16 @@ export const useLogger = () => {

// Update the module-scoped logger context as needed
useEffect(() => {
// TODO: type this properly
//eslint-disable-next-line @typescript-eslint/no-explicit-any
const newContext: Record<string, any> = {
...baseContext,
...BASE_CONTEXT,
};

if (version) {
newContext.version = version;
}
$logger.set(Roarr.child(newContext));
}, []);

log = Roarr.child(newContext);
}, [version]);
const logger = useStore($logger);

// Use the logger within components - no different than just importing it directly
return log;
return logger;
};
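Components read the same atom through `useStore`, so the hook returns a logger that picks up context changes without re-importing. A sketch with a hypothetical component:

```tsx
import { useLogger } from 'app/logging/useLogger';

// Hypothetical component, for illustration only.
const VersionBanner = () => {
  const log = useLogger();
  // Inherits whatever context useLogger's effects have set.
  log.info('VersionBanner rendered');
  return null;
};
```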
@@ -12,7 +12,7 @@ import { defaultsDeep } from 'lodash-es';
import { UnserializeFunction } from 'redux-remember';

const initialStates: {
[key: string]: any;
[key: string]: object; // TODO: type this properly
} = {
canvas: initialCanvasState,
gallery: initialGalleryState,

@@ -8,10 +8,11 @@ import {

import type { AppDispatch, RootState } from '../../store';
import { addCommitStagingAreaImageListener } from './listeners/addCommitStagingAreaImageListener';
import { addFirstListImagesListener } from './listeners/addFirstListImagesListener.ts';
import { addAppConfigReceivedListener } from './listeners/appConfigReceived';
import { addAppStartedListener } from './listeners/appStarted';
import { addBoardIdSelectedListener } from './listeners/boardIdSelected';
import { addDeleteBoardAndImagesFulfilledListener } from './listeners/boardAndImagesDeleted';
import { addBoardIdSelectedListener } from './listeners/boardIdSelected';
import { addCanvasCopiedToClipboardListener } from './listeners/canvasCopiedToClipboard';
import { addCanvasDownloadedAsImageListener } from './listeners/canvasDownloadedAsImage';
import { addCanvasMergedListener } from './listeners/canvasMerged';

@@ -34,10 +35,6 @@ import {
addImageRemovedFromBoardRejectedListener,
} from './listeners/imageRemovedFromBoard';
import { addImageToDeleteSelectedListener } from './listeners/imageToDeleteSelected';
import {
addImageUpdatedFulfilledListener,
addImageUpdatedRejectedListener,
} from './listeners/imageUpdated';
import {
addImageUploadedFulfilledListener,
addImageUploadedRejectedListener,

@@ -68,18 +65,19 @@ import { addGeneratorProgressEventListener as addGeneratorProgressListener } fro
import { addGraphExecutionStateCompleteEventListener as addGraphExecutionStateCompleteListener } from './listeners/socketio/socketGraphExecutionStateComplete';
import { addInvocationCompleteEventListener as addInvocationCompleteListener } from './listeners/socketio/socketInvocationComplete';
import { addInvocationErrorEventListener as addInvocationErrorListener } from './listeners/socketio/socketInvocationError';
import { addInvocationRetrievalErrorEventListener } from './listeners/socketio/socketInvocationRetrievalError';
import { addInvocationStartedEventListener as addInvocationStartedListener } from './listeners/socketio/socketInvocationStarted';
import { addModelLoadEventListener } from './listeners/socketio/socketModelLoad';
import { addSessionRetrievalErrorEventListener } from './listeners/socketio/socketSessionRetrievalError';
import { addSocketSubscribedEventListener as addSocketSubscribedListener } from './listeners/socketio/socketSubscribed';
import { addSocketUnsubscribedEventListener as addSocketUnsubscribedListener } from './listeners/socketio/socketUnsubscribed';
import { addStagingAreaImageSavedListener } from './listeners/stagingAreaImageSaved';
import { addTabChangedListener } from './listeners/tabChanged';
import { addUpscaleRequestedListener } from './listeners/upscaleRequested';
import { addUserInvokedCanvasListener } from './listeners/userInvokedCanvas';
import { addUserInvokedImageToImageListener } from './listeners/userInvokedImageToImage';
import { addUserInvokedNodesListener } from './listeners/userInvokedNodes';
import { addUserInvokedTextToImageListener } from './listeners/userInvokedTextToImage';
import { addModelLoadStartedEventListener } from './listeners/socketio/socketModelLoadStarted';
import { addModelLoadCompletedEventListener } from './listeners/socketio/socketModelLoadCompleted';
import { addUpscaleRequestedListener } from './listeners/upscaleRequested';
import { addFirstListImagesListener } from './listeners/addFirstListImagesListener.ts';

export const listenerMiddleware = createListenerMiddleware();

@@ -109,10 +107,6 @@ export type AppListenerEffect = ListenerEffect<
addImageUploadedFulfilledListener();
addImageUploadedRejectedListener();

// Image updated
addImageUpdatedFulfilledListener();
addImageUpdatedRejectedListener();

// Image selected
addInitialImageSelectedListener();

@@ -161,8 +155,9 @@ addSocketConnectedListener();
addSocketDisconnectedListener();
addSocketSubscribedListener();
addSocketUnsubscribedListener();
addModelLoadStartedEventListener();
addModelLoadCompletedEventListener();
addModelLoadEventListener();
addSessionRetrievalErrorEventListener();
addInvocationRetrievalErrorEventListener();

// Session Created
addSessionCreatedPendingListener();

@@ -207,3 +202,6 @@ addFirstListImagesListener();
// Ad-hoc upscale workflow
addUpscaleRequestedListener();

// Tab Change
addTabChangedListener();
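Every registration above follows the same shape; a minimal sketch of one, using the appStarted action from this changeset (the example listener and import paths are illustrative):

```ts
import { logger } from 'app/logging/logger';
import { appStarted } from './listeners/appStarted';
import { startAppListening } from '.'; // illustrative path

// Hypothetical listener: match an action, then run a side effect with
// access to the listener API (dispatch, getState, cancellation, etc.).
export const addExampleListener = () => {
  startAppListening({
    actionCreator: appStarted,
    effect: async (action) => {
      logger('system').debug({ type: action.type }, 'App started');
    },
  });
};
```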
@@ -1,14 +1,13 @@
import { startAppListening } from '..';
import { log } from 'app/logging/useLogger';
import { logger } from 'app/logging/logger';
import { commitStagingAreaImage } from 'features/canvas/store/canvasSlice';
import { sessionCanceled } from 'services/api/thunks/session';

const moduleLog = log.child({ namespace: 'canvas' });
import { startAppListening } from '..';

export const addCommitStagingAreaImageListener = () => {
startAppListening({
actionCreator: commitStagingAreaImage,
effect: async (action, { dispatch, getState }) => {
const log = logger('canvas');
const state = getState();
const { sessionId: session_id, isProcessing } = state.system;
const canvasSessionId = action.payload;

@@ -19,17 +18,15 @@ export const addCommitStagingAreaImageListener = () => {
}

if (!canvasSessionId) {
moduleLog.debug('No canvas session, skipping cancel');
log.debug('No canvas session, skipping cancel');
return;
}

if (canvasSessionId !== session_id) {
moduleLog.debug(
log.debug(
{
data: {
canvasSessionId,
session_id,
},
canvasSessionId,
session_id,
},
'Canvas session does not match global session, skipping cancel'
);

@@ -1,8 +1,6 @@
import { createAction } from '@reduxjs/toolkit';
import {
IMAGE_CATEGORIES,
imageSelected,
} from 'features/gallery/store/gallerySlice';
import { imageSelected } from 'features/gallery/store/gallerySlice';
import { IMAGE_CATEGORIES } from 'features/gallery/store/types';
import {
ImageCache,
getListImagesUrl,

@@ -17,7 +15,7 @@ export const addFirstListImagesListener = () => {
matcher: imagesApi.endpoints.listImages.matchFulfilled,
effect: async (
action,
{ getState, dispatch, unsubscribe, cancelActiveListeners }
{ dispatch, unsubscribe, cancelActiveListeners }
) => {
// Only run this listener on the first listImages request for no-board images
if (

@@ -1,4 +1,8 @@
import { setInfillMethod } from 'features/parameters/store/generationSlice';
import {
shouldUseNSFWCheckerChanged,
shouldUseWatermarkerChanged,
} from 'features/system/store/systemSlice';
import { appInfoApi } from 'services/api/endpoints/appInfo';
import { startAppListening } from '..';

@@ -6,12 +10,21 @@ export const addAppConfigReceivedListener = () => {
startAppListening({
matcher: appInfoApi.endpoints.getAppConfig.matchFulfilled,
effect: async (action, { getState, dispatch }) => {
const { infill_methods } = action.payload;
const { infill_methods, nsfw_methods, watermarking_methods } =
action.payload;
const infillMethod = getState().generation.infillMethod;

if (!infill_methods.includes(infillMethod)) {
dispatch(setInfillMethod(infill_methods[0]));
}

if (!nsfw_methods.includes('nsfw_checker')) {
dispatch(shouldUseNSFWCheckerChanged(false));
}

if (!watermarking_methods.includes('invisible_watermark')) {
dispatch(shouldUseWatermarkerChanged(false));
}
},
});
};
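For clarity, the slice of the getAppConfig payload this listener depends on, inferred from the fields it destructures (the full response type lives in services/api and may carry more fields):

```ts
// Inferred shape; example values are illustrative.
type AppConfigSlice = {
  infill_methods: string[]; // e.g. ['patchmatch', 'tile']
  nsfw_methods: string[]; // e.g. ['nsfw_checker']
  watermarking_methods: string[]; // e.g. ['invisible_watermark']
};
```

If the server does not report a capability, the listener switches the corresponding client feature off up front rather than letting it fail at generation time.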
@@ -6,10 +6,7 @@ export const appStarted = createAction('app/appStarted');
export const addAppStartedListener = () => {
startAppListening({
actionCreator: appStarted,
effect: async (
action,
{ getState, dispatch, unsubscribe, cancelActiveListeners }
) => {
effect: async (action, { unsubscribe, cancelActiveListeners }) => {
// this should only run once
cancelActiveListeners();
unsubscribe();

@@ -1,6 +1,6 @@
import { resetCanvas } from 'features/canvas/store/canvasSlice';
import { controlNetReset } from 'features/controlNet/store/controlNetSlice';
import { getImageUsage } from 'features/imageDeletion/store/imageDeletionSlice';
import { getImageUsage } from 'features/imageDeletion/store/imageDeletionSelectors';
import { nodeEditorReset } from 'features/nodes/store/nodesSlice';
import { clearInitialImage } from 'features/parameters/store/generationSlice';
import { startAppListening } from '..';

@@ -9,8 +9,8 @@ import { boardsApi } from '../../../../../services/api/endpoints/boards';
export const addDeleteBoardAndImagesFulfilledListener = () => {
startAppListening({
matcher: boardsApi.endpoints.deleteBoardAndImages.matchFulfilled,
effect: async (action, { dispatch, getState, condition }) => {
const { board_id, deleted_board_images, deleted_images } = action.payload;
effect: async (action, { dispatch, getState }) => {
const { deleted_images } = action.payload;

// Remove all deleted images from the UI

@@ -1,16 +1,15 @@
import { log } from 'app/logging/useLogger';
import { isAnyOf } from '@reduxjs/toolkit';
import {
ASSETS_CATEGORIES,
IMAGE_CATEGORIES,
boardIdSelected,
galleryViewChanged,
imageSelected,
} from 'features/gallery/store/gallerySlice';
import {
ASSETS_CATEGORIES,
IMAGE_CATEGORIES,
} from 'features/gallery/store/types';
import { imagesApi } from 'services/api/endpoints/images';
import { startAppListening } from '..';
import { isAnyOf } from '@reduxjs/toolkit';

const moduleLog = log.child({ namespace: 'boards' });

export const addBoardIdSelectedListener = () => {
startAppListening({

@@ -1,16 +1,17 @@
import { canvasCopiedToClipboard } from 'features/canvas/store/actions';
import { startAppListening } from '..';
import { log } from 'app/logging/useLogger';
import { $logger } from 'app/logging/logger';
import { getBaseLayerBlob } from 'features/canvas/util/getBaseLayerBlob';
import { addToast } from 'features/system/store/systemSlice';
import { copyBlobToClipboard } from 'features/canvas/util/copyBlobToClipboard';

const moduleLog = log.child({ namespace: 'canvasCopiedToClipboardListener' });

export const addCanvasCopiedToClipboardListener = () => {
startAppListening({
actionCreator: canvasCopiedToClipboard,
effect: async (action, { dispatch, getState }) => {
const moduleLog = $logger
.get()
.child({ namespace: 'canvasCopiedToClipboardListener' });
const state = getState();

const blob = await getBaseLayerBlob(state);

@@ -1,16 +1,17 @@
import { canvasDownloadedAsImage } from 'features/canvas/store/actions';
import { startAppListening } from '..';
import { log } from 'app/logging/useLogger';
import { $logger } from 'app/logging/logger';
import { downloadBlob } from 'features/canvas/util/downloadBlob';
import { getBaseLayerBlob } from 'features/canvas/util/getBaseLayerBlob';
import { addToast } from 'features/system/store/systemSlice';

const moduleLog = log.child({ namespace: 'canvasSavedToGalleryListener' });

export const addCanvasDownloadedAsImageListener = () => {
startAppListening({
actionCreator: canvasDownloadedAsImage,
effect: async (action, { dispatch, getState }) => {
const moduleLog = $logger
.get()
.child({ namespace: 'canvasSavedToGalleryListener' });
const state = getState();

const blob = await getBaseLayerBlob(state);

@@ -1,4 +1,4 @@
import { log } from 'app/logging/useLogger';
import { $logger } from 'app/logging/logger';
import { canvasMerged } from 'features/canvas/store/actions';
import { setMergedCanvas } from 'features/canvas/store/canvasSlice';
import { getFullBaseLayerBlob } from 'features/canvas/util/getFullBaseLayerBlob';

@@ -7,12 +7,13 @@ import { addToast } from 'features/system/store/systemSlice';
import { imagesApi } from 'services/api/endpoints/images';
import { startAppListening } from '..';

const moduleLog = log.child({ namespace: 'canvasCopiedToClipboardListener' });

export const addCanvasMergedListener = () => {
startAppListening({
actionCreator: canvasMerged,
effect: async (action, { dispatch, getState, take }) => {
effect: async (action, { dispatch }) => {
const moduleLog = $logger
.get()
.child({ namespace: 'canvasCopiedToClipboardListener' });
const blob = await getFullBaseLayerBlob();

if (!blob) {

@@ -1,22 +1,21 @@
import { log } from 'app/logging/useLogger';
import { logger } from 'app/logging/logger';
import { canvasSavedToGallery } from 'features/canvas/store/actions';
import { getBaseLayerBlob } from 'features/canvas/util/getBaseLayerBlob';
import { addToast } from 'features/system/store/systemSlice';
import { imagesApi } from 'services/api/endpoints/images';
import { startAppListening } from '..';

const moduleLog = log.child({ namespace: 'canvasSavedToGalleryListener' });

export const addCanvasSavedToGalleryListener = () => {
startAppListening({
actionCreator: canvasSavedToGallery,
effect: async (action, { dispatch, getState, take }) => {
effect: async (action, { dispatch, getState }) => {
const log = logger('canvas');
const state = getState();

const blob = await getBaseLayerBlob(state);

if (!blob) {
moduleLog.error('Problem getting base layer blob');
log.error('Problem getting base layer blob');
dispatch(
addToast({
title: 'Problem Saving Canvas',

@@ -1,6 +1,6 @@
import { AnyListenerPredicate } from '@reduxjs/toolkit';
import { startAppListening } from '..';
import { log } from 'app/logging/useLogger';
import { logger } from 'app/logging/logger';
import { RootState } from 'app/store/store';
import { controlNetImageProcessed } from 'features/controlNet/store/actions';
import {
controlNetAutoConfigToggled,

@@ -9,9 +9,7 @@ import {
controlNetProcessorParamsChanged,
controlNetProcessorTypeChanged,
} from 'features/controlNet/store/controlNetSlice';
import { RootState } from 'app/store/store';

const moduleLog = log.child({ namespace: 'controlNet' });
import { startAppListening } from '..';

const predicate: AnyListenerPredicate<RootState> = (
action,

@@ -64,18 +62,13 @@ const predicate: AnyListenerPredicate<RootState> = (
export const addControlNetAutoProcessListener = () => {
startAppListening({
predicate,
effect: async (
action,
{ dispatch, getState, cancelActiveListeners, delay }
) => {
effect: async (action, { dispatch, cancelActiveListeners, delay }) => {
const log = logger('session');
const { controlNetId } = action.payload;

// Cancel any in-progress instances of this listener
cancelActiveListeners();
moduleLog.trace(
{ data: action.payload },
'ControlNet auto-process triggered'
);
log.trace('ControlNet auto-process triggered');
// Delay before starting actual work
await delay(300);

@@ -1,4 +1,4 @@
import { log } from 'app/logging/useLogger';
import { logger } from 'app/logging/logger';
import { controlNetImageProcessed } from 'features/controlNet/store/actions';
import { controlNetProcessedImageChanged } from 'features/controlNet/store/controlNetSlice';
import { sessionReadyToInvoke } from 'features/system/store/actions';

@@ -9,20 +9,16 @@ import { Graph, ImageDTO } from 'services/api/types';
import { socketInvocationComplete } from 'services/events/actions';
import { startAppListening } from '..';

const moduleLog = log.child({ namespace: 'controlNet' });

export const addControlNetImageProcessedListener = () => {
startAppListening({
actionCreator: controlNetImageProcessed,
effect: async (
action,
{ dispatch, getState, take, unsubscribe, subscribe }
) => {
effect: async (action, { dispatch, getState, take }) => {
const log = logger('session');
const { controlNetId } = action.payload;
const controlNet = getState().controlNet.controlNets[controlNetId];

if (!controlNet.controlImage) {
moduleLog.error('Unable to process ControlNet image');
log.error('Unable to process ControlNet image');
return;
}

@@ -70,8 +66,8 @@ export const addControlNetImageProcessedListener = () => {

const processedControlImage = payload as ImageDTO;

moduleLog.debug(
{ data: { arg: action.payload, processedControlImage } },
log.debug(
{ controlNetId: action.payload, processedControlImage },
'ControlNet image processed'
);

@@ -1,18 +1,17 @@
import { log } from 'app/logging/useLogger';
import { logger } from 'app/logging/logger';
import { imagesApi } from 'services/api/endpoints/images';
import { startAppListening } from '..';

const moduleLog = log.child({ namespace: 'boards' });

export const addImageAddedToBoardFulfilledListener = () => {
startAppListening({
matcher: imagesApi.endpoints.addImageToBoard.matchFulfilled,
effect: (action, { getState, dispatch }) => {
effect: (action) => {
const log = logger('images');
const { board_id, imageDTO } = action.meta.arg.originalArgs;

// TODO: update listImages cache for this board

moduleLog.debug({ data: { board_id, imageDTO } }, 'Image added to board');
log.debug({ board_id, imageDTO }, 'Image added to board');
},
});
};

@@ -20,13 +19,11 @@ export const addImageAddedToBoardFulfilledListener = () => {
export const addImageAddedToBoardRejectedListener = () => {
startAppListening({
matcher: imagesApi.endpoints.addImageToBoard.matchRejected,
effect: (action, { getState, dispatch }) => {
effect: (action) => {
const log = logger('images');
const { board_id, imageDTO } = action.meta.arg.originalArgs;

moduleLog.debug(
{ data: { board_id, imageDTO } },
'Problem adding image to board'
);
log.debug({ board_id, imageDTO }, 'Problem adding image to board');
},
});
};

@@ -1,12 +1,10 @@
import { log } from 'app/logging/useLogger';
import { logger } from 'app/logging/logger';
import { resetCanvas } from 'features/canvas/store/canvasSlice';
import { controlNetReset } from 'features/controlNet/store/controlNetSlice';
import { selectListImagesBaseQueryArgs } from 'features/gallery/store/gallerySelectors';
import { imageSelected } from 'features/gallery/store/gallerySlice';
import {
imageDeletionConfirmed,
isModalOpenChanged,
} from 'features/imageDeletion/store/imageDeletionSlice';
import { imageDeletionConfirmed } from 'features/imageDeletion/store/actions';
import { isModalOpenChanged } from 'features/imageDeletion/store/imageDeletionSlice';
import { nodeEditorReset } from 'features/nodes/store/nodesSlice';
import { clearInitialImage } from 'features/parameters/store/generationSlice';
import { clamp } from 'lodash-es';

@@ -14,8 +12,6 @@ import { api } from 'services/api';
import { imagesApi } from 'services/api/endpoints/images';
import { startAppListening } from '..';

const moduleLog = log.child({ namespace: 'image' });

/**
* Called when the user requests an image deletion
*/

@@ -107,7 +103,7 @@ export const addRequestedImageDeletionListener = () => {
export const addImageDeletedPendingListener = () => {
startAppListening({
matcher: imagesApi.endpoints.deleteImage.matchPending,
effect: (action, { dispatch, getState }) => {
effect: () => {
//
},
});

@@ -119,11 +115,9 @@ export const addImageDeletedPendingListener = () => {
export const addImageDeletedFulfilledListener = () => {
startAppListening({
matcher: imagesApi.endpoints.deleteImage.matchFulfilled,
effect: (action, { dispatch, getState }) => {
moduleLog.debug(
{ data: { image: action.meta.arg.originalArgs } },
'Image deleted'
);
effect: (action) => {
const log = logger('images');
log.debug({ imageDTO: action.meta.arg.originalArgs }, 'Image deleted');
},
});
};

@@ -134,9 +128,10 @@ export const addImageDeletedFulfilledListener = () => {
export const addImageDeletedRejectedListener = () => {
startAppListening({
matcher: imagesApi.endpoints.deleteImage.matchRejected,
effect: (action, { dispatch, getState }) => {
moduleLog.debug(
{ data: { image: action.meta.arg.originalArgs } },
effect: (action) => {
const log = logger('images');
log.debug(
{ imageDTO: action.meta.arg.originalArgs },
'Unable to delete image'
);
},

@@ -3,7 +3,7 @@ import {
TypesafeDraggableData,
TypesafeDroppableData,
} from 'app/components/ImageDnd/typesafeDnd';
import { log } from 'app/logging/useLogger';
import { logger } from 'app/logging/logger';
import { setInitialCanvasImage } from 'features/canvas/store/canvasSlice';
import { controlNetImageChanged } from 'features/controlNet/store/controlNetSlice';
import {

@@ -15,8 +15,6 @@ import { initialImageChanged } from 'features/parameters/store/generationSlice';
import { imagesApi } from 'services/api/endpoints/images';
import { startAppListening } from '../';

const moduleLog = log.child({ namespace: 'dnd' });

export const dndDropped = createAction<{
overData: TypesafeDroppableData;
activeData: TypesafeDraggableData;

@@ -25,14 +23,11 @@ export const dndDropped = createAction<{
export const addImageDroppedListener = () => {
startAppListening({
actionCreator: dndDropped,
effect: async (action, { dispatch, getState, take }) => {
effect: async (action, { dispatch }) => {
const log = logger('images');
const { activeData, overData } = action.payload;
const state = getState();

moduleLog.debug(
{ data: { activeData, overData } },
'Image or selection dropped'
);
log.debug({ activeData, overData }, 'Image or selection dropped');

// set current image
if (

@@ -1,19 +1,15 @@
import { log } from 'app/logging/useLogger';
import { logger } from 'app/logging/logger';
import { imagesApi } from 'services/api/endpoints/images';
import { startAppListening } from '..';

const moduleLog = log.child({ namespace: 'boards' });

export const addImageRemovedFromBoardFulfilledListener = () => {
startAppListening({
matcher: imagesApi.endpoints.removeImageFromBoard.matchFulfilled,
effect: (action, { getState, dispatch }) => {
const { board_id, image_name } = action.meta.arg.originalArgs;
effect: (action) => {
const log = logger('images');
const imageDTO = action.meta.arg.originalArgs;

moduleLog.debug(
{ data: { board_id, image_name } },
'Image added to board'
);
log.debug({ imageDTO }, 'Image removed from board');
},
});
};

@@ -21,13 +17,11 @@ export const addImageRemovedFromBoardFulfilledListener = () => {
export const addImageRemovedFromBoardRejectedListener = () => {
startAppListening({
matcher: imagesApi.endpoints.removeImageFromBoard.matchRejected,
effect: (action, { getState, dispatch }) => {
const { board_id, image_name } = action.meta.arg.originalArgs;
effect: (action) => {
const log = logger('images');
const imageDTO = action.meta.arg.originalArgs;

moduleLog.debug(
{ data: { board_id, image_name } },
'Problem adding image to board'
);
log.debug({ imageDTO }, 'Problem removing image from board');
},
});
};

@@ -1,18 +1,15 @@
import { startAppListening } from '..';
import { log } from 'app/logging/useLogger';
import { imageDeletionConfirmed } from 'features/imageDeletion/store/actions';
import { selectImageUsage } from 'features/imageDeletion/store/imageDeletionSelectors';
import {
imageDeletionConfirmed,
imageToDeleteSelected,
isModalOpenChanged,
selectImageUsage,
} from 'features/imageDeletion/store/imageDeletionSlice';

const moduleLog = log.child({ namespace: 'image' });
import { startAppListening } from '..';

export const addImageToDeleteSelectedListener = () => {
startAppListening({
actionCreator: imageToDeleteSelected,
effect: async (action, { dispatch, getState, condition }) => {
effect: async (action, { dispatch, getState }) => {
const imageDTO = action.payload;
const state = getState();
const { shouldConfirmOnDelete } = state.system;

@@ -1,34 +0,0 @@
import { log } from 'app/logging/useLogger';
import { imagesApi } from 'services/api/endpoints/images';
import { startAppListening } from '..';

const moduleLog = log.child({ namespace: 'image' });

export const addImageUpdatedFulfilledListener = () => {
// startAppListening({
// matcher: imagesApi.endpoints.updateImage.matchFulfilled,
// effect: (action, { dispatch, getState }) => {
// moduleLog.debug(
// {
// data: {
// oldImage: action.meta.arg.originalArgs,
// updatedImage: action.payload,
// },
// },
// 'Image updated'
// );
// },
// });
};

export const addImageUpdatedRejectedListener = () => {
// startAppListening({
// matcher: imagesApi.endpoints.updateImage.matchRejected,
// effect: (action, { dispatch }) => {
// moduleLog.debug(
// { data: action.meta.arg.originalArgs },
// 'Image update failed'
// );
// },
// });
};

@@ -1,5 +1,5 @@
import { UseToastOptions } from '@chakra-ui/react';
import { log } from 'app/logging/useLogger';
import { logger } from 'app/logging/logger';
import { setInitialCanvasImage } from 'features/canvas/store/canvasSlice';
import { controlNetImageChanged } from 'features/controlNet/store/controlNetSlice';
import { imagesAddedToBatch } from 'features/gallery/store/gallerySlice';

@@ -9,8 +9,7 @@ import { addToast } from 'features/system/store/systemSlice';
import { boardsApi } from 'services/api/endpoints/boards';
import { startAppListening } from '..';
import { imagesApi } from '../../../../../services/api/endpoints/images';

const moduleLog = log.child({ namespace: 'image' });
import { omit } from 'lodash-es';

const DEFAULT_UPLOADED_TOAST: UseToastOptions = {
title: 'Image Uploaded',

@@ -21,11 +20,12 @@ export const addImageUploadedFulfilledListener = () => {
startAppListening({
matcher: imagesApi.endpoints.uploadImage.matchFulfilled,
effect: (action, { dispatch, getState }) => {
const log = logger('images');
const imageDTO = action.payload;
const state = getState();
const { selectedBoardId, autoAddBoardId } = state.gallery;
const { autoAddBoardId } = state.gallery;

moduleLog.debug({ arg: '<Blob>', imageDTO }, 'Image uploaded');
log.debug({ imageDTO }, 'Image uploaded');

const { postUploadAction } = action.meta.arg.originalArgs;

@@ -140,9 +140,14 @@ export const addImageUploadedRejectedListener = () => {
startAppListening({
matcher: imagesApi.endpoints.uploadImage.matchRejected,
effect: (action, { dispatch }) => {
const { file, postUploadAction, ...rest } = action.meta.arg.originalArgs;
const sanitizedData = { arg: { ...rest, file: '<Blob>' } };
moduleLog.error({ data: sanitizedData }, 'Image upload failed');
const log = logger('images');
const sanitizedData = {
arg: {
...omit(action.meta.arg.originalArgs, ['file', 'postUploadAction']),
file: '<Blob>',
},
};
log.error({ ...sanitizedData }, 'Image upload failed');
dispatch(
addToast({
title: 'Image Upload Failed',

@@ -1,14 +1,14 @@
import { makeToast } from 'app/components/Toaster';
import { initialImageSelected } from 'features/parameters/store/actions';
import { initialImageChanged } from 'features/parameters/store/generationSlice';
import { addToast } from 'features/system/store/systemSlice';
import { makeToast } from 'features/system/util/makeToast';
import { t } from 'i18next';
import { startAppListening } from '..';

export const addInitialImageSelectedListener = () => {
startAppListening({
actionCreator: initialImageSelected,
effect: (action, { getState, dispatch }) => {
effect: (action, { dispatch }) => {
if (!action.payload) {
dispatch(
addToast(