Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-15 09:18:00 -05:00)

Compare commits: feat/arbit...feat/queue (93 commits)
| SHA1 |
|---|
| 5e6b5c8fd6 |
| 3e01c396e1 |
| 0beb08686c |
| 693c6cf5e4 |
| 77933a0a85 |
| 2a087bf161 |
| b0fe57ec80 |
| 09cb40786f |
| 18ecfc0521 |
| 59d932e9c1 |
| 578c8ce5dd |
| 3d4874dc34 |
| 5aaf2e8873 |
| f3fd0f6d73 |
| 4468581d2e |
| da642b7aad |
| b379e3d187 |
| 6867c79185 |
| a1705dc6b3 |
| 4af4486dd9 |
| 282a7f32d3 |
| 4c6a88a642 |
| e41d0b9a76 |
| a02090b06b |
| 0d9a546d74 |
| 8d99113bef |
| 4309f3bd58 |
| 42370939a8 |
| 654591cbf3 |
| ad9c954a58 |
| a703e1b3d3 |
| e85f2254f0 |
| 8f2cf30191 |
| 296741306c |
| 5386a286fd |
| 803fb393bb |
| ab944bd13a |
| 514c49d946 |
| 858bcdd3ff |
| ed79980dd4 |
| 86a74e929a |
| 0d52430481 |
| 4eca802cdd |
| ff0a25bd9c |
| ace0eb366b |
| d971c5fa64 |
| ae82df0fda |
| e28262ebd9 |
| 250ee4b11c |
| b7293d638b |
| eee863e380 |
| e509d719ee |
| 1d8f44d356 |
| 7653d21cf5 |
| 46a2d83b84 |
| 79efc6789e |
| 2192210910 |
| 84629df49c |
| ef6b27ab35 |
| 17420f76b3 |
| 45213aa631 |
| 4381dabbd9 |
| b4a03fcf42 |
| 714be33850 |
| 5f23fc493d |
| 4fe93e521e |
| 6e6d903f99 |
| 667a2a3d84 |
| f57b277d5a |
| e62991c54d |
| 785d584603 |
| da4aab9233 |
| 591b601fd3 |
| 317b5ebae1 |
| 98a4930a52 |
| 1a596a5684 |
| 84a0a0fa14 |
| da443973cb |
| d073d10f9f |
| 2b7e7496f7 |
| 50ab677ea4 |
| cb81558302 |
| 9259483081 |
| 4ece322f82 |
| 13e8fa733e |
| 3e473ae008 |
| 487fda0226 |
| 74d3b22533 |
| b5e018972f |
| 2af844385f |
| 540047e26e |
| 4d8b8a2db8 |
| d581a3289b |
Makefile (new file, 21 lines)

@@ -0,0 +1,21 @@
+# simple Makefile with scripts that are otherwise hard to remember
+# to use, run from the repo root `make <command>`
+
+# Runs ruff, fixing any safely-fixable errors and formatting
+ruff:
+	ruff check . --fix
+	ruff format .
+
+# Runs ruff, fixing all errors it can fix and formatting
+ruff-unsafe:
+	ruff check . --fix --unsafe-fixes
+	ruff format .
+
+# Runs mypy, using the config in pyproject.toml
+mypy:
+	mypy scripts/invokeai-web.py
+
+# Runs mypy, ignoring the config in pyproject.toml but still ignoring missing (untyped) imports
+# (many files are ignored by the config, so this is useful for checking all files)
+mypy-all:
+	mypy scripts/invokeai-web.py --config-file= --ignore-missing-imports
@@ -395,7 +395,7 @@ Notes](https://github.com/invoke-ai/InvokeAI/releases) and the

 ### Troubleshooting

-Please check out our **[Q&A](https://invoke-ai.github.io/InvokeAI/help/TROUBLESHOOT/#faq)** to get solutions for common installation
+Please check out our **[Troubleshooting Guide](https://invoke-ai.github.io/InvokeAI/installation/010_INSTALL_AUTOMATED/#troubleshooting)** to get solutions for common installation
 problems and other issues. For more help, please join our [Discord][discord link]

 ## Contributing
@@ -65,7 +65,7 @@ The first set of things we need to do when creating a new Invocation are -

 So let us do that.

 ```python
-from .baseinvocation import BaseInvocation, invocation
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation

 @invocation('resize')
 class ResizeInvocation(BaseInvocation):
@@ -99,8 +99,8 @@ create your own custom field types later in this guide. For now, let's go ahead
 and use it.

 ```python
-from .baseinvocation import BaseInvocation, InputField, invocation
-from .primitives import ImageField
+from invokeai.app.invocations.baseinvocation import BaseInvocation, InputField, invocation
+from invokeai.app.invocations.primitives import ImageField

 @invocation('resize')
 class ResizeInvocation(BaseInvocation):
@@ -124,8 +124,8 @@ image: ImageField = InputField(description="The input image")
 Great. Now let us create our other inputs for `width` and `height`

 ```python
-from .baseinvocation import BaseInvocation, InputField, invocation
-from .primitives import ImageField
+from invokeai.app.invocations.baseinvocation import BaseInvocation, InputField, invocation
+from invokeai.app.invocations.primitives import ImageField

 @invocation('resize')
 class ResizeInvocation(BaseInvocation):
@@ -160,8 +160,8 @@ that are provided to it by InvokeAI.
 Let us create this function first.

 ```python
-from .baseinvocation import BaseInvocation, InputField, invocation
-from .primitives import ImageField
+from invokeai.app.invocations.baseinvocation import BaseInvocation, InputField, invocation, InvocationContext
+from invokeai.app.invocations.primitives import ImageField

 @invocation('resize')
 class ResizeInvocation(BaseInvocation):
@@ -189,9 +189,9 @@ all the necessary info related to image outputs. So let us use that.
 We will cover how to create your own output types later in this guide.

 ```python
-from .baseinvocation import BaseInvocation, InputField, invocation
-from .primitives import ImageField
-from .image import ImageOutput
+from invokeai.app.invocations.baseinvocation import BaseInvocation, InputField, invocation, InvocationContext
+from invokeai.app.invocations.primitives import ImageField
+from invokeai.app.invocations.image import ImageOutput

 @invocation('resize')
 class ResizeInvocation(BaseInvocation):
@@ -216,9 +216,9 @@ Perfect. Now that we have our Invocation setup, let us do what we want to do.
 So let's do that.

 ```python
-from .baseinvocation import BaseInvocation, InputField, invocation
-from .primitives import ImageField
-from .image import ImageOutput
+from invokeai.app.invocations.baseinvocation import BaseInvocation, InputField, invocation, InvocationContext
+from invokeai.app.invocations.primitives import ImageField
+from invokeai.app.invocations.image import ImageOutput, ResourceOrigin, ImageCategory

 @invocation("resize")
 class ResizeInvocation(BaseInvocation):
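Taken together, the hunks above migrate the invocation guide from relative to absolute imports. For orientation, here is a minimal sketch of the complete node the guide builds up to, using the imports exactly as the updated guide shows; the `invoke()` body is illustrative of the 3.x-era services API rather than quoted from this diff:

```python
from invokeai.app.invocations.baseinvocation import (
    BaseInvocation,
    InputField,
    InvocationContext,
    invocation,
)
from invokeai.app.invocations.image import ImageCategory, ImageOutput, ResourceOrigin
from invokeai.app.invocations.primitives import ImageField


@invocation("resize")
class ResizeInvocation(BaseInvocation):
    """Resizes an image (sketch of the node assembled in the guide above)."""

    image: ImageField = InputField(description="The input image")
    width: int = InputField(default=512, description="Width of the new image")
    height: int = InputField(default=512, description="Height of the new image")

    def invoke(self, context: InvocationContext) -> ImageOutput:
        # Load the PIL image referenced by the ImageField
        image = context.services.images.get_pil_image(self.image.image_name)

        # Do the actual work of the node
        resized = image.resize((self.width, self.height))

        # Save the result and return an ImageOutput pointing at it
        image_dto = context.services.images.create(
            image=resized,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            node_id=self.id,
            session_id=context.graph_execution_state_id,
            is_intermediate=self.is_intermediate,
        )
        return ImageOutput(
            image=ImageField(image_name=image_dto.image_name),
            width=image_dto.width,
            height=image_dto.height,
        )
```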
@@ -8,7 +8,7 @@ To use a node, add the node to the `nodes` folder found in your InvokeAI install

 The suggested method is to use `git clone` to clone the repository the node is found in. This allows for easy updates of the node in the future.

-If you'd prefer, you can also just download the `.py` file from the linked repository and add it to the `nodes` folder.
+If you'd prefer, you can also just download the whole node folder from the linked repository and add it to the `nodes` folder.

 To use a community workflow, download the `.json` node graph file and load it into Invoke AI via the **Load Workflow** button in the Workflow Editor.
@@ -26,8 +26,10 @@ To use a community workflow, download the `.json` node graph file and load i
 + [Image Picker](#image-picker)
 + [Load Video Frame](#load-video-frame)
 + [Make 3D](#make-3d)
++ [Match Histogram](#match-histogram)
 + [Oobabooga](#oobabooga)
 + [Prompt Tools](#prompt-tools)
++ [Remote Image](#remote-image)
 + [Retroize](#retroize)
 + [Size Stepper Nodes](#size-stepper-nodes)
 + [Text font to Image](#text-font-to-image)
@@ -207,6 +209,23 @@ This includes 15 Nodes:
 <img src="https://gitlab.com/srcrr/shift3d/-/raw/main/example-1.png" width="300" />
 <img src="https://gitlab.com/srcrr/shift3d/-/raw/main/example-2.png" width="300" />

+--------------------------------
+### Match Histogram
+
+**Description:** An InvokeAI node to match a histogram from one image to another. This is a bit like the `color correct` node in the main InvokeAI, but it works in the YCbCr colourspace and can handle images of different sizes. It also does not require a mask input.
+- Option to only transfer the luminance channel.
+- Option to save the output as grayscale.
+
+A good use case for this node is to normalize the colors of an image that has been through the tiled scaling workflow of my XYGrid Nodes.
+
+See full docs here: https://github.com/skunkworxdark/Prompt-tools-nodes/edit/main/README.md
+
+**Node Link:** https://github.com/skunkworxdark/match_histogram
+
+**Output Examples**
+
+<img src="https://github.com/skunkworxdark/match_histogram/assets/21961335/ed12f329-a0ef-444a-9bae-129ed60d6097" width="300" />
+
 --------------------------------
 ### Oobabooga
@@ -236,22 +255,41 @@ This node works best with SDXL models, especially as the style can be described
 --------------------------------
 ### Prompt Tools

-**Description:** A set of InvokeAI nodes that add general prompt manipulation tools. These were written to accompany the PromptsFromFile node and other prompt generation nodes.
+**Description:** A set of InvokeAI nodes that add general prompt (string) manipulation tools. Designed to accompany the `Prompts From File` node and other prompt generation nodes.

+1. `Prompt To File` - Saves a prompt or collection of prompts to a file, one per line. There is an append/overwrite option.
+2. `PTFields Collect` - Converts image generation fields into a JSON-format string that can be passed to Prompt To File.
+3. `PTFields Expand` - Takes a JSON string and converts it to individual generation parameters. This can be fed from the Prompt To File node.
+4. `Prompt Strength` - Formats a prompt with a strength, like the weighted format of compel.
+5. `Prompt Strength Combine` - Combines weighted prompts for .and()/.blend().
+6. `CSV To Index String` - Gets a string from a CSV by index. Includes a random-index option.
+
+The following nodes are now included in v3.2 of Invoke and are no longer in this set of tools:<br>
+- `Prompt Join` -> `String Join`
+- `Prompt Join Three` -> `String Join Three`
+- `Prompt Replace` -> `String Replace`
+- `Prompt Split Neg` -> `String Split Neg`
+
-1. PromptJoin - Joins two prompts into one.
-2. PromptReplace - Performs a search and replace on a prompt, with the option of using regex.
-3. PromptSplitNeg - Splits a prompt into positive and negative using the old V2 method of [] for negative.
-4. PromptToFile - Saves a prompt or collection of prompts to a file, one per line. There is an append/overwrite option.
-5. PTFieldsCollect - Converts image generation fields into a JSON-format string that can be passed to Prompt To File.
-6. PTFieldsExpand - Takes a JSON string and converts it to individual generation parameters. This can be fed from the Prompt To File node.
-7. PromptJoinThree - Joins 3 prompts together.
-8. PromptStrength - Takes a string and a float and outputs another string in the format (string)strength, like the weighted format of compel.
-9. PromptStrengthCombine - Takes a collection of prompt-strength strings and outputs a string in the .and() or .blend() format that can be fed into a proper prompt node.

 See full docs here: https://github.com/skunkworxdark/Prompt-tools-nodes/edit/main/README.md

 **Node Link:** https://github.com/skunkworxdark/Prompt-tools-nodes

 **Workflow Examples**

+<img src="https://github.com/skunkworxdark/prompt-tools/blob/main/images/CSVToIndexStringNode.png" width="300" />

 --------------------------------
+### Remote Image
+
+**Description:** This is a pack of nodes to interoperate with other services, be they public websites or bespoke local servers. The pack consists of these nodes:
+
+- *Load Remote Image* - Lets you load remote images such as a realtime webcam image, an image of the day, or dynamically created images.
+- *Post Image to Remote Server* - Lets you upload an image to a remote server using an HTTP POST request, e.g. for storage, display or further processing.
+
+**Node Link:** https://github.com/fieldOfView/InvokeAI-remote_image
+
+--------------------------------
 ### Retroize
@@ -327,15 +365,27 @@ Highlights/Midtones/Shadows (with LUT blur enabled):
 --------------------------------
 ### XY Image to Grid and Images to Grids nodes

-**Description:** Image to grid nodes and supporting tools.
+**Description:** These nodes add the following to InvokeAI:
+- Generate grids of images from multiple input images
+- Create XY grid images with labels from parameters
+- Split images into overlapping tiles for processing (for super-resolution workflows)
+- Recombine image tiles into a single output image, blending the seams

-1. "Images To Grids" node - Takes a collection of images and creates a grid (or grids) of images. If there are more images than fit in a single grid, multiple grids will be created until it runs out of images.
-2. "XYImage To Grid" node - Converts a collection of XYImages into a labeled grid of images. The XYImages collection has to be built using the supporting nodes. See example node setups for more details.
+The nodes include:
+1. `Images To Grids` - Combines multiple images into a grid of images
+2. `XYImage To Grid` - Takes X & Y params and creates a labeled image grid.
+3. `XYImage Tiles` - Super-resolution (embiggen) style tiled resizing
+4. `Image To XYImages` - Takes an image and cuts it up into a number of columns and rows.
+5. Multiple supporting nodes - Helper nodes for data wrangling and building `XYImage` collections

 See full docs here: https://github.com/skunkworxdark/XYGrid_nodes/edit/main/README.md

 **Node Link:** https://github.com/skunkworxdark/XYGrid_nodes

 **Output Examples**

+<img src="https://github.com/skunkworxdark/XYGrid_nodes/blob/main/images/collage.png" width="300" />

 --------------------------------
 ### Example Node Template
@@ -1,104 +1,106 @@
 # List of Default Nodes

-The table below contains a list of the default nodes shipped with InvokeAI and their descriptions.
+The table below contains a list of the default nodes shipped with InvokeAI and
+their descriptions.

 | Node <img width=160 align="right"> | Function |
 | :--- | :--- |
 | Add Integers | Adds two numbers |
 | Boolean Primitive Collection | A collection of boolean primitive values |
 | Boolean Primitive | A boolean primitive value |
 | Canny Processor | Canny edge detection for ControlNet |
+| CenterPadCrop | Pad or crop an image's sides from the center by specified pixels. Positive values are outside of the image. |
 | CLIP Skip | Skip layers in clip text_encoder model. |
 | Collect | Collects values into a collection |
 | Color Correct | Shifts the colors of a target image to match the reference image, optionally using a mask to only color-correct certain regions of the target image. |
 | Color Primitive | A color primitive value |
 | Compel Prompt | Parse prompt using compel package to conditioning. |
 | Conditioning Primitive Collection | A collection of conditioning tensor primitive values |
 | Conditioning Primitive | A conditioning tensor primitive value |
 | Content Shuffle Processor | Applies content shuffle processing to image |
 | ControlNet | Collects ControlNet info to pass to other nodes |
 | Denoise Latents | Denoises noisy latents to decodable images |
 | Divide Integers | Divides two numbers |
 | Dynamic Prompt | Parses a prompt using adieyal/dynamicprompts' random or combinatorial generator |
 | [FaceMask](./detailedNodes/faceTools.md#facemask) | Generates masks for faces in an image to use with Inpainting |
 | [FaceIdentifier](./detailedNodes/faceTools.md#faceidentifier) | Identifies and labels faces in an image |
 | [FaceOff](./detailedNodes/faceTools.md#faceoff) | Creates a new image that is a scaled bounding box with a mask on the face for Inpainting |
 | Float Math | Perform basic math operations on two floats |
 | Float Primitive Collection | A collection of float primitive values |
 | Float Primitive | A float primitive value |
 | Float Range | Creates a range |
 | HED (softedge) Processor | Applies HED edge detection to image |
 | Blur Image | Blurs an image |
 | Extract Image Channel | Gets a channel from an image. |
 | Image Primitive Collection | A collection of image primitive values |
 | Integer Math | Perform basic math operations on two integers |
 | Convert Image Mode | Converts an image to a different mode. |
 | Crop Image | Crops an image to a specified box. The box can be outside of the image. |
 | Image Hue Adjustment | Adjusts the Hue of an image. |
 | Inverse Lerp Image | Inverse linear interpolation of all pixels of an image |
 | Image Primitive | An image primitive value |
 | Lerp Image | Linear interpolation of all pixels of an image |
 | Offset Image Channel | Add to or subtract from an image color channel by a uniform value. |
 | Multiply Image Channel | Multiply or Invert an image color channel by a scalar value. |
 | Multiply Images | Multiplies two images together using `PIL.ImageChops.multiply()`. |
 | Blur NSFW Image | Add blur to NSFW-flagged images |
 | Paste Image | Pastes an image into another image. |
 | ImageProcessor | Base class for invocations that preprocess images for ControlNet |
 | Resize Image | Resizes an image to specific dimensions |
 | Round Float | Rounds a float to a specified number of decimal places |
 | Float to Integer | Converts a float to an integer. Optionally rounds to an even multiple of an input number. |
 | Scale Image | Scales an image by a factor |
 | Image to Latents | Encodes an image into latents. |
 | Add Invisible Watermark | Add an invisible watermark to an image |
 | Solid Color Infill | Infills transparent areas of an image with a solid color |
 | PatchMatch Infill | Infills transparent areas of an image using the PatchMatch algorithm |
 | Tile Infill | Infills transparent areas of an image with tiles of the image |
 | Integer Primitive Collection | A collection of integer primitive values |
 | Integer Primitive | An integer primitive value |
 | Iterate | Iterates over a list of items |
 | Latents Primitive Collection | A collection of latents tensor primitive values |
 | Latents Primitive | A latents tensor primitive value |
 | Latents to Image | Generates an image from latents. |
 | Leres (Depth) Processor | Applies leres processing to image |
 | Lineart Anime Processor | Applies line art anime processing to image |
 | Lineart Processor | Applies line art processing to image |
 | LoRA Loader | Apply selected lora to unet and text_encoder. |
 | Main Model Loader | Loads a main model, outputting its submodels. |
 | Combine Mask | Combine two masks together by multiplying them using `PIL.ImageChops.multiply()`. |
 | Mask Edge | Applies an edge mask to an image |
 | Mask from Alpha | Extracts the alpha channel of an image as a mask. |
 | Mediapipe Face Processor | Applies mediapipe face processing to image |
 | Midas (Depth) Processor | Applies Midas depth processing to image |
 | MLSD Processor | Applies MLSD processing to image |
 | Multiply Integers | Multiplies two numbers |
 | Noise | Generates latent noise. |
 | Normal BAE Processor | Applies NormalBae processing to image |
 | ONNX Latents to Image | Generates an image from latents. |
 | ONNX Prompt (Raw) | A node to process inputs and produce outputs. May use dependency injection in `__init__` to receive providers. |
 | ONNX Text to Latents | Generates latents from conditionings. |
 | ONNX Model Loader | Loads a main model, outputting its submodels. |
 | OpenCV Inpaint | Simple inpaint using opencv. |
 | Openpose Processor | Applies Openpose processing to image |
 | PIDI Processor | Applies PIDI processing to image |
 | Prompts from File | Loads prompts from a text file |
 | Random Integer | Outputs a single random integer. |
 | Random Range | Creates a collection of random numbers |
 | Integer Range | Creates a range of numbers from start to stop with step |
 | Integer Range of Size | Creates a range from start to start + size with step |
 | Resize Latents | Resizes latents to explicit width/height (in pixels). Provided dimensions are floor-divided by 8. |
 | SDXL Compel Prompt | Parse prompt using compel package to conditioning. |
 | SDXL LoRA Loader | Apply selected lora to unet and text_encoder. |
 | SDXL Main Model Loader | Loads an sdxl base model, outputting its submodels. |
 | SDXL Refiner Compel Prompt | Parse prompt using compel package to conditioning. |
 | SDXL Refiner Model Loader | Loads an sdxl refiner model, outputting its submodels. |
 | Scale Latents | Scales latents by a given factor. |
 | Segment Anything Processor | Applies segment anything processing to image |
 | Show Image | Displays a provided image, and passes it forward in the pipeline. |
 | Step Param Easing | Experimental per-step parameter easing for denoising steps |
 | String Primitive Collection | A collection of string primitive values |
 | String Primitive | A string primitive value |
 | Subtract Integers | Subtracts two numbers |
 | Tile Resample Processor | Tile resampler processor |
 | Upscale (RealESRGAN) | Upscales an image using RealESRGAN. |
 | VAE Loader | Loads a VAE model, outputting a VaeLoaderOutput |
 | Zoe (Depth) Processor | Applies Zoe depth processing to image |
@@ -7,12 +7,12 @@ To use them, right click on your desired workflow, follow the link to GitHub and
 If you're interested in finding more workflows, checkout the [#share-your-workflows](https://discord.com/channels/1020123559063990373/1130291608097661000) channel in the InvokeAI Discord.

 * [SD1.5 / SD2 Text to Image](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/Text_to_Image.json)
-* [SDXL Text to Image](https://github.com/invoke-ai/InvokeAI/blob/docs/main/docs/workflows/SDXL_Text_to_Image.json)
-* [SDXL Text to Image with Refiner](https://github.com/invoke-ai/InvokeAI/blob/docs/main/docs/workflows/SDXL_w_Refiner_Text_to_Image.json)
-* [Multi ControlNet (Canny & Depth)](https://github.com/invoke-ai/InvokeAI/blob/docs/main/docs/workflows/Multi_ControlNet_Canny_and_Depth.json)
+* [SDXL Text to Image](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/SDXL_Text_to_Image.json)
+* [SDXL Text to Image with Refiner](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/SDXL_w_Refiner_Text_to_Image.json)
+* [Multi ControlNet (Canny & Depth)](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/Multi_ControlNet_Canny_and_Depth.json)
 * [Tiled Upscaling with ControlNet](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/ESRGAN_img2img_upscale_w_Canny_ControlNet.json)
-* [Prompt From File](https://github.com/invoke-ai/InvokeAI/blob/docs/main/docs/workflows/Prompt_from_File.json)
-* [Face Detailer with IP-Adapter & ControlNet](https://github.com/invoke-ai/InvokeAI/blob/docs/main/docs/workflows/Face_Detailer_with_IP-Adapter_and_Canny.json.json)
+* [Prompt From File](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/Prompt_from_File.json)
+* [Face Detailer with IP-Adapter & ControlNet](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/Face_Detailer_with_IP-Adapter_and_Canny.json)
 * [FaceMask](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/FaceMask.json)
 * [FaceOff with 2x Face Scaling](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/FaceOff_FaceScale2x.json)
-* [QR Code Monster](https://github.com/invoke-ai/InvokeAI/blob/docs/main/docs/workflows/QR_Code_Monster.json)
+* [QR Code Monster](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/QR_Code_Monster.json)
@@ -93,6 +93,18 @@ async def Pause(
     return ApiDependencies.invoker.services.session_processor.pause()


+@session_queue_router.put(
+    "/{queue_id}/processor/take_one",
+    operation_id="take_one",
+    responses={200: {"model": SessionProcessorStatus}},
+)
+async def take_one(
+    queue_id: str = Path(description="The queue id to perform this operation on"),
+) -> SessionProcessorStatus:
+    """Executes the next-in-line queue item, pausing the processor afterwards. Has no effect if the queue is resumed."""
+    return ApiDependencies.invoker.services.session_processor.take_one()
+
+
 @session_queue_router.put(
     "/{queue_id}/cancel_by_batch_ids",
     operation_id="cancel_by_batch_ids",
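For reference, the new endpoint can be exercised like any other queue route; in this sketch the `/api/v1/queue` prefix, port, and `default` queue id are assumptions based on InvokeAI's usual API layout, not shown in this diff:

```python
import requests

BASE_URL = "http://127.0.0.1:9090"  # assumed local InvokeAI server
QUEUE_ID = "default"  # assumed queue id

# PUT .../processor/take_one: execute exactly one queued item, then pause the
# processor again. Has no effect if the queue has been resumed in the meantime.
resp = requests.put(f"{BASE_URL}/api/v1/queue/{QUEUE_ID}/processor/take_one")
resp.raise_for_status()
print(resp.json())  # the SessionProcessorStatus payload
```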
@@ -1,14 +1,17 @@
-from typing import Any
-
-from fastapi.responses import HTMLResponse
-
-from .services.config import InvokeAIAppConfig
-
+# parse_args() must be called before any other imports. if it is not called first, consumers of the config
+# which are imported/used before parse_args() is called will get the default config values instead of the
+# values from the command line or config file.
+import sys
+
+from invokeai.version.invokeai_version import __version__
+
+from .services.config import InvokeAIAppConfig
+
 app_config = InvokeAIAppConfig.get_config()
 app_config.parse_args()
+if app_config.version:
+    print(f"InvokeAI version {__version__}")
+    sys.exit(0)

 if True:  # hack to make flake8 happy with imports coming after setting up the config
     import asyncio
@@ -16,6 +19,7 @@ if True: # hack to make flake8 happy with imports coming after setting up the c
     import socket
     from inspect import signature
     from pathlib import Path
+    from typing import Any

     import uvicorn
     from fastapi import FastAPI
@@ -23,7 +27,7 @@ if True: # hack to make flake8 happy with imports coming after setting up the c
     from fastapi.middleware.gzip import GZipMiddleware
     from fastapi.openapi.docs import get_redoc_html, get_swagger_ui_html
     from fastapi.openapi.utils import get_openapi
-    from fastapi.responses import FileResponse
+    from fastapi.responses import FileResponse, HTMLResponse
     from fastapi.staticfiles import StaticFiles
     from fastapi_events.handlers.local import local_handler
     from fastapi_events.middleware import EventHandlerASGIMiddleware
@@ -34,7 +38,6 @@ if True: # hack to make flake8 happy with imports coming after setting up the c
     # noinspection PyUnresolvedReferences
    import invokeai.backend.util.hotfixes  # noqa: F401 (monkeypatching on import)
     import invokeai.frontend.web as web_dir
-    from invokeai.version.invokeai_version import __version__

     from ..backend.util.logging import InvokeAILogger
     from .api.dependencies import ApiDependencies
@@ -51,7 +54,12 @@ if True: # hack to make flake8 happy with imports coming after setting up the c
         workflows,
     )
     from .api.sockets import SocketIO
-    from .invocations.baseinvocation import BaseInvocation, UIConfigBase, _InputField, _OutputField
+    from .invocations.baseinvocation import (
+        BaseInvocation,
+        InputFieldJSONSchemaExtra,
+        OutputFieldJSONSchemaExtra,
+        UIConfigBase,
+    )

     if is_mps_available():
         import invokeai.backend.util.mps_fixes  # noqa: F401 (monkeypatching on import)
@@ -147,7 +155,11 @@ def custom_openapi() -> dict[str, Any]:

     # Add Node Editor UI helper schemas
     ui_config_schemas = models_json_schema(
-        [(UIConfigBase, "serialization"), (_InputField, "serialization"), (_OutputField, "serialization")],
+        [
+            (UIConfigBase, "serialization"),
+            (InputFieldJSONSchemaExtra, "serialization"),
+            (OutputFieldJSONSchemaExtra, "serialization"),
+        ],
         ref_template="#/components/schemas/{model}",
     )
     for schema_key, ui_config_schema in ui_config_schemas[1]["$defs"].items():
@@ -155,7 +167,7 @@ def custom_openapi() -> dict[str, Any]:

     # Add a reference to the output type to additionalProperties of the invoker schema
     for invoker in all_invocations:
-        invoker_name = invoker.__name__
+        invoker_name = invoker.__name__  # type: ignore [attr-defined] # this is a valid attribute
         output_type = signature(obj=invoker.invoke).return_annotation
         output_type_title = output_type_titles[output_type.__name__]
         invoker_schema = openapi_schema["components"]["schemas"][f"{invoker_name}"]
@@ -273,7 +285,4 @@ def invoke_api() -> None:


 if __name__ == "__main__":
-    if app_config.version:
-        print(f"InvokeAI version {__version__}")
-    else:
-        invoke_api()
+    invoke_api()
@@ -5,7 +5,7 @@ from pathlib import Path

 from invokeai.app.services.config.config_default import InvokeAIAppConfig

-custom_nodes_path = Path(InvokeAIAppConfig.get_config().custom_nodes_path.absolute())
+custom_nodes_path = Path(InvokeAIAppConfig.get_config().custom_nodes_path.resolve())
 custom_nodes_path.mkdir(parents=True, exist_ok=True)

 custom_nodes_init_path = str(custom_nodes_path / "__init__.py")
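The switch from `absolute()` to `resolve()` is not cosmetic: `Path.absolute()` merely prepends the working directory, while `Path.resolve()` also normalizes `..` segments and follows symlinks. A standalone illustration:

```python
from pathlib import Path

p = Path("nodes/../nodes")
print(p.absolute())  # e.g. /home/user/invokeai/nodes/../nodes (not normalized)
print(p.resolve())   # e.g. /home/user/invokeai/nodes (normalized, symlinks followed)
```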
@@ -1,4 +1,4 @@
-# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
+# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) and the InvokeAI team

 from __future__ import annotations
@@ -8,7 +8,7 @@ from abc import ABC, abstractmethod
 from enum import Enum
 from inspect import signature
 from types import UnionType
-from typing import TYPE_CHECKING, Any, Callable, ClassVar, Iterable, Literal, Optional, Type, TypeVar, Union
+from typing import TYPE_CHECKING, Any, Callable, ClassVar, Iterable, Literal, Optional, Type, TypeVar, Union, cast

 import semver
 from pydantic import BaseModel, ConfigDict, Field, RootModel, TypeAdapter, create_model
@@ -17,11 +17,17 @@ from pydantic_core import PydanticUndefined

 from invokeai.app.services.config.config_default import InvokeAIAppConfig
 from invokeai.app.shared.fields import FieldDescriptions
+from invokeai.app.util.metaenum import MetaEnum
 from invokeai.app.util.misc import uuid_string
+from invokeai.backend.util.logging import InvokeAILogger

 if TYPE_CHECKING:
     from ..services.invocation_services import InvocationServices

+logger = InvokeAILogger.get_logger()
+
+CUSTOM_NODE_PACK_SUFFIX = "__invokeai-custom-node"
+

 class InvalidVersionError(ValueError):
     pass
@@ -31,7 +37,7 @@ class InvalidFieldError(TypeError):
     pass


-class Input(str, Enum):
+class Input(str, Enum, metaclass=MetaEnum):
     """
     The type of input a field accepts.
     - `Input.Direct`: The field must have its value provided directly, when the invocation and field \
@@ -45,86 +51,124 @@ class Input(str, Enum):
     Any = "any"


-class UIType(str, Enum):
+class FieldKind(str, Enum, metaclass=MetaEnum):
     """
-    Type hints for the UI.
-    If a field should be provided a data type that does not exactly match the python type of the field, \
-    use this to provide the type that should be used instead. See the node development docs for detail \
-    on adding a new field type, which involves client-side changes.
+    The kind of field.
+    - `Input`: An input field on a node.
+    - `Output`: An output field on a node.
+    - `Internal`: A field which is treated as an input, but cannot be used in node definitions. Metadata is
+      one example. It is provided to nodes via the WithMetadata class, and we want to reserve the field name
+      "metadata" for this on all nodes. `FieldKind` is used to short-circuit the field name validation logic,
+      allowing "metadata" for that field.
+    - `NodeAttribute`: The field is a node attribute. These are fields which are not inputs or outputs,
+      but which are used to store information about the node. For example, the `id` and `type` fields are node
+      attributes.
+
+    The presence of this in `json_schema_extra["field_kind"]` is used when initializing node schemas on app
+    startup, and when generating the OpenAPI schema for the workflow editor.
     """

-    # region Primitives
-    Boolean = "boolean"
-    Color = "ColorField"
-    Conditioning = "ConditioningField"
-    Control = "ControlField"
-    Float = "float"
-    Image = "ImageField"
-    Integer = "integer"
-    Latents = "LatentsField"
-    String = "string"
-    # endregion
+    Input = "input"
+    Output = "output"
+    Internal = "internal"
+    NodeAttribute = "node_attribute"

-    # region Collection Primitives
-    BooleanCollection = "BooleanCollection"
-    ColorCollection = "ColorCollection"
-    ConditioningCollection = "ConditioningCollection"
-    ControlCollection = "ControlCollection"
-    FloatCollection = "FloatCollection"
-    ImageCollection = "ImageCollection"
-    IntegerCollection = "IntegerCollection"
-    LatentsCollection = "LatentsCollection"
-    StringCollection = "StringCollection"
-    # endregion
-
-    # region Polymorphic Primitives
-    BooleanPolymorphic = "BooleanPolymorphic"
-    ColorPolymorphic = "ColorPolymorphic"
-    ConditioningPolymorphic = "ConditioningPolymorphic"
-    ControlPolymorphic = "ControlPolymorphic"
-    FloatPolymorphic = "FloatPolymorphic"
-    ImagePolymorphic = "ImagePolymorphic"
-    IntegerPolymorphic = "IntegerPolymorphic"
-    LatentsPolymorphic = "LatentsPolymorphic"
-    StringPolymorphic = "StringPolymorphic"
-    # endregion

+class UIType(str, Enum, metaclass=MetaEnum):
+    """
+    Type hints for the UI for situations in which the field type is not enough to infer the correct UI type.

-    # region Models
-    MainModel = "MainModelField"
+    - Model Fields
+      The most common node-author-facing use will be for model fields. Internally, there is no difference
+      between SD-1, SD-2 and SDXL model fields - they all use the class `MainModelField`. To ensure the
+      base-model-specific UI is rendered, use e.g. `ui_type=UIType.SDXLMainModelField` to indicate that
+      the field is an SDXL main model field.
+
+    - Any Field
+      We cannot infer the usage of `typing.Any` via schema parsing, so you *must* use `ui_type=UIType.Any` to
+      indicate that the field accepts any type. Use with caution. This cannot be used on outputs.
+
+    - Scheduler Field
+      Special handling in the UI is needed for this field, which otherwise would be parsed as a plain enum field.
+
+    - Internal Fields
+      Similar to the Any Field, the `collect` and `iterate` nodes make use of `typing.Any`. To facilitate
+      handling these types in the client, we use `UIType._Collection` and `UIType._CollectionItem`. These
+      should not be used by node authors.
+
+    - DEPRECATED Fields
+      These types are deprecated and should not be used by node authors. A warning will be logged if one is
+      used, and the type will be ignored. They are included here for backwards compatibility.
+    """
+
+    # region Model Field Types
     SDXLMainModel = "SDXLMainModelField"
     SDXLRefinerModel = "SDXLRefinerModelField"
     ONNXModel = "ONNXModelField"
-    VaeModel = "VaeModelField"
+    VaeModel = "VAEModelField"
     LoRAModel = "LoRAModelField"
     ControlNetModel = "ControlNetModelField"
     IPAdapterModel = "IPAdapterModelField"
-    UNet = "UNetField"
-    Vae = "VaeField"
-    CLIP = "ClipField"
     # endregion

-    # region Iterate/Collect
-    Collection = "Collection"
-    CollectionItem = "CollectionItem"
+    # region Misc Field Types
+    Scheduler = "SchedulerField"
+    Any = "AnyField"
     # endregion

-    # region Misc
-    Enum = "enum"
-    Scheduler = "Scheduler"
-    WorkflowField = "WorkflowField"
-    IsIntermediate = "IsIntermediate"
-    BoardField = "BoardField"
-    Any = "Any"
-    MetadataItem = "MetadataItem"
-    MetadataItemCollection = "MetadataItemCollection"
-    MetadataItemPolymorphic = "MetadataItemPolymorphic"
-    MetadataDict = "MetadataDict"
+    # region Internal Field Types
+    _Collection = "CollectionField"
+    _CollectionItem = "CollectionItemField"
     # endregion

+    # region DEPRECATED
+    Boolean = "DEPRECATED_Boolean"
+    Color = "DEPRECATED_Color"
+    Conditioning = "DEPRECATED_Conditioning"
+    Control = "DEPRECATED_Control"
+    Float = "DEPRECATED_Float"
+    Image = "DEPRECATED_Image"
+    Integer = "DEPRECATED_Integer"
+    Latents = "DEPRECATED_Latents"
+    String = "DEPRECATED_String"
+    BooleanCollection = "DEPRECATED_BooleanCollection"
+    ColorCollection = "DEPRECATED_ColorCollection"
+    ConditioningCollection = "DEPRECATED_ConditioningCollection"
+    ControlCollection = "DEPRECATED_ControlCollection"
+    FloatCollection = "DEPRECATED_FloatCollection"
+    ImageCollection = "DEPRECATED_ImageCollection"
+    IntegerCollection = "DEPRECATED_IntegerCollection"
+    LatentsCollection = "DEPRECATED_LatentsCollection"
+    StringCollection = "DEPRECATED_StringCollection"
+    BooleanPolymorphic = "DEPRECATED_BooleanPolymorphic"
+    ColorPolymorphic = "DEPRECATED_ColorPolymorphic"
+    ConditioningPolymorphic = "DEPRECATED_ConditioningPolymorphic"
+    ControlPolymorphic = "DEPRECATED_ControlPolymorphic"
+    FloatPolymorphic = "DEPRECATED_FloatPolymorphic"
+    ImagePolymorphic = "DEPRECATED_ImagePolymorphic"
+    IntegerPolymorphic = "DEPRECATED_IntegerPolymorphic"
+    LatentsPolymorphic = "DEPRECATED_LatentsPolymorphic"
+    StringPolymorphic = "DEPRECATED_StringPolymorphic"
+    MainModel = "DEPRECATED_MainModel"
+    UNet = "DEPRECATED_UNet"
+    Vae = "DEPRECATED_Vae"
+    CLIP = "DEPRECATED_CLIP"
+    Collection = "DEPRECATED_Collection"
+    CollectionItem = "DEPRECATED_CollectionItem"
+    Enum = "DEPRECATED_Enum"
+    WorkflowField = "DEPRECATED_WorkflowField"
+    IsIntermediate = "DEPRECATED_IsIntermediate"
+    BoardField = "DEPRECATED_BoardField"
+    MetadataItem = "DEPRECATED_MetadataItem"
+    MetadataItemCollection = "DEPRECATED_MetadataItemCollection"
+    MetadataItemPolymorphic = "DEPRECATED_MetadataItemPolymorphic"
+    MetadataDict = "DEPRECATED_MetadataDict"
+    # endregion


-class UIComponent(str, Enum):
+class UIComponent(str, Enum, metaclass=MetaEnum):
     """
-    The type of UI component to use for a field, used to override the default components, which are \
+    The type of UI component to use for a field, used to override the default components, which are
     inferred from the field type.
     """
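As a concrete companion to the new `UIType` docstring, this is the declaration pattern it describes; the `MainModelField` import path is assumed from context and the surrounding node class is omitted:

```python
from invokeai.app.invocations.baseinvocation import InputField, UIType
from invokeai.app.invocations.model import MainModelField  # assumed import path

# Inside a BaseInvocation subclass: the python type alone cannot tell the UI
# this is an SDXL model, so the SDXL-specific picker is requested via ui_type.
model: MainModelField = InputField(
    description="The SDXL main model to use",
    ui_type=UIType.SDXLMainModel,
)
```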
@@ -133,21 +177,22 @@ class UIComponent(str, Enum):
     Slider = "slider"


-class _InputField(BaseModel):
+class InputFieldJSONSchemaExtra(BaseModel):
     """
-    *DO NOT USE*
-    This helper class is used to tell the client about our custom field attributes via OpenAPI
-    schema generation, and Typescript type generation from that schema. It serves no functional
-    purpose in the backend.
+    Extra attributes to be added to input fields and their OpenAPI schema. Used during graph execution,
+    and by the workflow editor during schema parsing and UI rendering.
     """

     input: Input
-    ui_hidden: bool
-    ui_type: Optional[UIType]
-    ui_component: Optional[UIComponent]
-    ui_order: Optional[int]
-    ui_choice_labels: Optional[dict[str, str]]
-    item_default: Optional[Any]
+    orig_required: bool
+    field_kind: FieldKind
+    default: Optional[Any] = None
+    orig_default: Optional[Any] = None
+    ui_hidden: bool = False
+    ui_type: Optional[UIType] = None
+    ui_component: Optional[UIComponent] = None
+    ui_order: Optional[int] = None
+    ui_choice_labels: Optional[dict[str, str]] = None

     model_config = ConfigDict(
         validate_assignment=True,
@@ -155,14 +200,13 @@ class _InputField(BaseModel):
     )


-class _OutputField(BaseModel):
+class OutputFieldJSONSchemaExtra(BaseModel):
     """
-    *DO NOT USE*
-    This helper class is used to tell the client about our custom field attributes via OpenAPI
-    schema generation, and Typescript type generation from that schema. It serves no functional
-    purpose in the backend.
+    Extra attributes to be added to output fields and their OpenAPI schema. Used by the workflow editor
+    during schema parsing and UI rendering.
     """

+    field_kind: FieldKind
     ui_hidden: bool
     ui_type: Optional[UIType]
     ui_order: Optional[int]
@@ -173,13 +217,9 @@ class _OutputField(BaseModel):
     )


-def get_type(klass: BaseModel) -> str:
-    """Helper function to get an invocation or invocation output's type. This is the default value of the `type` field."""
-    return klass.model_fields["type"].default
-
-
 def InputField(
     # copied from pydantic's Field
+    # TODO: Can we support default_factory?
     default: Any = _Unset,
     default_factory: Callable[[], Any] | None = _Unset,
     title: str | None = _Unset,
@@ -203,12 +243,11 @@ def InputField(
     ui_hidden: bool = False,
     ui_order: Optional[int] = None,
     ui_choice_labels: Optional[dict[str, str]] = None,
-    item_default: Optional[Any] = None,
 ) -> Any:
     """
     Creates an input field for an invocation.

-    This is a wrapper for Pydantic's [Field](https://docs.pydantic.dev/1.10/usage/schema/#field-customization) \
+    This is a wrapper for Pydantic's [Field](https://docs.pydantic.dev/latest/api/fields/#pydantic.fields.Field) \
     that adds a few extra parameters to support graph execution and the node editor UI.

     :param Input input: [Input.Any] The kind of input this field requires. \
@@ -228,28 +267,58 @@
     For example, a `string` field will default to a single-line input, but you may want a multi-line textarea instead. \
     For this case, you could provide `UIComponent.Textarea`.

-    : param bool ui_hidden: [False] Specifies whether or not this field should be hidden in the UI.
+    :param bool ui_hidden: [False] Specifies whether or not this field should be hidden in the UI.

-    : param int ui_order: [None] Specifies the order in which this field should be rendered in the UI. \
+    :param int ui_order: [None] Specifies the order in which this field should be rendered in the UI.

-    : param bool item_default: [None] Specifies the default item value, if this is a collection input. \
-    Ignored for non-collection fields.
+    :param dict[str, str] ui_choice_labels: [None] Specifies the labels to use for the choices in an enum field.
     """

-    json_schema_extra_: dict[str, Any] = {
-        "input": input,
-        "ui_type": ui_type,
-        "ui_component": ui_component,
-        "ui_hidden": ui_hidden,
-        "ui_order": ui_order,
-        "item_default": item_default,
-        "ui_choice_labels": ui_choice_labels,
-        "_field_kind": "input",
-    }
+    json_schema_extra_ = InputFieldJSONSchemaExtra(
+        input=input,
+        ui_type=ui_type,
+        ui_component=ui_component,
+        ui_hidden=ui_hidden,
+        ui_order=ui_order,
+        ui_choice_labels=ui_choice_labels,
+        field_kind=FieldKind.Input,
+        orig_required=True,
+    )

+    """
+    There is a conflict between the typing of invocation definitions and the typing of an invocation's
+    `invoke()` function.
+
+    On instantiation of a node, the invocation definition is used to create the python class. At this time,
+    any number of fields may be optional, because they may be provided by connections.
+
+    On calling of `invoke()`, however, those fields may be required.
+
+    For example, consider a ResizeImageInvocation with an `image: ImageField` field.
+
+    `image` is required during the call to `invoke()`, but when the python class is instantiated,
+    the field may not be present. This is fine, because that image field will be provided by a
+    connection from an ancestor node, which outputs an image.
+
+    This means we want to type the `image` field as optional for the node class definition, but required
+    for the `invoke()` function.
+
+    If we use `typing.Optional` in the node class definition, the field will be typed as optional in the
+    `invoke()` method, and we'll have to do a lot of runtime checks to ensure the field is present - or
+    any static type analysis tools will complain.
+
+    To get around this, in node class definitions, we type all fields correctly for the `invoke()` function,
+    but secretly make them optional in `InputField()`. We also store the original required bool and/or default
+    value. When we call `invoke()`, we use this stored information to do an additional check on the class.
+    """
+
+    if default_factory is not _Unset and default_factory is not None:
+        default = default_factory()
+        logger.warn('"default_factory" is not supported, calling it now to set "default"')
+
     # These are the args we may wish to pass to the pydantic `Field()` function
     field_args = {
         "default": default,
         "default_factory": default_factory,
         "title": title,
         "description": description,
         "pattern": pattern,
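The conflict described in the comment block above can be reproduced with a tiny standalone model; the names here are hypothetical and only mimic what `InputField()` arranges internally:

```python
from typing import Optional

from pydantic import BaseModel


class ImageField(BaseModel):
    image_name: str


class ResizeSketch(BaseModel):
    # Annotated as if required by invoke(), but secretly optional so the node
    # can be instantiated before its connections deliver a value.
    image: Optional[ImageField] = None


node = ResizeSketch()  # OK at graph-build time: no image yet
assert node.image is None

node.image = ImageField(image_name="example.png")  # value arrives via a connection
```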
@@ -266,70 +335,34 @@ def InputField(
|
||||
"max_length": max_length,
|
||||
}

    """
    Invocation definitions have their fields typed correctly for their `invoke()` functions.
    This typing is often more specific than the actual invocation definition requires, because
    fields may have values provided only by connections.

    For example, consider a ResizeImageInvocation with an `image: ImageField` field.

    `image` is required during the call to `invoke()`, but when the python class is instantiated,
    the field may not be present. This is fine, because that image field will be provided by
    an ancestor node that outputs the image.

    So we'd like to type that `image` field as `Optional[ImageField]`. If we do that, however, then
    we need to handle a lot of extra logic in the `invoke()` function to check if the field has a
    value or not. This is very tedious.

    Ideally, the invocation definition would be able to specify that the field is required during
    invocation, but optional during instantiation. So the field would be typed as `image: ImageField`,
    but when calling the `invoke()` function, we raise an error if the field is not present.

    To do this, we need to do a bit of finagling to make the pydantic field optional, and then do
    extra validation when calling `invoke()`.

    There is some additional logic here to cleanly create the pydantic field via the wrapper.
    """

    # Filter out field args not provided
    # We only want to pass the args that were provided, otherwise the `Field()` function won't work as expected
    provided_args = {k: v for (k, v) in field_args.items() if v is not PydanticUndefined}

    if (default is not PydanticUndefined) and (default_factory is not PydanticUndefined):
        raise ValueError("Cannot specify both default and default_factory")
    # Because we are manually making fields optional, we need to store the original required bool for reference later
    json_schema_extra_.orig_required = default is PydanticUndefined

    # because we are manually making fields optional, we need to store the original required bool for reference later
    if default is PydanticUndefined and default_factory is PydanticUndefined:
        json_schema_extra_.update({"orig_required": True})
    else:
        json_schema_extra_.update({"orig_required": False})

    # make Input.Any and Input.Connection fields optional, providing None as a default if the field doesn't already have one
    if (input is Input.Any or input is Input.Connection) and default_factory is PydanticUndefined:
    # Make Input.Any and Input.Connection fields optional, providing None as a default if the field doesn't already have one
    if input is Input.Any or input is Input.Connection:
        default_ = None if default is PydanticUndefined else default
        provided_args.update({"default": default_})
        if default is not PydanticUndefined:
            # before invoking, we'll grab the original default value and set it on the field if the field wasn't provided a value
            json_schema_extra_.update({"default": default})
            json_schema_extra_.update({"orig_default": default})
    elif default is not PydanticUndefined and default_factory is PydanticUndefined:
        # Before invoking, we'll check for the original default value and set it on the field if the field has no value
        json_schema_extra_.default = default
        json_schema_extra_.orig_default = default
    elif default is not PydanticUndefined:
        default_ = default
        provided_args.update({"default": default_})
        json_schema_extra_.update({"orig_default": default_})
    elif default_factory is not PydanticUndefined:
        provided_args.update({"default_factory": default_factory})
        # TODO: cannot serialize default_factory...
        # json_schema_extra_.update(dict(orig_default_factory=default_factory))
        json_schema_extra_.orig_default = default_

    return Field(
        **provided_args,
        json_schema_extra=json_schema_extra_,
        json_schema_extra=json_schema_extra_.model_dump(exclude_none=True),
    )


def OutputField(
    # copied from pydantic's Field
    default: Any = _Unset,
    default_factory: Callable[[], Any] | None = _Unset,
    title: str | None = _Unset,
    description: str | None = _Unset,
    pattern: str | None = _Unset,
@@ -362,13 +395,12 @@ def OutputField(
    `MainModelField`. So to ensure the base-model-specific UI is rendered, you can use \
    `UIType.SDXLMainModelField` to indicate that the field is an SDXL main model field.

    : param bool ui_hidden: [False] Specifies whether or not this field should be hidden in the UI. \
    :param bool ui_hidden: [False] Specifies whether or not this field should be hidden in the UI. \

    : param int ui_order: [None] Specifies the order in which this field should be rendered in the UI. \
    :param int ui_order: [None] Specifies the order in which this field should be rendered in the UI. \
    """
    return Field(
        default=default,
        default_factory=default_factory,
        title=title,
        description=description,
        pattern=pattern,
@@ -383,12 +415,12 @@ def OutputField(
        decimal_places=decimal_places,
        min_length=min_length,
        max_length=max_length,
        json_schema_extra={
            "ui_type": ui_type,
            "ui_hidden": ui_hidden,
            "ui_order": ui_order,
            "_field_kind": "output",
        },
        json_schema_extra=OutputFieldJSONSchemaExtra(
            ui_type=ui_type,
            ui_hidden=ui_hidden,
            ui_order=ui_order,
            field_kind=FieldKind.Output,
        ).model_dump(exclude_none=True),
    )
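
A quick aside on why the new json_schema_extra is built from a model and dumped with `exclude_none=True`: unset UI attributes disappear from the schema instead of showing up as nulls. A standalone pydantic sketch (the field names here are illustrative):

from typing import Optional
from pydantic import BaseModel

class UIExtra(BaseModel):
    ui_hidden: bool = False
    ui_order: Optional[int] = None

print(UIExtra().model_dump(exclude_none=True))  # {'ui_hidden': False} - no ui_order key
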


@@ -401,10 +433,10 @@ class UIConfigBase(BaseModel):
    tags: Optional[list[str]] = Field(default_factory=None, description="The node's tags")
    title: Optional[str] = Field(default=None, description="The node's display name")
    category: Optional[str] = Field(default=None, description="The node's category")
    version: Optional[str] = Field(
        default=None,
    version: str = Field(
        description='The node\'s version. Should be a valid semver string e.g. "1.0.0" or "3.8.13".',
    )
    node_pack: Optional[str] = Field(default=None, description="Whether or not this is a custom node")

    model_config = ConfigDict(
        validate_assignment=True,
@@ -447,29 +479,39 @@ class BaseInvocationOutput(BaseModel):

    @classmethod
    def register_output(cls, output: BaseInvocationOutput) -> None:
        """Registers an invocation output."""
        cls._output_classes.add(output)

    @classmethod
    def get_outputs(cls) -> Iterable[BaseInvocationOutput]:
        """Gets all invocation outputs."""
        return cls._output_classes

    @classmethod
    def get_outputs_union(cls) -> UnionType:
        """Gets a union of all invocation outputs."""
        outputs_union = Union[tuple(cls._output_classes)]  # type: ignore [valid-type]
        return outputs_union  # type: ignore [return-value]
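
The dynamic union built here behaves exactly like a hand-written `Union`. A minimal standalone check:

from typing import Union

class A: ...
class B: ...

DynamicUnion = Union[tuple({A, B})]  # type: ignore [valid-type]
print(DynamicUnion == Union[A, B])   # True - member order does not matter
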

    @classmethod
    def get_output_types(cls) -> Iterable[str]:
        return (get_type(i) for i in BaseInvocationOutput.get_outputs())
        """Gets all invocation output types."""
        return (i.get_type() for i in BaseInvocationOutput.get_outputs())

    @staticmethod
    def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseModel]) -> None:
        """Adds various UI-facing attributes to the invocation output's OpenAPI schema."""
        # Because we use a pydantic Literal field with default value for the invocation type,
        # it will be typed as optional in the OpenAPI schema. Make it required manually.
        if "required" not in schema or not isinstance(schema["required"], list):
            schema["required"] = []
        schema["required"].extend(["type"])

    @classmethod
    def get_type(cls) -> str:
        """Gets the invocation output's type, as provided by the `@invocation_output` decorator."""
        return cls.model_fields["type"].default

    model_config = ConfigDict(
        protected_namespaces=(),
        validate_assignment=True,
@@ -499,21 +541,29 @@ class BaseInvocation(ABC, BaseModel):

    _invocation_classes: ClassVar[set[BaseInvocation]] = set()

    @classmethod
    def get_type(cls) -> str:
        """Gets the invocation's type, as provided by the `@invocation` decorator."""
        return cls.model_fields["type"].default

    @classmethod
    def register_invocation(cls, invocation: BaseInvocation) -> None:
        """Registers an invocation."""
        cls._invocation_classes.add(invocation)

    @classmethod
    def get_invocations_union(cls) -> UnionType:
        """Gets a union of all invocation types."""
        invocations_union = Union[tuple(cls._invocation_classes)]  # type: ignore [valid-type]
        return invocations_union  # type: ignore [return-value]

    @classmethod
    def get_invocations(cls) -> Iterable[BaseInvocation]:
        """Gets all invocations, respecting the allowlist and denylist."""
        app_config = InvokeAIAppConfig.get_config()
        allowed_invocations: set[BaseInvocation] = set()
        for sc in cls._invocation_classes:
            invocation_type = get_type(sc)
            invocation_type = sc.get_type()
            is_in_allowlist = (
                invocation_type in app_config.allow_nodes if isinstance(app_config.allow_nodes, list) else True
            )
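
The allowlist half of the gate is visible above; the denylist half falls outside this hunk, so its shape below is an assumption. Reduced to a plain function:

from typing import Optional

def is_allowed(invocation_type: str, allow: Optional[list[str]], deny: Optional[list[str]]) -> bool:
    in_allowlist = invocation_type in allow if isinstance(allow, list) else True
    in_denylist = invocation_type in deny if isinstance(deny, list) else False  # assumed symmetry
    return in_allowlist and not in_denylist

print(is_allowed("img_resize", allow=None, deny=["img_resize"]))  # False
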
@@ -526,28 +576,32 @@ class BaseInvocation(ABC, BaseModel):

    @classmethod
    def get_invocations_map(cls) -> dict[str, BaseInvocation]:
        # Get the type strings out of the literals and into a dictionary
        return {get_type(i): i for i in BaseInvocation.get_invocations()}
        """Gets a map of all invocation types to their invocation classes."""
        return {i.get_type(): i for i in BaseInvocation.get_invocations()}

    @classmethod
    def get_invocation_types(cls) -> Iterable[str]:
        return (get_type(i) for i in BaseInvocation.get_invocations())
        """Gets all invocation types."""
        return (i.get_type() for i in BaseInvocation.get_invocations())

    @classmethod
    def get_output_type(cls) -> BaseInvocationOutput:
    def get_output_annotation(cls) -> BaseInvocationOutput:
        """Gets the invocation's output annotation (i.e. the return annotation of its `invoke()` method)."""
        return signature(cls.invoke).return_annotation

    @staticmethod
    def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseModel]) -> None:
        # Add the various UI-facing attributes to the schema. These are used to build the invocation templates.
        uiconfig = getattr(model_class, "UIConfig", None)
        if uiconfig and hasattr(uiconfig, "title"):
            schema["title"] = uiconfig.title
        if uiconfig and hasattr(uiconfig, "tags"):
            schema["tags"] = uiconfig.tags
        if uiconfig and hasattr(uiconfig, "category"):
            schema["category"] = uiconfig.category
        if uiconfig and hasattr(uiconfig, "version"):
    def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseModel], *args, **kwargs) -> None:
        """Adds various UI-facing attributes to the invocation's OpenAPI schema."""
        uiconfig = cast(UIConfigBase | None, getattr(model_class, "UIConfig", None))
        if uiconfig is not None:
            if uiconfig.title is not None:
                schema["title"] = uiconfig.title
            if uiconfig.tags is not None:
                schema["tags"] = uiconfig.tags
            if uiconfig.category is not None:
                schema["category"] = uiconfig.category
            if uiconfig.node_pack is not None:
                schema["node_pack"] = uiconfig.node_pack
            schema["version"] = uiconfig.version
        if "required" not in schema or not isinstance(schema["required"], list):
            schema["required"] = []
@@ -559,6 +613,10 @@ class BaseInvocation(ABC, BaseModel):
        pass

    def invoke_internal(self, context: InvocationContext) -> BaseInvocationOutput:
        """
        Internal invoke method, calls `invoke()` after some prep.
        Handles optional fields that are required to call `invoke()` and invocation cache.
        """
        for field_name, field in self.model_fields.items():
            if not field.json_schema_extra or callable(field.json_schema_extra):
                # something has gone terribly awry, we should always have this and it should be a dict
@@ -598,21 +656,20 @@ class BaseInvocation(ABC, BaseModel):
            context.services.logger.debug(f'Skipping invocation cache for "{self.get_type()}": {self.id}')
            return self.invoke(context)

    def get_type(self) -> str:
        return self.model_fields["type"].default

    id: str = Field(
        default_factory=uuid_string,
        description="The id of this instance of an invocation. Must be unique among all instances of invocations.",
        json_schema_extra={"_field_kind": "internal"},
        json_schema_extra={"field_kind": FieldKind.NodeAttribute},
    )
    is_intermediate: bool = Field(
        default=False,
        description="Whether or not this is an intermediate invocation.",
        json_schema_extra={"ui_type": UIType.IsIntermediate, "_field_kind": "internal"},
        json_schema_extra={"ui_type": "IsIntermediate", "field_kind": FieldKind.NodeAttribute},
    )
    use_cache: bool = Field(
        default=True, description="Whether or not to use the cache", json_schema_extra={"_field_kind": "internal"}
        default=True,
        description="Whether or not to use the cache",
        json_schema_extra={"field_kind": FieldKind.NodeAttribute},
    )

    UIConfig: ClassVar[Type[UIConfigBase]]
@@ -629,12 +686,15 @@ class BaseInvocation(ABC, BaseModel):
TBaseInvocation = TypeVar("TBaseInvocation", bound=BaseInvocation)


RESERVED_INPUT_FIELD_NAMES = {
RESERVED_NODE_ATTRIBUTE_FIELD_NAMES = {
    "id",
    "is_intermediate",
    "use_cache",
    "type",
    "workflow",
}

RESERVED_INPUT_FIELD_NAMES = {
    "metadata",
}

@@ -652,40 +712,59 @@ RESERVED_PYDANTIC_FIELD_NAMES = {m[0] for m in inspect.getmembers(_Model())}
def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None:
    """
    Validates the fields of an invocation or invocation output:
    - must not override any pydantic reserved fields
    - must be created via `InputField`, `OutputField`, or be an internal field defined in this file
    - Must not override any pydantic reserved fields
    - Must have a type annotation
    - Must have a json_schema_extra dict
    - Must have field_kind in json_schema_extra
    - Field name must not be reserved, according to its field_kind
    """
    for name, field in model_fields.items():
        if name in RESERVED_PYDANTIC_FIELD_NAMES:
            raise InvalidFieldError(f'Invalid field name "{name}" on "{model_type}" (reserved by pydantic)')

        field_kind = (
            # _field_kind is defined via InputField(), OutputField() or by one of the internal fields defined in this file
            field.json_schema_extra.get("_field_kind", None) if field.json_schema_extra else None
        )
        if not field.annotation:
            raise InvalidFieldError(f'Invalid field type "{name}" on "{model_type}" (missing annotation)')

        if not isinstance(field.json_schema_extra, dict):
            raise InvalidFieldError(
                f'Invalid field definition for "{name}" on "{model_type}" (missing json_schema_extra dict)'
            )

        field_kind = field.json_schema_extra.get("field_kind", None)

        # must have a field_kind
        if field_kind is None or field_kind not in {"input", "output", "internal"}:
        if not isinstance(field_kind, FieldKind):
            raise InvalidFieldError(
                f'Invalid field definition for "{name}" on "{model_type}" (maybe it\'s not an InputField or OutputField?)'
            )

        if field_kind == "input" and name in RESERVED_INPUT_FIELD_NAMES:
        if field_kind is FieldKind.Input and (
            name in RESERVED_NODE_ATTRIBUTE_FIELD_NAMES or name in RESERVED_INPUT_FIELD_NAMES
        ):
            raise InvalidFieldError(f'Invalid field name "{name}" on "{model_type}" (reserved input field name)')

        if field_kind == "output" and name in RESERVED_OUTPUT_FIELD_NAMES:
        if field_kind is FieldKind.Output and name in RESERVED_OUTPUT_FIELD_NAMES:
            raise InvalidFieldError(f'Invalid field name "{name}" on "{model_type}" (reserved output field name)')

        # internal fields *must* be in the reserved list
        if (
            field_kind == "internal"
            and name not in RESERVED_INPUT_FIELD_NAMES
            and name not in RESERVED_OUTPUT_FIELD_NAMES
        ):
        if (field_kind is FieldKind.Internal) and name not in RESERVED_INPUT_FIELD_NAMES:
            raise InvalidFieldError(
                f'Invalid field name "{name}" on "{model_type}" (internal field without reserved name)'
            )

        # node attribute fields *must* be in the reserved list
        if (
            field_kind is FieldKind.NodeAttribute
            and name not in RESERVED_NODE_ATTRIBUTE_FIELD_NAMES
            and name not in RESERVED_OUTPUT_FIELD_NAMES
        ):
            raise InvalidFieldError(
                f'Invalid field name "{name}" on "{model_type}" (node attribute field without reserved name)'
            )

        ui_type = field.json_schema_extra.get("ui_type", None)
        if isinstance(ui_type, str) and ui_type.startswith("DEPRECATED_"):
            logger.warn(f"\"UIType.{ui_type.split('_')[-1]}\" is deprecated, ignoring")
            field.json_schema_extra.pop("ui_type")
    return None
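
The switch from string comparison to an `isinstance` check against a `FieldKind` enum is the heart of this change. A standalone sketch of the behaviour (the enum values here are assumptions):

from enum import Enum

class FieldKind(str, Enum):
    Input = "input"
    Output = "output"
    Internal = "internal"
    NodeAttribute = "node_attribute"

assert isinstance(FieldKind.Input, FieldKind)  # a real kind passes
assert not isinstance("input", FieldKind)      # a bare string no longer does
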


@@ -720,21 +799,30 @@ def invocation(
    validate_fields(cls.model_fields, invocation_type)

    # Add OpenAPI schema extras
    uiconf_name = cls.__qualname__ + ".UIConfig"
    if not hasattr(cls, "UIConfig") or cls.UIConfig.__qualname__ != uiconf_name:
        cls.UIConfig = type(uiconf_name, (UIConfigBase,), {})
    if title is not None:
        cls.UIConfig.title = title
    if tags is not None:
        cls.UIConfig.tags = tags
    if category is not None:
        cls.UIConfig.category = category
    uiconfig_name = cls.__qualname__ + ".UIConfig"
    if not hasattr(cls, "UIConfig") or cls.UIConfig.__qualname__ != uiconfig_name:
        cls.UIConfig = type(uiconfig_name, (UIConfigBase,), {})
    cls.UIConfig.title = title
    cls.UIConfig.tags = tags
    cls.UIConfig.category = category

    # Grab the node pack's name from the module name, if it's a custom node
    module_name = cls.__module__.split(".")[0]
    if module_name.endswith(CUSTOM_NODE_PACK_SUFFIX):
        cls.UIConfig.node_pack = module_name.split(CUSTOM_NODE_PACK_SUFFIX)[0]
    else:
        cls.UIConfig.node_pack = None

    if version is not None:
        try:
            semver.Version.parse(version)
        except ValueError as e:
            raise InvalidVersionError(f'Invalid version string for node "{invocation_type}": "{version}"') from e
        cls.UIConfig.version = version
    else:
        logger.warn(f'No version specified for node "{invocation_type}", using "1.0.0"')
        cls.UIConfig.version = "1.0.0"
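
The semver gate above, pulled out on its own (this uses the real `semver` package, as the diff does):

import semver

def check_version(version: str) -> str:
    try:
        semver.Version.parse(version)  # raises ValueError on e.g. "1.0" or "v1.0.0"
    except ValueError as e:
        raise ValueError(f"invalid semver string: {version!r}") from e
    return version

print(check_version("1.0.1"))  # ok
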

    if use_cache is not None:
        cls.model_fields["use_cache"].default = use_cache

@@ -749,7 +837,7 @@ def invocation(

    invocation_type_annotation = Literal[invocation_type]  # type: ignore
    invocation_type_field = Field(
        title="type", default=invocation_type, json_schema_extra={"_field_kind": "internal"}
        title="type", default=invocation_type, json_schema_extra={"field_kind": FieldKind.NodeAttribute}
    )

    docstring = cls.__doc__
@@ -795,7 +883,9 @@ def invocation_output(
    # Add the output type to the model.

    output_type_annotation = Literal[output_type]  # type: ignore
    output_type_field = Field(title="type", default=output_type, json_schema_extra={"_field_kind": "internal"})
    output_type_field = Field(
        title="type", default=output_type, json_schema_extra={"field_kind": FieldKind.NodeAttribute}
    )

    docstring = cls.__doc__
    cls = create_model(
@@ -827,7 +917,7 @@ WorkflowFieldValidator = TypeAdapter(WorkflowField)

class WithWorkflow(BaseModel):
    workflow: Optional[WorkflowField] = Field(
        default=None, description=FieldDescriptions.workflow, json_schema_extra={"_field_kind": "internal"}
        default=None, description=FieldDescriptions.workflow, json_schema_extra={"field_kind": FieldKind.NodeAttribute}
    )


@@ -845,5 +935,11 @@ MetadataFieldValidator = TypeAdapter(MetadataField)

class WithMetadata(BaseModel):
    metadata: Optional[MetadataField] = Field(
        default=None, description=FieldDescriptions.metadata, json_schema_extra={"_field_kind": "internal"}
        default=None,
        description=FieldDescriptions.metadata,
        json_schema_extra=InputFieldJSONSchemaExtra(
            field_kind=FieldKind.Internal,
            input=Input.Connection,
            orig_required=False,
        ).model_dump(exclude_none=True),
    )

@@ -5,7 +5,7 @@ import numpy as np
from pydantic import ValidationInfo, field_validator

from invokeai.app.invocations.primitives import IntegerCollectionOutput
from invokeai.app.util.misc import SEED_MAX, get_random_seed
from invokeai.app.util.misc import SEED_MAX

from .baseinvocation import BaseInvocation, InputField, InvocationContext, invocation

@@ -55,7 +55,7 @@ class RangeOfSizeInvocation(BaseInvocation):
    title="Random Range",
    tags=["range", "integer", "random", "collection"],
    category="collections",
    version="1.0.0",
    version="1.0.1",
    use_cache=False,
)
class RandomRangeInvocation(BaseInvocation):
@@ -65,10 +65,10 @@ class RandomRangeInvocation(BaseInvocation):
    high: int = InputField(default=np.iinfo(np.int32).max, description="The exclusive high value")
    size: int = InputField(default=1, description="The number of values to generate")
    seed: int = InputField(
        default=0,
        ge=0,
        le=SEED_MAX,
        description="The seed for the RNG (omit for random)",
        default_factory=get_random_seed,
    )

    def invoke(self, context: InvocationContext) -> IntegerCollectionOutput:

@@ -6,6 +6,7 @@ import sys
from importlib.util import module_from_spec, spec_from_file_location
from pathlib import Path

from invokeai.app.invocations.baseinvocation import CUSTOM_NODE_PACK_SUFFIX
from invokeai.backend.util.logging import InvokeAILogger

logger = InvokeAILogger.get_logger()
@@ -32,13 +33,15 @@ for d in Path(__file__).parent.iterdir():
    if module_name in globals():
        continue

    # we have a legit module to import
    spec = spec_from_file_location(module_name, init.absolute())
    # load the module, adding a suffix to identify it as a custom node pack
    spec = spec_from_file_location(f"{module_name}{CUSTOM_NODE_PACK_SUFFIX}", init.absolute())

    if spec is None or spec.loader is None:
        logger.warn(f"Could not load {init}")
        continue

    logger.info(f"Loading node pack {module_name}")

    module = module_from_spec(spec)
    sys.modules[spec.name] = module
    spec.loader.exec_module(module)
@@ -47,5 +50,5 @@ for d in Path(__file__).parent.iterdir():

    del init, module_name


logger.info(f"Loaded {loaded_count} modules from {Path(__file__).parent}")
if loaded_count > 0:
    logger.info(f"Loaded {loaded_count} node packs from {Path(__file__).parent}")

@@ -100,6 +100,61 @@ class ImageCropInvocation(BaseInvocation, WithWorkflow, WithMetadata):
    )


@invocation(
    invocation_type="img_pad_crop",
    title="Center Pad or Crop Image",
    category="image",
    tags=["image", "pad", "crop"],
    version="1.0.0",
)
class CenterPadCropInvocation(BaseInvocation):
    """Pad or crop an image's sides from the center by specified pixels. Positive values are outside of the image."""

    image: ImageField = InputField(description="The image to crop")
    left: int = InputField(
        default=0,
        description="Number of pixels to pad/crop from the left (negative values crop inwards, positive values pad outwards)",
    )
    right: int = InputField(
        default=0,
        description="Number of pixels to pad/crop from the right (negative values crop inwards, positive values pad outwards)",
    )
    top: int = InputField(
        default=0,
        description="Number of pixels to pad/crop from the top (negative values crop inwards, positive values pad outwards)",
    )
    bottom: int = InputField(
        default=0,
        description="Number of pixels to pad/crop from the bottom (negative values crop inwards, positive values pad outwards)",
    )

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.services.images.get_pil_image(self.image.image_name)

        # Calculate and create new image dimensions
        new_width = image.width + self.right + self.left
        new_height = image.height + self.top + self.bottom
        image_crop = Image.new(mode="RGBA", size=(new_width, new_height), color=(0, 0, 0, 0))

        # Paste new image onto input
        image_crop.paste(image, (self.left, self.top))

        image_dto = context.services.images.create(
            image=image_crop,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            node_id=self.id,
            session_id=context.graph_execution_state_id,
            is_intermediate=self.is_intermediate,
        )

        return ImageOutput(
            image=ImageField(image_name=image_dto.image_name),
            width=image_dto.width,
            height=image_dto.height,
        )
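
The pad/crop arithmetic in isolation, as a plain PIL function: positive margins pad the canvas, negative margins crop.

from PIL import Image

def center_pad_crop(image: Image.Image, left: int, right: int, top: int, bottom: int) -> Image.Image:
    canvas = Image.new("RGBA", (image.width + left + right, image.height + top + bottom), (0, 0, 0, 0))
    canvas.paste(image, (left, top))  # negative offsets shift the source off-canvas, i.e. crop
    return canvas

img = Image.new("RGB", (64, 64), "white")
print(center_pad_crop(img, 8, 8, -4, -4).size)  # (80, 56)
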


@invocation("img_paste", title="Paste Image", tags=["image", "paste"], category="image", version="1.1.0")
class ImagePasteInvocation(BaseInvocation, WithWorkflow, WithMetadata):
    """Pastes an image into another image."""

@@ -8,7 +8,7 @@ from PIL import Image, ImageOps

from invokeai.app.invocations.primitives import ColorField, ImageField, ImageOutput
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
from invokeai.app.util.misc import SEED_MAX, get_random_seed
from invokeai.app.util.misc import SEED_MAX
from invokeai.backend.image_util.cv2_inpaint import cv2_inpaint
from invokeai.backend.image_util.lama import LaMA
from invokeai.backend.image_util.patchmatch import PatchMatch
@@ -154,17 +154,17 @@ class InfillColorInvocation(BaseInvocation, WithWorkflow, WithMetadata):
    )


@invocation("infill_tile", title="Tile Infill", tags=["image", "inpaint"], category="inpaint", version="1.1.0")
@invocation("infill_tile", title="Tile Infill", tags=["image", "inpaint"], category="inpaint", version="1.1.1")
class InfillTileInvocation(BaseInvocation, WithWorkflow, WithMetadata):
    """Infills transparent areas of an image with tiles of the image"""

    image: ImageField = InputField(description="The image to infill")
    tile_size: int = InputField(default=32, ge=1, description="The tile size (px)")
    seed: int = InputField(
        default=0,
        ge=0,
        le=SEED_MAX,
        description="The seed to use for tile generation (omit for random)",
        default_factory=get_random_seed,
    )

    def invoke(self, context: InvocationContext) -> ImageOutput:

@@ -11,7 +11,6 @@ from invokeai.app.invocations.baseinvocation import (
    InputField,
    InvocationContext,
    OutputField,
    UIType,
    invocation,
    invocation_output,
)
@@ -67,7 +66,7 @@ class IPAdapterInvocation(BaseInvocation):

    # weight: float = InputField(default=1.0, description="The weight of the IP-Adapter.", ui_type=UIType.Float)
    weight: Union[float, List[float]] = InputField(
        default=1, ge=-1, description="The weight given to the IP-Adapter", ui_type=UIType.Float, title="Weight"
        default=1, ge=-1, description="The weight given to the IP-Adapter", title="Weight"
    )

    begin_step_percent: float = InputField(

@@ -215,7 +215,7 @@ def get_scheduler(
    title="Denoise Latents",
    tags=["latents", "denoise", "txt2img", "t2i", "t2l", "img2img", "i2i", "l2l"],
    category="latents",
    version="1.4.0",
    version="1.5.0",
)
class DenoiseLatentsInvocation(BaseInvocation):
    """Denoises noisy latents to decodable images"""
@@ -273,8 +273,14 @@ class DenoiseLatentsInvocation(BaseInvocation):
        input=Input.Connection,
        ui_order=7,
    )
    cfg_rescale_multiplier: float = InputField(
        default=0, ge=0, lt=1, description=FieldDescriptions.cfg_rescale_multiplier
    )
    latents: Optional[LatentsField] = InputField(
        default=None, description=FieldDescriptions.latents, input=Input.Connection
        default=None,
        description=FieldDescriptions.latents,
        input=Input.Connection,
        ui_order=4,
    )
    denoise_mask: Optional[DenoiseMaskField] = InputField(
        default=None,
@@ -329,6 +335,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
        unconditioned_embeddings=uc,
        text_embeddings=c,
        guidance_scale=self.cfg_scale,
        guidance_rescale_multiplier=self.cfg_rescale_multiplier,
        extra=extra_conditioning_info,
        postprocessing_settings=PostprocessingSettings(
            threshold=0.0,  # threshold,

@@ -127,6 +127,9 @@ class CoreMetadataInvocation(BaseInvocation):
    seed: Optional[int] = InputField(default=None, description="The seed used for noise generation")
    rand_device: Optional[str] = InputField(default=None, description="The device used for random number generation")
    cfg_scale: Optional[float] = InputField(default=None, description="The classifier-free guidance scale parameter")
    cfg_rescale_multiplier: Optional[float] = InputField(
        default=None, description=FieldDescriptions.cfg_rescale_multiplier
    )
    steps: Optional[int] = InputField(default=None, description="The number of steps used for inference")
    scheduler: Optional[str] = InputField(default=None, description="The scheduler used for inference")
    seamless_x: Optional[bool] = InputField(default=None, description="Whether seamless tiling was used on the X axis")

@@ -14,7 +14,6 @@ from .baseinvocation import (
    InputField,
    InvocationContext,
    OutputField,
    UIType,
    invocation,
    invocation_output,
)
@@ -395,7 +394,6 @@ class VaeLoaderInvocation(BaseInvocation):
    vae_model: VAEModelField = InputField(
        description=FieldDescriptions.vae_model,
        input=Input.Direct,
        ui_type=UIType.VaeModel,
        title="VAE",
    )


@@ -6,7 +6,7 @@ from pydantic import field_validator

from invokeai.app.invocations.latent import LatentsField
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.app.util.misc import SEED_MAX, get_random_seed
from invokeai.app.util.misc import SEED_MAX

from ...backend.util.devices import choose_torch_device, torch_dtype
from .baseinvocation import (
@@ -83,16 +83,16 @@ def build_noise_output(latents_name: str, latents: torch.Tensor, seed: int):
    title="Noise",
    tags=["latents", "noise"],
    category="latents",
    version="1.0.0",
    version="1.0.1",
)
class NoiseInvocation(BaseInvocation):
    """Generates latent noise."""

    seed: int = InputField(
        default=0,
        ge=0,
        le=SEED_MAX,
        description=FieldDescriptions.seed,
        default_factory=get_random_seed,
    )
    width: int = InputField(
        default=512,

@@ -62,12 +62,12 @@ class BooleanInvocation(BaseInvocation):
    title="Boolean Collection Primitive",
    tags=["primitives", "boolean", "collection"],
    category="primitives",
    version="1.0.0",
    version="1.0.1",
)
class BooleanCollectionInvocation(BaseInvocation):
    """A collection of boolean primitive values"""

    collection: list[bool] = InputField(default_factory=list, description="The collection of boolean values")
    collection: list[bool] = InputField(default=[], description="The collection of boolean values")

    def invoke(self, context: InvocationContext) -> BooleanCollectionOutput:
        return BooleanCollectionOutput(collection=self.collection)
@@ -111,12 +111,12 @@ class IntegerInvocation(BaseInvocation):
    title="Integer Collection Primitive",
    tags=["primitives", "integer", "collection"],
    category="primitives",
    version="1.0.0",
    version="1.0.1",
)
class IntegerCollectionInvocation(BaseInvocation):
    """A collection of integer primitive values"""

    collection: list[int] = InputField(default_factory=list, description="The collection of integer values")
    collection: list[int] = InputField(default=[], description="The collection of integer values")

    def invoke(self, context: InvocationContext) -> IntegerCollectionOutput:
        return IntegerCollectionOutput(collection=self.collection)
@@ -158,12 +158,12 @@ class FloatInvocation(BaseInvocation):
    title="Float Collection Primitive",
    tags=["primitives", "float", "collection"],
    category="primitives",
    version="1.0.0",
    version="1.0.1",
)
class FloatCollectionInvocation(BaseInvocation):
    """A collection of float primitive values"""

    collection: list[float] = InputField(default_factory=list, description="The collection of float values")
    collection: list[float] = InputField(default=[], description="The collection of float values")

    def invoke(self, context: InvocationContext) -> FloatCollectionOutput:
        return FloatCollectionOutput(collection=self.collection)
@@ -205,12 +205,12 @@ class StringInvocation(BaseInvocation):
    title="String Collection Primitive",
    tags=["primitives", "string", "collection"],
    category="primitives",
    version="1.0.0",
    version="1.0.1",
)
class StringCollectionInvocation(BaseInvocation):
    """A collection of string primitive values"""

    collection: list[str] = InputField(default_factory=list, description="The collection of string values")
    collection: list[str] = InputField(default=[], description="The collection of string values")

    def invoke(self, context: InvocationContext) -> StringCollectionOutput:
        return StringCollectionOutput(collection=self.collection)
@@ -467,13 +467,13 @@ class ConditioningInvocation(BaseInvocation):
    title="Conditioning Collection Primitive",
    tags=["primitives", "conditioning", "collection"],
    category="primitives",
    version="1.0.0",
    version="1.0.1",
)
class ConditioningCollectionInvocation(BaseInvocation):
    """A collection of conditioning tensor primitive values"""

    collection: list[ConditioningField] = InputField(
        default_factory=list,
        default=[],
        description="The collection of conditioning tensors",
    )


@@ -44,7 +44,7 @@ class DynamicPromptInvocation(BaseInvocation):
    title="Prompts from File",
    tags=["prompt", "file"],
    category="prompt",
    version="1.0.0",
    version="1.0.1",
)
class PromptsFromFileInvocation(BaseInvocation):
    """Loads prompts from a text file"""
@@ -82,7 +82,7 @@ class PromptsFromFileInvocation(BaseInvocation):
        end_line = start_line + max_prompts
        if max_prompts <= 0:
            end_line = np.iinfo(np.int32).max
        with open(file_path) as f:
        with open(file_path, encoding="utf-8") as f:
            for i, line in enumerate(f):
                if i >= start_line and i < end_line:
                    prompts.append((pre_prompt or "") + line.strip() + (post_prompt or ""))
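
The windowed read above, reduced to a standalone function (the names are illustrative; the explicit `encoding="utf-8"` matches the new version of the line):

def read_prompts(path: str, start_line: int = 0, max_prompts: int = 0,
                 pre_prompt: str = "", post_prompt: str = "") -> list[str]:
    end_line = start_line + max_prompts if max_prompts > 0 else float("inf")
    prompts: list[str] = []
    with open(path, encoding="utf-8") as f:
        for i, line in enumerate(f):
            if start_line <= i < end_line:  # half-open window [start_line, end_line)
                prompts.append(pre_prompt + line.strip() + post_prompt)
    return prompts
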

@@ -9,7 +9,6 @@ from invokeai.app.invocations.baseinvocation import (
    InputField,
    InvocationContext,
    OutputField,
    UIType,
    invocation,
    invocation_output,
)
@@ -59,7 +58,7 @@ class T2IAdapterInvocation(BaseInvocation):
        ui_order=-1,
    )
    weight: Union[float, list[float]] = InputField(
        default=1, ge=0, description="The weight given to the T2I-Adapter", ui_type=UIType.Float, title="Weight"
        default=1, ge=0, description="The weight given to the T2I-Adapter", title="Weight"
    )
    begin_step_percent: float = InputField(
        default=0, ge=-1, le=2, description="When the T2I-Adapter is first applied (% of total steps)"

@@ -2,16 +2,16 @@
from pathlib import Path
from typing import Literal

import cv2 as cv
import cv2
import numpy as np
import torch
from basicsr.archs.rrdbnet_arch import RRDBNet
from PIL import Image
from pydantic import ConfigDict
from realesrgan import RealESRGANer

from invokeai.app.invocations.primitives import ImageField, ImageOutput
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
from invokeai.backend.image_util.realesrgan.realesrgan import RealESRGAN
from invokeai.backend.util.devices import choose_torch_device

from .baseinvocation import BaseInvocation, InputField, InvocationContext, WithMetadata, WithWorkflow, invocation
@@ -92,9 +92,9 @@ class ESRGANInvocation(BaseInvocation, WithWorkflow, WithMetadata):

        esrgan_model_path = Path(f"core/upscaling/realesrgan/{self.model_name}")

        upsampler = RealESRGANer(
        upscaler = RealESRGAN(
            scale=netscale,
            model_path=str(models_path / esrgan_model_path),
            model_path=models_path / esrgan_model_path,
            model=rrdbnet_model,
            half=False,
            tile=self.tile_size,
@@ -102,15 +102,9 @@ class ESRGANInvocation(BaseInvocation, WithWorkflow, WithMetadata):

        # prepare image - Real-ESRGAN uses cv2 internally, and cv2 uses BGR vs RGB for PIL
        # TODO: This strips the alpha... is that okay?
        cv_image = cv.cvtColor(np.array(image.convert("RGB")), cv.COLOR_RGB2BGR)

        # We can pass an `outscale` value here, but it just resizes the image by that factor after
        # upscaling, so it's kinda pointless for our purposes. If you want something other than 4x
        # upscaling, you'll need to add a resize node after this one.
        upscaled_image, img_mode = upsampler.enhance(cv_image)

        # back to PIL
        pil_image = Image.fromarray(cv.cvtColor(upscaled_image, cv.COLOR_BGR2RGB)).convert("RGBA")
        cv2_image = cv2.cvtColor(np.array(image.convert("RGB")), cv2.COLOR_RGB2BGR)
        upscaled_image = upscaler.upscale(cv2_image)
        pil_image = Image.fromarray(cv2.cvtColor(upscaled_image, cv2.COLOR_BGR2RGB)).convert("RGBA")
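
The RGB/BGR shuffle around the upscaler is easy to get wrong. A standalone round-trip check with cv2, numpy and PIL:

import cv2
import numpy as np
from PIL import Image

pil_in = Image.new("RGB", (8, 8), (255, 0, 0))            # pure red
bgr = cv2.cvtColor(np.array(pil_in), cv2.COLOR_RGB2BGR)   # red moves to channel 2
rgb_back = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
print(Image.fromarray(rgb_back).getpixel((0, 0)))         # (255, 0, 0)
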

        torch.cuda.empty_cache()
        if choose_torch_device() == torch.device("mps"):

@@ -15,7 +15,7 @@ import os
import sys
from argparse import ArgumentParser
from pathlib import Path
from typing import ClassVar, Dict, List, Literal, Optional, Union, get_args, get_origin, get_type_hints
from typing import Any, ClassVar, Dict, List, Literal, Optional, Union, get_args, get_origin, get_type_hints

from omegaconf import DictConfig, ListConfig, OmegaConf
from pydantic_settings import BaseSettings, SettingsConfigDict
@@ -24,10 +24,7 @@ from invokeai.app.services.config.config_common import PagingArgumentParser, int


class InvokeAISettings(BaseSettings):
    """
    Runtime configuration settings in which default values are
    read from an omegaconf .yaml file.
    """
    """Runtime configuration settings in which default values are read from an omegaconf .yaml file."""

    initconf: ClassVar[Optional[DictConfig]] = None
    argparse_groups: ClassVar[Dict] = {}
@@ -35,6 +32,7 @@ class InvokeAISettings(BaseSettings):
    model_config = SettingsConfigDict(env_file_encoding="utf-8", arbitrary_types_allowed=True, case_sensitive=True)

    def parse_args(self, argv: Optional[list] = sys.argv[1:]):
        """Call to parse command-line arguments."""
        parser = self.get_parser()
        opt, unknown_opts = parser.parse_known_args(argv)
        if len(unknown_opts) > 0:
@@ -49,20 +47,19 @@ class InvokeAISettings(BaseSettings):
            setattr(self, name, value)

    def to_yaml(self) -> str:
        """
        Return a YAML string representing our settings. This can be used
        as the contents of `invokeai.yaml` to restore settings later.
        """
        """Return a YAML string representing our settings. This can be used as the contents of `invokeai.yaml` to restore settings later."""
        cls = self.__class__
        type = get_args(get_type_hints(cls)["type"])[0]
        field_dict = {type: {}}
        field_dict: Dict[str, Dict[str, Any]] = {type: {}}
        for name, field in self.model_fields.items():
            if name in cls._excluded_from_yaml():
                continue
            assert isinstance(field.json_schema_extra, dict)
            category = (
                field.json_schema_extra.get("category", "Uncategorized") if field.json_schema_extra else "Uncategorized"
            )
            value = getattr(self, name)
            assert isinstance(category, str)
            if category not in field_dict[type]:
                field_dict[type][category] = {}
            # keep paths as strings to make it easier to read
@@ -72,6 +69,7 @@ class InvokeAISettings(BaseSettings):

    @classmethod
    def add_parser_arguments(cls, parser):
        """Dynamically create arguments for a settings parser."""
        if "type" in get_type_hints(cls):
            settings_stanza = get_args(get_type_hints(cls)["type"])[0]
        else:
@@ -116,6 +114,7 @@ class InvokeAISettings(BaseSettings):

    @classmethod
    def cmd_name(cls, command_field: str = "type") -> str:
        """Return the category of a setting."""
        hints = get_type_hints(cls)
        if command_field in hints:
            return get_args(hints[command_field])[0]
@@ -124,6 +123,7 @@ class InvokeAISettings(BaseSettings):

    @classmethod
    def get_parser(cls) -> ArgumentParser:
        """Get the command-line parser for a setting."""
        parser = PagingArgumentParser(
            prog=cls.cmd_name(),
            description=cls.__doc__,
@@ -152,10 +152,14 @@ class InvokeAISettings(BaseSettings):
            "free_gpu_mem",
            "xformers_enabled",
            "tiled_decode",
            "lora_dir",
            "embedding_dir",
            "controlnet_dir",
        ]

    @classmethod
    def add_field_argument(cls, command_parser, name: str, field, default_override=None):
        """Add the argparse arguments for a setting parser."""
        field_type = get_type_hints(cls).get(name)
        default = (
            default_override

@@ -177,6 +177,7 @@ from typing import ClassVar, Dict, List, Literal, Optional, Union, get_type_hint

from omegaconf import DictConfig, OmegaConf
from pydantic import Field, TypeAdapter
from pydantic.config import JsonDict
from pydantic_settings import SettingsConfigDict

from .config_base import InvokeAISettings
@@ -188,28 +189,24 @@ DEFAULT_MAX_VRAM = 0.5


class Categories(object):
    WebServer = {"category": "Web Server"}
    Features = {"category": "Features"}
    Paths = {"category": "Paths"}
    Logging = {"category": "Logging"}
    Development = {"category": "Development"}
    Other = {"category": "Other"}
    ModelCache = {"category": "Model Cache"}
    Device = {"category": "Device"}
    Generation = {"category": "Generation"}
    Queue = {"category": "Queue"}
    Nodes = {"category": "Nodes"}
    MemoryPerformance = {"category": "Memory/Performance"}
    """Category headers for configuration variable groups."""

    WebServer: JsonDict = {"category": "Web Server"}
    Features: JsonDict = {"category": "Features"}
    Paths: JsonDict = {"category": "Paths"}
    Logging: JsonDict = {"category": "Logging"}
    Development: JsonDict = {"category": "Development"}
    Other: JsonDict = {"category": "Other"}
    ModelCache: JsonDict = {"category": "Model Cache"}
    Device: JsonDict = {"category": "Device"}
    Generation: JsonDict = {"category": "Generation"}
    Queue: JsonDict = {"category": "Queue"}
    Nodes: JsonDict = {"category": "Nodes"}
    MemoryPerformance: JsonDict = {"category": "Memory/Performance"}
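
How one of these category dicts ends up on a field, as a standalone pydantic sketch (the `Cfg` model is illustrative; `JsonDict` is the real pydantic import used above):

from pydantic import BaseModel, Field
from pydantic.config import JsonDict

Paths: JsonDict = {"category": "Paths"}

class Cfg(BaseModel):
    models_dir: str = Field(default="models", json_schema_extra=Paths)

print(Cfg.model_fields["models_dir"].json_schema_extra)  # {'category': 'Paths'}
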


class InvokeAIAppConfig(InvokeAISettings):
    """
    Generate images using Stable Diffusion. Use "invokeai" to launch
    the command-line client (recommended for experts only), or
    "invokeai-web" to launch the web server. Global options
    can be changed by editing the file "INVOKEAI_ROOT/invokeai.yaml" or by
    setting environment variables INVOKEAI_<setting>.
    """
    """Configuration object for InvokeAI App."""

    singleton_config: ClassVar[Optional[InvokeAIAppConfig]] = None
    singleton_init: ClassVar[Optional[Dict]] = None
@@ -234,15 +231,12 @@ class InvokeAIAppConfig(InvokeAISettings):

    # PATHS
    root : Optional[Path] = Field(default=None, description='InvokeAI runtime root directory', json_schema_extra=Categories.Paths)
    autoimport_dir : Optional[Path] = Field(default=Path('autoimport'), description='Path to a directory of models files to be imported on startup.', json_schema_extra=Categories.Paths)
    lora_dir : Optional[Path] = Field(default=None, description='Path to a directory of LoRA/LyCORIS models to be imported on startup.', json_schema_extra=Categories.Paths)
    embedding_dir : Optional[Path] = Field(default=None, description='Path to a directory of Textual Inversion embeddings to be imported on startup.', json_schema_extra=Categories.Paths)
    controlnet_dir : Optional[Path] = Field(default=None, description='Path to a directory of ControlNet embeddings to be imported on startup.', json_schema_extra=Categories.Paths)
    conf_path : Optional[Path] = Field(default=Path('configs/models.yaml'), description='Path to models definition file', json_schema_extra=Categories.Paths)
    models_dir : Optional[Path] = Field(default=Path('models'), description='Path to the models directory', json_schema_extra=Categories.Paths)
    legacy_conf_dir : Optional[Path] = Field(default=Path('configs/stable-diffusion'), description='Path to directory of legacy checkpoint config files', json_schema_extra=Categories.Paths)
    db_dir : Optional[Path] = Field(default=Path('databases'), description='Path to InvokeAI databases directory', json_schema_extra=Categories.Paths)
    outdir : Optional[Path] = Field(default=Path('outputs'), description='Default folder for output images', json_schema_extra=Categories.Paths)
    autoimport_dir : Path = Field(default=Path('autoimport'), description='Path to a directory of models files to be imported on startup.', json_schema_extra=Categories.Paths)
    conf_path : Path = Field(default=Path('configs/models.yaml'), description='Path to models definition file', json_schema_extra=Categories.Paths)
    models_dir : Path = Field(default=Path('models'), description='Path to the models directory', json_schema_extra=Categories.Paths)
    legacy_conf_dir : Path = Field(default=Path('configs/stable-diffusion'), description='Path to directory of legacy checkpoint config files', json_schema_extra=Categories.Paths)
    db_dir : Path = Field(default=Path('databases'), description='Path to InvokeAI databases directory', json_schema_extra=Categories.Paths)
    outdir : Path = Field(default=Path('outputs'), description='Default folder for output images', json_schema_extra=Categories.Paths)
    use_memory_db : bool = Field(default=False, description='Use in-memory database for storing image metadata', json_schema_extra=Categories.Paths)
    custom_nodes_dir : Path = Field(default=Path('nodes'), description='Path to directory for custom nodes', json_schema_extra=Categories.Paths)
    from_file : Optional[Path] = Field(default=None, description='Take command input from the indicated file (command-line client only)', json_schema_extra=Categories.Paths)
@@ -285,11 +279,15 @@ class InvokeAIAppConfig(InvokeAISettings):

    # DEPRECATED FIELDS - STILL HERE IN ORDER TO OBTAIN VALUES FROM PRE-3.1 CONFIG FILES
    always_use_cpu : bool = Field(default=False, description="If true, use the CPU for rendering even if a GPU is available.", json_schema_extra=Categories.MemoryPerformance)
    free_gpu_mem : Optional[bool] = Field(default=None, description="If true, purge model from GPU after each generation.", json_schema_extra=Categories.MemoryPerformance)
    max_cache_size : Optional[float] = Field(default=None, gt=0, description="Maximum memory amount used by model cache for rapid switching", json_schema_extra=Categories.MemoryPerformance)
    max_vram_cache_size : Optional[float] = Field(default=None, ge=0, description="Amount of VRAM reserved for model storage", json_schema_extra=Categories.MemoryPerformance)
    xformers_enabled : bool = Field(default=True, description="Enable/disable memory-efficient attention", json_schema_extra=Categories.MemoryPerformance)
    tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", json_schema_extra=Categories.MemoryPerformance)
    lora_dir : Optional[Path] = Field(default=None, description='Path to a directory of LoRA/LyCORIS models to be imported on startup.', json_schema_extra=Categories.Paths)
    embedding_dir : Optional[Path] = Field(default=None, description='Path to a directory of Textual Inversion embeddings to be imported on startup.', json_schema_extra=Categories.Paths)
    controlnet_dir : Optional[Path] = Field(default=None, description='Path to a directory of ControlNet embeddings to be imported on startup.', json_schema_extra=Categories.Paths)
    # this is not referred to in the source code and can be removed entirely
    #free_gpu_mem : Optional[bool] = Field(default=None, description="If true, purge model from GPU after each generation.", json_schema_extra=Categories.MemoryPerformance)

    # See InvokeAIAppConfig subclass below for CACHE and DEVICE categories
    # fmt: on
@@ -303,8 +301,8 @@ class InvokeAIAppConfig(InvokeAISettings):
        clobber=False,
    ):
        """
        Update settings with contents of init file, environment, and
        command-line settings.
        Update settings with contents of init file, environment, and command-line settings.

        :param conf: alternate Omegaconf dictionary object
        :param argv: alternate sys.argv list
        :param clobber: overwrite any initialization parameters passed during initialization
@@ -337,9 +335,7 @@ class InvokeAIAppConfig(InvokeAISettings):

    @classmethod
    def get_config(cls, **kwargs) -> InvokeAIAppConfig:
        """
        This returns a singleton InvokeAIAppConfig configuration object.
        """
        """Return a singleton InvokeAIAppConfig configuration object."""
        if (
            cls.singleton_config is None
            or type(cls.singleton_config) is not cls
@@ -351,9 +347,7 @@ class InvokeAIAppConfig(InvokeAISettings):

    @property
    def root_path(self) -> Path:
        """
        Path to the runtime root directory
        """
        """Path to the runtime root directory."""
        if self.root:
            root = Path(self.root).expanduser().absolute()
        else:
@@ -363,9 +357,7 @@ class InvokeAIAppConfig(InvokeAISettings):

    @property
    def root_dir(self) -> Path:
        """
        Alias for above.
        """
        """Alias for above."""
        return self.root_path

    def _resolve(self, partial_path: Path) -> Path:
@@ -373,108 +365,95 @@ class InvokeAIAppConfig(InvokeAISettings):

    @property
    def init_file_path(self) -> Path:
        """
        Path to invokeai.yaml
        """
        return self._resolve(INIT_FILE)
        """Path to invokeai.yaml."""
        resolved_path = self._resolve(INIT_FILE)
        assert resolved_path is not None
        return resolved_path

    @property
    def output_path(self) -> Path:
        """
        Path to defaults outputs directory.
        """
    def output_path(self) -> Optional[Path]:
        """Path to defaults outputs directory."""
        return self._resolve(self.outdir)

    @property
    def db_path(self) -> Path:
        """
        Path to the invokeai.db file.
        """
        return self._resolve(self.db_dir) / DB_FILE
        """Path to the invokeai.db file."""
        db_dir = self._resolve(self.db_dir)
        assert db_dir is not None
        return db_dir / DB_FILE

    @property
    def model_conf_path(self) -> Path:
        """
        Path to models configuration file.
        """
    def model_conf_path(self) -> Optional[Path]:
        """Path to models configuration file."""
        return self._resolve(self.conf_path)

    @property
    def legacy_conf_path(self) -> Path:
        """
        Path to directory of legacy configuration files (e.g. v1-inference.yaml)
        """
    def legacy_conf_path(self) -> Optional[Path]:
        """Path to directory of legacy configuration files (e.g. v1-inference.yaml)."""
        return self._resolve(self.legacy_conf_dir)

    @property
    def models_path(self) -> Path:
        """
        Path to the models directory
        """
    def models_path(self) -> Optional[Path]:
        """Path to the models directory."""
        return self._resolve(self.models_dir)

    @property
    def custom_nodes_path(self) -> Path:
        """
        Path to the custom nodes directory
        """
        return self._resolve(self.custom_nodes_dir)
        """Path to the custom nodes directory."""
        custom_nodes_path = self._resolve(self.custom_nodes_dir)
        assert custom_nodes_path is not None
        return custom_nodes_path

    # the following methods support legacy calls leftover from the Globals era
    @property
    def full_precision(self) -> bool:
        """Return true if precision set to float32"""
        """Return true if precision set to float32."""
        return self.precision == "float32"

    @property
    def try_patchmatch(self) -> bool:
        """Return true if patchmatch true"""
        """Return true if patchmatch true."""
        return self.patchmatch

    @property
    def nsfw_checker(self) -> bool:
        """NSFW node is always active and disabled from Web UIe"""
        """Return value for NSFW checker. The NSFW node is always active and disabled from Web UI."""
        return True

    @property
    def invisible_watermark(self) -> bool:
        """invisible watermark node is always active and disabled from Web UIe"""
        """Return value of invisible watermark. It is always active and disabled from Web UI."""
        return True

    @property
    def ram_cache_size(self) -> Union[Literal["auto"], float]:
        """Return the ram cache size using the legacy or modern setting."""
        return self.max_cache_size or self.ram

    @property
    def vram_cache_size(self) -> Union[Literal["auto"], float]:
        """Return the vram cache size using the legacy or modern setting."""
        return self.max_vram_cache_size or self.vram

    @property
    def use_cpu(self) -> bool:
        """Return true if the device is set to CPU or the always_use_cpu flag is set."""
        return self.always_use_cpu or self.device == "cpu"

    @property
    def disable_xformers(self) -> bool:
        """
        Return true if enable_xformers is false (reversed logic)
        and attention type is not set to xformers.
        """
        """Return true if enable_xformers is false (reversed logic) and attention type is not set to xformers."""
        disabled_in_config = not self.xformers_enabled
        return disabled_in_config and self.attention_type != "xformers"

    @staticmethod
    def find_root() -> Path:
        """
        Choose the runtime root directory when not specified on command line or
        init file.
        """
        """Choose the runtime root directory when not specified on command line or init file."""
        return _find_root()


def get_invokeai_config(**kwargs) -> InvokeAIAppConfig:
    """
    Legacy function which returns InvokeAIAppConfig.get_config()
    """
    """Legacy function which returns InvokeAIAppConfig.get_config()."""
    return InvokeAIAppConfig.get_config(**kwargs)
|
||||
|
||||
|
||||
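Note on the legacy cache fallbacks above: `self.max_cache_size or self.ram` uses Python's `or`, which falls through to the modern setting whenever the legacy one is falsy. A minimal, self-contained sketch of that behavior (the values below are hypothetical, not InvokeAI defaults):

    max_cache_size = 0.0  # hypothetical legacy setting; 0.0 is falsy
    ram = 7.5             # hypothetical modern setting, in GB
    print(max_cache_size or ram)  # -> 7.5: an explicit legacy 0.0 is silently ignored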
@@ -48,7 +48,6 @@ from typing import List, Optional, Union

from invokeai.backend.model_manager.config import (
    AnyModelConfig,
    BaseModelType,
    ModelConfigBase,
    ModelConfigFactory,
    ModelType,
)
@@ -158,7 +157,7 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
            ("version", CONFIG_FILE_VERSION),
        )

    def add_model(self, key: str, config: Union[dict, ModelConfigBase]) -> AnyModelConfig:
    def add_model(self, key: str, config: Union[dict, AnyModelConfig]) -> AnyModelConfig:
        """
        Add a model to the database.
@@ -255,7 +254,7 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
            self._db.conn.rollback()
            raise e

    def update_model(self, key: str, config: ModelConfigBase) -> AnyModelConfig:
    def update_model(self, key: str, config: Union[dict, AnyModelConfig]) -> AnyModelConfig:
        """
        Update the model, returning the updated version.
@@ -368,7 +367,7 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
        results = [ModelConfigFactory.make_config(json.loads(x[0])) for x in self._cursor.fetchall()]
        return results

    def search_by_path(self, path: Union[str, Path]) -> List[ModelConfigBase]:
    def search_by_path(self, path: Union[str, Path]) -> List[AnyModelConfig]:
        """Return models with the indicated path."""
        results = []
        with self._db.lock:
@@ -382,7 +381,7 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
        results = [ModelConfigFactory.make_config(json.loads(x[0])) for x in self._cursor.fetchall()]
        return results

    def search_by_hash(self, hash: str) -> List[ModelConfigBase]:
    def search_by_hash(self, hash: str) -> List[AnyModelConfig]:
        """Return models with the indicated original_hash."""
        results = []
        with self._db.lock:
@@ -22,6 +22,11 @@ class SessionProcessorBase(ABC):
        """Pauses the session processor"""
        pass

    @abstractmethod
    def take_one(self) -> SessionProcessorStatus:
        """Takes one session from the queue and executes it"""
        pass

    @abstractmethod
    def get_status(self) -> SessionProcessorStatus:
        """Gets the status of the session processor"""
@@ -1,7 +1,6 @@
import traceback
from threading import BoundedSemaphore
from threading import BoundedSemaphore, Thread
from threading import Event as ThreadEvent
from threading import Thread
from typing import Optional

from fastapi_events.handlers.local import local_handler
@@ -26,6 +25,7 @@ class DefaultSessionProcessor(SessionProcessorBase):
        self.__resume_event = ThreadEvent()
        self.__stop_event = ThreadEvent()
        self.__poll_now_event = ThreadEvent()
        self.__take_one_event = ThreadEvent()

        local_handler.register(event_name=EventServiceBase.queue_event, _func=self._on_queue_event)

@@ -37,6 +37,7 @@ class DefaultSessionProcessor(SessionProcessorBase):
                "stop_event": self.__stop_event,
                "poll_now_event": self.__poll_now_event,
                "resume_event": self.__resume_event,
                "take_one_event": self.__take_one_event,
            },
        )
        self.__thread.start()
@@ -82,6 +83,13 @@ class DefaultSessionProcessor(SessionProcessorBase):
            self.__resume_event.clear()
        return self.get_status()

    def take_one(self) -> SessionProcessorStatus:
        if self.__queue_item is None and not self.__resume_event.is_set():
            self.__resume_event.set()
            self.__take_one_event.set()
            self._poll_now()
        return self.get_status()

    def get_status(self) -> SessionProcessorStatus:
        return SessionProcessorStatus(
            is_started=self.__resume_event.is_set(),
@@ -93,9 +101,11 @@ class DefaultSessionProcessor(SessionProcessorBase):
        stop_event: ThreadEvent,
        poll_now_event: ThreadEvent,
        resume_event: ThreadEvent,
        take_one_event: ThreadEvent,
    ):
        try:
            stop_event.clear()
            take_one_event.clear()
            resume_event.set()
            self.__threadLimit.acquire()
            queue_item: Optional[SessionQueueItem] = None
@@ -119,6 +129,10 @@ class DefaultSessionProcessor(SessionProcessorBase):
                    )
                    queue_item = None

                if take_one_event.is_set():
                    resume_event.clear()
                    take_one_event.clear()

                if queue_item is None:
                    self.__invoker.services.logger.debug("Waiting for next polling interval or event")
                    poll_now_event.wait(POLLING_INTERVAL)
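A minimal sketch (plain threading, no InvokeAI imports) of the take_one choreography added above: `take_one()` sets the resume and take-one events together, and the worker clears the resume event after finishing one item, pausing itself again:

    from threading import Event

    resume_event, take_one_event = Event(), Event()

    def worker_finished_one_item() -> None:
        if take_one_event.is_set():   # same check as in the processor loop above
            resume_event.clear()      # pause after exactly one queue item
            take_one_event.clear()

    resume_event.set(); take_one_event.set()   # what take_one() does
    worker_finished_one_item()
    print(resume_event.is_set())  # -> False: the processor is paused again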
@@ -49,7 +49,7 @@ class Edge(BaseModel):

def get_output_field(node: BaseInvocation, field: str) -> Any:
    node_type = type(node)
    node_outputs = get_type_hints(node_type.get_output_type())
    node_outputs = get_type_hints(node_type.get_output_annotation())
    node_output_field = node_outputs.get(field) or None
    return node_output_field

@@ -188,7 +188,7 @@ class GraphInvocationOutput(BaseInvocationOutput):


# TODO: Fill this out and move to invocations
@invocation("graph")
@invocation("graph", version="1.0.0")
class GraphInvocation(BaseInvocation):
    """Execute a graph"""

@@ -205,7 +205,7 @@ class IterateInvocationOutput(BaseInvocationOutput):
    """Used to connect iteration outputs. Will be expanded to a specific output."""

    item: Any = OutputField(
        description="The item being iterated over", title="Collection Item", ui_type=UIType.CollectionItem
        description="The item being iterated over", title="Collection Item", ui_type=UIType._CollectionItem
    )


@@ -215,7 +215,7 @@ class IterateInvocation(BaseInvocation):
    """Iterates over a list of items"""

    collection: list[Any] = InputField(
        description="The list of items to iterate over", default_factory=list, ui_type=UIType.Collection
        description="The list of items to iterate over", default=[], ui_type=UIType._Collection
    )
    index: int = InputField(description="The index, will be provided on executed iterators", default=0, ui_hidden=True)

@@ -227,7 +227,7 @@ class IterateInvocation(BaseInvocation):
@invocation_output("collect_output")
class CollectInvocationOutput(BaseInvocationOutput):
    collection: list[Any] = OutputField(
        description="The collection of input items", title="Collection", ui_type=UIType.Collection
        description="The collection of input items", title="Collection", ui_type=UIType._Collection
    )


@@ -238,12 +238,12 @@ class CollectInvocation(BaseInvocation):
    item: Optional[Any] = InputField(
        default=None,
        description="The item to collect (all inputs must be of the same type)",
        ui_type=UIType.CollectionItem,
        ui_type=UIType._CollectionItem,
        title="Collection Item",
        input=Input.Connection,
    )
    collection: list[Any] = InputField(
        description="The collection, will be provided on execution", default_factory=list, ui_hidden=True
        description="The collection, will be provided on execution", default=[], ui_hidden=True
    )

    def invoke(self, context: InvocationContext) -> CollectInvocationOutput:
@@ -379,7 +379,7 @@ class Graph(BaseModel):
            raise NodeNotFoundError(f"Edge destination node {edge.destination.node_id} does not exist in the graph")

        # output fields are not on the node object directly, they are on the output type
        if edge.source.field not in source_node.get_output_type().model_fields:
        if edge.source.field not in source_node.get_output_annotation().model_fields:
            raise NodeFieldNotFoundError(
                f"Edge source field {edge.source.field} does not exist in node {edge.source.node_id}"
            )
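The `default_factory=list` to `default=[]` changes above rely on pydantic copying field defaults for each new model instance, so the classic shared-mutable-default pitfall does not apply. A quick self-contained sketch (plain pydantic; the model name is a hypothetical stand-in for CollectInvocation):

    from pydantic import BaseModel, Field

    class Collector(BaseModel):  # hypothetical stand-in
        collection: list = Field(default=[])

    a, b = Collector(), Collector()
    a.collection.append(1)
    print(a.collection, b.collection)  # -> [1] []  (defaults are copied per instance)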
@@ -2,6 +2,7 @@ class FieldDescriptions:
    denoising_start = "When to start denoising, expressed a percentage of total steps"
    denoising_end = "When to stop denoising, expressed a percentage of total steps"
    cfg_scale = "Classifier-Free Guidance scale"
    cfg_rescale_multiplier = "Rescale multiplier for CFG guidance, used for models trained with zero-terminal SNR"
    scheduler = "Scheduler to use during inference"
    positive_cond = "Positive conditioning tensor"
    negative_cond = "Negative conditioning tensor"
29	invokeai/backend/image_util/realesrgan/LICENSE	Normal file
@@ -0,0 +1,29 @@
BSD 3-Clause License

Copyright (c) 2021, Xintao Wang
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its
   contributors may be used to endorse or promote products derived from
   this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
0	invokeai/backend/image_util/realesrgan/__init__.py	Normal file
274	invokeai/backend/image_util/realesrgan/realesrgan.py	Normal file
@@ -0,0 +1,274 @@
import math
from enum import Enum
from pathlib import Path
from typing import Any, Optional

import cv2
import numpy as np
import numpy.typing as npt
import torch
from basicsr.archs.rrdbnet_arch import RRDBNet
from cv2.typing import MatLike
from tqdm import tqdm

from invokeai.backend.util.devices import choose_torch_device

"""
Adapted from https://github.com/xinntao/Real-ESRGAN/blob/master/realesrgan/utils.py
License is BSD3, copied to `LICENSE` in this directory.

The adaptation here has a few changes:
- Remove print statements, use `tqdm` to show progress
- Remove unused "outscale" logic, which simply scales the final image to a given factor
- Remove `dni_weight` logic, which was only used when multiple models were used
- Remove logic to fetch models from network
- Add types, rename a few things
"""


class ImageMode(str, Enum):
    L = "L"
    RGB = "RGB"
    RGBA = "RGBA"


class RealESRGAN:
    """A helper class for upsampling images with RealESRGAN.

    Args:
        scale (int): Upsampling scale factor used in the networks. It is usually 2 or 4.
        model_path (Path): The path to the pretrained model.
        model (RRDBNet): The defined network.
        tile (int): Large images can exhaust GPU memory, so this option first crops the input into
            tiles, processes each tile, and merges them back into one image. 0 disables tiling.
            Default: 0.
        tile_pad (int): The pad size for each tile, to remove border artifacts. Default: 10.
        pre_pad (int): Pad the input images to avoid border artifacts. Default: 10.
        half (bool): Whether to use half precision during inference. Default: False.
    """

    output: torch.Tensor

    def __init__(
        self,
        scale: int,
        model_path: Path,
        model: RRDBNet,
        tile: int = 0,
        tile_pad: int = 10,
        pre_pad: int = 10,
        half: bool = False,
    ) -> None:
        self.scale = scale
        self.tile_size = tile
        self.tile_pad = tile_pad
        self.pre_pad = pre_pad
        self.mod_scale: Optional[int] = None
        self.half = half
        self.device = choose_torch_device()

        loadnet = torch.load(model_path, map_location=torch.device("cpu"))

        # prefer to use params_ema
        if "params_ema" in loadnet:
            keyname = "params_ema"
        else:
            keyname = "params"

        model.load_state_dict(loadnet[keyname], strict=True)
        model.eval()
        self.model = model.to(self.device)

        if self.half:
            self.model = self.model.half()

    def pre_process(self, img: MatLike) -> None:
        """Pre-process, such as pre-pad and mod pad, so that the images can be divisible"""
        img_tensor: torch.Tensor = torch.from_numpy(np.transpose(img, (2, 0, 1))).float()
        self.img = img_tensor.unsqueeze(0).to(self.device)
        if self.half:
            self.img = self.img.half()

        # pre_pad
        if self.pre_pad != 0:
            self.img = torch.nn.functional.pad(self.img, (0, self.pre_pad, 0, self.pre_pad), "reflect")
        # mod pad for divisible borders
        if self.scale == 2:
            self.mod_scale = 2
        elif self.scale == 1:
            self.mod_scale = 4
        if self.mod_scale is not None:
            self.mod_pad_h, self.mod_pad_w = 0, 0
            _, _, h, w = self.img.size()
            if h % self.mod_scale != 0:
                self.mod_pad_h = self.mod_scale - h % self.mod_scale
            if w % self.mod_scale != 0:
                self.mod_pad_w = self.mod_scale - w % self.mod_scale
            self.img = torch.nn.functional.pad(self.img, (0, self.mod_pad_w, 0, self.mod_pad_h), "reflect")

    def process(self) -> None:
        # model inference
        self.output = self.model(self.img)

    def tile_process(self) -> None:
        """It will first crop input images to tiles, and then process each tile.
        Finally, all the processed tiles are merged into one image.

        Modified from: https://github.com/ata4/esrgan-launcher
        """
        batch, channel, height, width = self.img.shape
        output_height = height * self.scale
        output_width = width * self.scale
        output_shape = (batch, channel, output_height, output_width)

        # start with black image
        self.output = self.img.new_zeros(output_shape)
        tiles_x = math.ceil(width / self.tile_size)
        tiles_y = math.ceil(height / self.tile_size)

        # loop over all tiles
        total_steps = tiles_y * tiles_x
        for i in tqdm(range(total_steps), desc="Upscaling"):
            y = i // tiles_x
            x = i % tiles_x
            # extract tile from input image
            ofs_x = x * self.tile_size
            ofs_y = y * self.tile_size
            # input tile area on total image
            input_start_x = ofs_x
            input_end_x = min(ofs_x + self.tile_size, width)
            input_start_y = ofs_y
            input_end_y = min(ofs_y + self.tile_size, height)

            # input tile area on total image with padding
            input_start_x_pad = max(input_start_x - self.tile_pad, 0)
            input_end_x_pad = min(input_end_x + self.tile_pad, width)
            input_start_y_pad = max(input_start_y - self.tile_pad, 0)
            input_end_y_pad = min(input_end_y + self.tile_pad, height)

            # input tile dimensions
            input_tile_width = input_end_x - input_start_x
            input_tile_height = input_end_y - input_start_y
            input_tile = self.img[
                :,
                :,
                input_start_y_pad:input_end_y_pad,
                input_start_x_pad:input_end_x_pad,
            ]

            # upscale tile
            with torch.no_grad():
                output_tile = self.model(input_tile)

            # output tile area on total image
            output_start_x = input_start_x * self.scale
            output_end_x = input_end_x * self.scale
            output_start_y = input_start_y * self.scale
            output_end_y = input_end_y * self.scale

            # output tile area without padding
            output_start_x_tile = (input_start_x - input_start_x_pad) * self.scale
            output_end_x_tile = output_start_x_tile + input_tile_width * self.scale
            output_start_y_tile = (input_start_y - input_start_y_pad) * self.scale
            output_end_y_tile = output_start_y_tile + input_tile_height * self.scale

            # put tile into output image
            self.output[:, :, output_start_y:output_end_y, output_start_x:output_end_x] = output_tile[
                :,
                :,
                output_start_y_tile:output_end_y_tile,
                output_start_x_tile:output_end_x_tile,
            ]

    def post_process(self) -> torch.Tensor:
        # remove extra pad
        if self.mod_scale is not None:
            _, _, h, w = self.output.size()
            self.output = self.output[
                :,
                :,
                0 : h - self.mod_pad_h * self.scale,
                0 : w - self.mod_pad_w * self.scale,
            ]
        # remove prepad
        if self.pre_pad != 0:
            _, _, h, w = self.output.size()
            self.output = self.output[
                :,
                :,
                0 : h - self.pre_pad * self.scale,
                0 : w - self.pre_pad * self.scale,
            ]
        return self.output

    @torch.no_grad()
    def upscale(self, img: MatLike, esrgan_alpha_upscale: bool = True) -> npt.NDArray[Any]:
        np_img = img.astype(np.float32)
        alpha: Optional[np.ndarray] = None
        if np.max(np_img) > 256:
            # 16-bit image
            max_range = 65535
        else:
            max_range = 255
        np_img = np_img / max_range
        if len(np_img.shape) == 2:
            # grayscale image
            img_mode = ImageMode.L
            np_img = cv2.cvtColor(np_img, cv2.COLOR_GRAY2RGB)
        elif np_img.shape[2] == 4:
            # RGBA image with alpha channel
            img_mode = ImageMode.RGBA
            alpha = np_img[:, :, 3]
            np_img = np_img[:, :, 0:3]
            np_img = cv2.cvtColor(np_img, cv2.COLOR_BGR2RGB)
            if esrgan_alpha_upscale:
                alpha = cv2.cvtColor(alpha, cv2.COLOR_GRAY2RGB)
        else:
            img_mode = ImageMode.RGB
            np_img = cv2.cvtColor(np_img, cv2.COLOR_BGR2RGB)

        # ------------------- process image (without the alpha channel) ------------------- #
        self.pre_process(np_img)
        if self.tile_size > 0:
            self.tile_process()
        else:
            self.process()
        output_tensor = self.post_process()
        output_img: npt.NDArray[Any] = output_tensor.data.squeeze().float().cpu().clamp_(0, 1).numpy()
        output_img = np.transpose(output_img[[2, 1, 0], :, :], (1, 2, 0))
        if img_mode is ImageMode.L:
            output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2GRAY)

        # ------------------- process the alpha channel if necessary ------------------- #
        if img_mode is ImageMode.RGBA:
            if esrgan_alpha_upscale:
                assert alpha is not None
                self.pre_process(alpha)
                if self.tile_size > 0:
                    self.tile_process()
                else:
                    self.process()
                output_alpha_tensor = self.post_process()
                output_alpha: npt.NDArray[Any] = output_alpha_tensor.data.squeeze().float().cpu().clamp_(0, 1).numpy()
                output_alpha = np.transpose(output_alpha[[2, 1, 0], :, :], (1, 2, 0))
                output_alpha = cv2.cvtColor(output_alpha, cv2.COLOR_BGR2GRAY)
            else:  # use the cv2 resize for alpha channel
                assert alpha is not None
                h, w = alpha.shape[0:2]
                output_alpha = cv2.resize(
                    alpha,
                    (w * self.scale, h * self.scale),
                    interpolation=cv2.INTER_LINEAR,
                )

            # merge the alpha channel
            output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2BGRA)
            output_img[:, :, 3] = output_alpha

        # ------------------------------ return ------------------------------ #
        if max_range == 65535:  # 16-bit image
            output = (output_img * 65535.0).round().astype(np.uint16)
        else:
            output = (output_img * 255.0).round().astype(np.uint8)

        return output
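A minimal usage sketch of the class above. The RRDBNet hyperparameters are the usual x4 RealESRGAN settings from basicsr; the weights path and image filenames are hypothetical:

    import cv2
    from pathlib import Path
    from basicsr.archs.rrdbnet_arch import RRDBNet

    weights = Path("RealESRGAN_x4plus.pth")  # hypothetical local weights file
    net = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
    upscaler = RealESRGAN(scale=4, model_path=weights, model=net, tile=512)

    bgr = cv2.imread("input.png", cv2.IMREAD_UNCHANGED)  # cv2 loads BGR(A)
    upscaled = upscaler.upscale(bgr)
    cv2.imwrite("output.png", upscaled)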
@@ -54,6 +54,44 @@ class ImageProjModel(torch.nn.Module):
        return clip_extra_context_tokens


class MLPProjModel(torch.nn.Module):
    """SD model with image prompt"""

    def __init__(self, cross_attention_dim=1024, clip_embeddings_dim=1024):
        super().__init__()

        self.proj = torch.nn.Sequential(
            torch.nn.Linear(clip_embeddings_dim, clip_embeddings_dim),
            torch.nn.GELU(),
            torch.nn.Linear(clip_embeddings_dim, cross_attention_dim),
            torch.nn.LayerNorm(cross_attention_dim),
        )

    @classmethod
    def from_state_dict(cls, state_dict: dict[str, torch.Tensor]):
        """Initialize an MLPProjModel from a state_dict.

        The cross_attention_dim and clip_embeddings_dim are inferred from the shape of the tensors in the state_dict.

        Args:
            state_dict (dict[str, torch.Tensor]): The state_dict of model weights.

        Returns:
            MLPProjModel
        """
        cross_attention_dim = state_dict["proj.3.weight"].shape[0]
        clip_embeddings_dim = state_dict["proj.0.weight"].shape[0]

        model = cls(cross_attention_dim, clip_embeddings_dim)

        model.load_state_dict(state_dict)
        return model

    def forward(self, image_embeds):
        clip_extra_context_tokens = self.proj(image_embeds)
        return clip_extra_context_tokens


class IPAdapter:
    """IP-Adapter: https://arxiv.org/pdf/2308.06721.pdf"""

@@ -130,6 +168,13 @@ class IPAdapterPlus(IPAdapter):
        return image_prompt_embeds, uncond_image_prompt_embeds


class IPAdapterFull(IPAdapterPlus):
    """IP-Adapter Plus with full features."""

    def _init_image_proj_model(self, state_dict: dict[str, torch.Tensor]):
        return MLPProjModel.from_state_dict(state_dict).to(self.device, dtype=self.dtype)


class IPAdapterPlusXL(IPAdapterPlus):
    """IP-Adapter Plus for SDXL."""

@@ -149,11 +194,9 @@ def build_ip_adapter(
) -> Union[IPAdapter, IPAdapterPlus]:
    state_dict = torch.load(ip_adapter_ckpt_path, map_location="cpu")

    # Determine if the state_dict is from an IPAdapter or IPAdapterPlus based on the image_proj weights that it
    # contains.
    is_plus = "proj.weight" not in state_dict["image_proj"]

    if is_plus:
    if "proj.weight" in state_dict["image_proj"]:  # IPAdapter (with ImageProjModel).
        return IPAdapter(state_dict, device=device, dtype=dtype)
    elif "proj_in.weight" in state_dict["image_proj"]:  # IPAdapterPlus or IPAdapterPlusXL (with Resampler).
        cross_attention_dim = state_dict["ip_adapter"]["1.to_k_ip.weight"].shape[-1]
        if cross_attention_dim == 768:
            # SD1 IP-Adapter Plus
@@ -163,5 +206,7 @@ def build_ip_adapter(
            return IPAdapterPlusXL(state_dict, device=device, dtype=dtype)
        else:
            raise Exception(f"Unsupported IP-Adapter Plus cross-attention dimension: {cross_attention_dim}.")
    elif "proj.0.weight" in state_dict["image_proj"]:  # IPAdapterFull (with MLPProjModel).
        return IPAdapterFull(state_dict, device=device, dtype=dtype)
    else:
        return IPAdapter(state_dict, device=device, dtype=dtype)
        raise ValueError(f"'{ip_adapter_ckpt_path}' has an unrecognized IP-Adapter model architecture.")
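The shape-based inference in `MLPProjModel.from_state_dict` can be verified with a quick round trip (pure torch, using the class from the diff above). `proj.0` is the first Linear layer, whose weight is `[clip_embeddings_dim, clip_embeddings_dim]`; `proj.3` is the LayerNorm, whose weight is `[cross_attention_dim]`:

    import torch

    model = MLPProjModel(cross_attention_dim=768, clip_embeddings_dim=1024)
    sd = model.state_dict()
    print(sd["proj.0.weight"].shape)  # torch.Size([1024, 1024]) -> clip_embeddings_dim
    print(sd["proj.3.weight"].shape)  # torch.Size([768])        -> cross_attention_dim
    restored = MLPProjModel.from_state_dict(sd)
    assert restored.proj[2].out_features == 768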
@@ -1,12 +1,13 @@
# ruff: noqa: I001, F401
"""
Initialization file for invokeai.backend.model_management
"""
# This import must be first
from .model_manager import ModelManager, ModelInfo, AddModelResult, SchedulerPredictionType  # noqa: F401 isort: split
from .model_manager import AddModelResult, ModelInfo, ModelManager, SchedulerPredictionType
from .lora import ModelPatcher, ONNXModelPatcher
from .model_cache import ModelCache

from .lora import ModelPatcher, ONNXModelPatcher  # noqa: F401
from .model_cache import ModelCache  # noqa: F401
from .models import (  # noqa: F401
from .models import (
    BaseModelType,
    DuplicateModelException,
    ModelNotFoundException,
@@ -16,4 +17,4 @@ from .models import (  # noqa: F401
)

# This import must be last
from .model_merge import ModelMerger, MergeInterpolationMethod  # noqa: F401 isort: split
from .model_merge import MergeInterpolationMethod, ModelMerger
@@ -53,6 +53,7 @@ class ModelProbe(object):
        "StableDiffusionXLPipeline": ModelType.Main,
        "StableDiffusionXLImg2ImgPipeline": ModelType.Main,
        "StableDiffusionXLInpaintPipeline": ModelType.Main,
        "LatentConsistencyModelPipeline": ModelType.Main,
        "AutoencoderKL": ModelType.Vae,
        "AutoencoderTiny": ModelType.Vae,
        "ControlNetModel": ModelType.ControlNet,
@@ -224,7 +225,7 @@ class ModelProbe(object):
        with SilenceWarnings():
            if model_path.suffix.endswith((".ckpt", ".pt", ".bin")):
                cls._scan_model(model_path, model_path)
                return torch.load(model_path)
                return torch.load(model_path, map_location="cpu")
            else:
                return safetensors.torch.load_file(model_path)
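A short sketch of why the `map_location="cpu"` change above matters when probing checkpoints: with it, every tensor is restored onto the CPU, so a checkpoint saved from a CUDA device neither allocates GPU memory nor fails on a CPU-only machine (the file path below is hypothetical):

    import torch
    torch.save({"w": torch.ones(2)}, "/tmp/probe-demo.pt")  # hypothetical path
    sd = torch.load("/tmp/probe-demo.pt", map_location="cpu")
    print(sd["w"].device)  # -> cpu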
@@ -607,11 +607,14 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
        if isinstance(guidance_scale, list):
            guidance_scale = guidance_scale[step_index]

        noise_pred = self.invokeai_diffuser._combine(
            uc_noise_pred,
            c_noise_pred,
            guidance_scale,
        )
        noise_pred = self.invokeai_diffuser._combine(uc_noise_pred, c_noise_pred, guidance_scale)
        guidance_rescale_multiplier = conditioning_data.guidance_rescale_multiplier
        if guidance_rescale_multiplier > 0:
            noise_pred = self._rescale_cfg(
                noise_pred,
                c_noise_pred,
                guidance_rescale_multiplier,
            )

        # compute the previous noisy sample x_t -> x_t-1
        step_output = self.scheduler.step(noise_pred, timestep, latents, **conditioning_data.scheduler_args)
@@ -634,6 +637,16 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):

        return step_output

    @staticmethod
    def _rescale_cfg(total_noise_pred, pos_noise_pred, multiplier=0.7):
        """Implementation of Algorithm 2 from https://arxiv.org/pdf/2305.08891.pdf."""
        ro_pos = torch.std(pos_noise_pred, dim=(1, 2, 3), keepdim=True)
        ro_cfg = torch.std(total_noise_pred, dim=(1, 2, 3), keepdim=True)

        x_rescaled = total_noise_pred * (ro_pos / ro_cfg)
        x_final = multiplier * x_rescaled + (1.0 - multiplier) * total_noise_pred
        return x_final

    def _unet_forward(
        self,
        latents,
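Since `_rescale_cfg` normalizes by per-sample standard deviations, its effect is easy to verify numerically. A standalone sketch (pure torch, mirroring the static method above): with `multiplier=1.0`, the rescaled prediction's std is pulled exactly onto the conditional prediction's std:

    import torch

    def rescale_cfg(total_noise_pred, pos_noise_pred, multiplier=0.7):
        # Mirrors the static method above (Algorithm 2 of arXiv:2305.08891).
        ro_pos = torch.std(pos_noise_pred, dim=(1, 2, 3), keepdim=True)
        ro_cfg = torch.std(total_noise_pred, dim=(1, 2, 3), keepdim=True)
        x_rescaled = total_noise_pred * (ro_pos / ro_cfg)
        return multiplier * x_rescaled + (1.0 - multiplier) * total_noise_pred

    pos = torch.randn(2, 4, 8, 8)
    cfg = pos * 3.0  # stand-in for an over-amplified CFG prediction
    out = rescale_cfg(cfg, pos, multiplier=1.0)
    print(torch.allclose(torch.std(out, dim=(1, 2, 3)), torch.std(pos, dim=(1, 2, 3))))  # -> True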
@@ -67,13 +67,17 @@ class IPAdapterConditioningInfo:
class ConditioningData:
    unconditioned_embeddings: BasicConditioningInfo
    text_embeddings: BasicConditioningInfo
    guidance_scale: Union[float, List[float]]
    """
    Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
    `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf).
    Guidance scale is enabled by setting `guidance_scale > 1`. A higher guidance scale encourages generating
    images that are closely linked to the text `prompt`, usually at the expense of lower image quality.
    """
    guidance_scale: Union[float, List[float]]
    """For models trained using zero-terminal SNR ("ztsnr"), it's suggested to use a guidance_rescale_multiplier of 0.7.
    Ref: [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf)
    """
    guidance_rescale_multiplier: float = 0
    extra: Optional[ExtraConditioningInfo] = None
    scheduler_args: dict[str, Any] = field(default_factory=dict)
    """
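The `guidance_scale` documented above is the `w` of classifier-free guidance. As a sketch of what the combine step in the sampling loop computes (the standard CFG formula, not a copy of `_combine` itself):

    import torch
    w = 7.5                           # typical guidance_scale
    uncond = torch.randn(1, 4, 8, 8)  # noise prediction for the empty prompt
    cond = torch.randn(1, 4, 8, 8)    # noise prediction for the text prompt
    noise_pred = uncond + w * (cond - uncond)  # w == 1 disables guidance; w > 1 strengthens it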
@@ -1,8 +1,7 @@
# Copyright (c) 2023 Lincoln D. Stein and The InvokeAI Development Team

"""invokeai.backend.util.logging

Logging class for InvokeAI that produces console messages
"""
Logging class for InvokeAI that produces console messages.

Usage:

@@ -178,8 +177,8 @@ InvokeAI:
import logging.handlers
import socket
import urllib.parse
from abc import abstractmethod
from pathlib import Path
from typing import Any, Dict, Optional

from invokeai.app.services.config import InvokeAIAppConfig

@@ -192,36 +191,36 @@ except ImportError:


# module level functions
def debug(msg, *args, **kwargs):
def debug(msg: str, *args: str, **kwargs: Any) -> None:  # noqa D103
    InvokeAILogger.get_logger().debug(msg, *args, **kwargs)


def info(msg, *args, **kwargs):
def info(msg: str, *args: str, **kwargs: Any) -> None:  # noqa D103
    InvokeAILogger.get_logger().info(msg, *args, **kwargs)


def warning(msg, *args, **kwargs):
def warning(msg: str, *args: str, **kwargs: Any) -> None:  # noqa D103
    InvokeAILogger.get_logger().warning(msg, *args, **kwargs)


def error(msg, *args, **kwargs):
def error(msg: str, *args: str, **kwargs: Any) -> None:  # noqa D103
    InvokeAILogger.get_logger().error(msg, *args, **kwargs)


def critical(msg, *args, **kwargs):
def critical(msg: str, *args: str, **kwargs: Any) -> None:  # noqa D103
    InvokeAILogger.get_logger().critical(msg, *args, **kwargs)


def log(level, msg, *args, **kwargs):
def log(level: int, msg: str, *args: str, **kwargs: Any) -> None:  # noqa D103
    InvokeAILogger.get_logger().log(level, msg, *args, **kwargs)


def disable(level=logging.CRITICAL):
    InvokeAILogger.get_logger().disable(level)
def disable(level: int = logging.CRITICAL) -> None:  # noqa D103
    logging.disable(level)


def basicConfig(**kwargs):
    InvokeAILogger.get_logger().basicConfig(**kwargs)
def basicConfig(**kwargs: Any) -> None:  # noqa D103
    logging.basicConfig(**kwargs)


_FACILITY_MAP = (
@@ -256,33 +255,25 @@ _SOCK_MAP = {


class InvokeAIFormatter(logging.Formatter):
    """
    Base class for logging formatter
    """Base class for logging formatter."""

    """

    def format(self, record):
    def format(self, record: logging.LogRecord) -> str:  # noqa D102
        formatter = logging.Formatter(self.log_fmt(record.levelno))
        return formatter.format(record)

    @abstractmethod
    def log_fmt(self, levelno: int) -> str:
        pass
    def log_fmt(self, levelno: int) -> str:  # noqa D102
        return "[%(asctime)s]::[%(name)s]::%(levelname)s --> %(message)s"


class InvokeAISyslogFormatter(InvokeAIFormatter):
    """
    Formatting for syslog
    """
    """Formatting for syslog."""

    def log_fmt(self, levelno: int) -> str:
    def log_fmt(self, levelno: int) -> str:  # noqa D102
        return "%(name)s [%(process)d] <%(levelname)s> %(message)s"


class InvokeAILegacyLogFormatter(InvokeAIFormatter):
    """
    Formatting for the InvokeAI Logger (legacy version)
    """
class InvokeAILegacyLogFormatter(InvokeAIFormatter):  # noqa D102
    """Formatting for the InvokeAI Logger (legacy version)."""

    FORMATS = {
        logging.DEBUG: " | %(message)s",
@@ -292,23 +283,21 @@ class InvokeAILegacyLogFormatter(InvokeAIFormatter):
        logging.CRITICAL: "### %(message)s",
    }

    def log_fmt(self, levelno: int) -> str:
        return self.FORMATS.get(levelno)
    def log_fmt(self, levelno: int) -> str:  # noqa D102
        format = self.FORMATS.get(levelno)
        assert format is not None
        return format


class InvokeAIPlainLogFormatter(InvokeAIFormatter):
    """
    Custom Formatting for the InvokeAI Logger (plain version)
    """
    """Custom Formatting for the InvokeAI Logger (plain version)."""

    def log_fmt(self, levelno: int) -> str:
    def log_fmt(self, levelno: int) -> str:  # noqa D102
        return "[%(asctime)s]::[%(name)s]::%(levelname)s --> %(message)s"


class InvokeAIColorLogFormatter(InvokeAIFormatter):
    """
    Custom Formatting for the InvokeAI Logger
    """
    """Custom Formatting for the InvokeAI Logger."""

    # Color Codes
    grey = "\x1b[38;20m"
@@ -331,8 +320,10 @@ class InvokeAIColorLogFormatter(InvokeAIFormatter):
        logging.CRITICAL: bold_red + log_format + reset,
    }

    def log_fmt(self, levelno: int) -> str:
        return self.FORMATS.get(levelno)
    def log_fmt(self, levelno: int) -> str:  # noqa D102
        format = self.FORMATS.get(levelno)
        assert format is not None
        return format


LOG_FORMATTERS = {
@@ -343,13 +334,13 @@ LOG_FORMATTERS = {
}


class InvokeAILogger(object):
    loggers = {}
class InvokeAILogger(object):  # noqa D102
    loggers: Dict[str, logging.Logger] = {}

    @classmethod
    def get_logger(
        cls, name: str = "InvokeAI", config: InvokeAIAppConfig = InvokeAIAppConfig.get_config()
    ) -> logging.Logger:
    ) -> logging.Logger:  # noqa D102
        if name in cls.loggers:
            logger = cls.loggers[name]
            logger.handlers.clear()
@@ -362,7 +353,7 @@ class InvokeAILogger(object):
        return cls.loggers[name]

    @classmethod
    def get_loggers(cls, config: InvokeAIAppConfig) -> list[logging.Handler]:
    def get_loggers(cls, config: InvokeAIAppConfig) -> list[logging.Handler]:  # noqa D102
        handler_strs = config.log_handlers
        handlers = []
        for handler in handler_strs:
@@ -374,7 +365,7 @@ class InvokeAILogger(object):
            # http gets no custom formatter
            formatter = LOG_FORMATTERS[config.log_format]
            if handler_name == "console":
                ch = logging.StreamHandler()
                ch: logging.Handler = logging.StreamHandler()
                ch.setFormatter(formatter())
                handlers.append(ch)

@@ -393,18 +384,18 @@ class InvokeAILogger(object):
        return handlers

    @staticmethod
    def _parse_syslog_args(args: str = None) -> logging.Handler:
    def _parse_syslog_args(args: Optional[str] = None) -> logging.Handler:
        if not SYSLOG_AVAILABLE:
            raise ValueError("syslog is not available on this system")
        if not args:
            args = "/dev/log" if Path("/dev/log").exists() else "address:localhost:514"
        syslog_args = {}
        syslog_args: Dict[str, Any] = {}
        try:
            for a in args.split(","):
                arg_name, *arg_value = a.split(":", 2)
                if arg_name == "address":
                    host, *port = arg_value
                    port = 514 if len(port) == 0 else int(port[0])
                    host, *port_list = arg_value
                    port = 514 if not port_list else int(port_list[0])
                    syslog_args["address"] = (host, port)
                elif arg_name == "facility":
                    syslog_args["facility"] = _FACILITY_MAP[arg_value[0]]
@@ -417,13 +408,13 @@ class InvokeAILogger(object):
        return logging.handlers.SysLogHandler(**syslog_args)

    @staticmethod
    def _parse_file_args(args: str = None) -> logging.Handler:
    def _parse_file_args(args: Optional[str] = None) -> logging.Handler:  # noqa D102
        if not args:
            raise ValueError("please provide filename for file logging using format 'file=/path/to/logfile.txt'")
        return logging.FileHandler(args)

    @staticmethod
    def _parse_http_args(args: str = None) -> logging.Handler:
    def _parse_http_args(args: Optional[str] = None) -> logging.Handler:  # noqa D102
        if not args:
            raise ValueError("please provide destination for http logging using format 'http=url'")
        arg_list = args.split(",")
@@ -434,12 +425,12 @@ class InvokeAILogger(object):
        path = url.path
        port = url.port or 80

        syslog_args = {}
        syslog_args: Dict[str, Any] = {}
        for a in arg_list:
            arg_name, *arg_value = a.split(":", 2)
            if arg_name == "method":
                arg_value = arg_value[0] if len(arg_value) > 0 else "GET"
                syslog_args[arg_name] = arg_value
                method = arg_value[0] if len(arg_value) > 0 else "GET"
                syslog_args[arg_name] = method
            else:  # TODO: Provide support for SSL context and credentials
                pass
        return logging.handlers.HTTPHandler(f"{host}:{port}", path, **syslog_args)
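The star-unpacking change in `_parse_syslog_args` is easiest to see in isolation: with no port in the handler spec, the empty `port_list` falls back to 514; with an explicit port, it is parsed as an int. A self-contained sketch:

    for spec in ("address:localhost", "address:localhost:1514"):
        arg_name, *arg_value = spec.split(":", 2)
        host, *port_list = arg_value
        port = 514 if not port_list else int(port_list[0])
        print(arg_name, (host, port))
    # -> address ('localhost', 514)
    # -> address ('localhost', 1514)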
@@ -20,10 +20,18 @@ module.exports = {
    ecmaVersion: 2018,
    sourceType: 'module',
  },
  plugins: ['react', '@typescript-eslint', 'eslint-plugin-react-hooks'],
  plugins: [
    'react',
    '@typescript-eslint',
    'eslint-plugin-react-hooks',
    'i18next',
    'path',
  ],
  root: true,
  rules: {
    'path/no-relative-imports': ['error', { maxDepth: 0 }],
    curly: 'error',
    'i18next/no-literal-string': 2,
    'react/jsx-no-bind': ['error', { allowBind: true }],
    'react/jsx-curly-brace-presence': [
      'error',
@@ -9,6 +9,5 @@ index.html
.yalc/
*.scss
src/services/api/schema.d.ts
docs/
static/
src/theme/css/overlayscrollbars.css
171	invokeai/frontend/web/dist/assets/App-6440ab3b.js	vendored	Normal file
File diff suppressed because one or more lines are too long
171	invokeai/frontend/web/dist/assets/App-cd19f6f7.js	vendored
File diff suppressed because one or more lines are too long
1	invokeai/frontend/web/dist/assets/MantineProvider-a6a1d85c.js	vendored	Normal file
File diff suppressed because one or more lines are too long
@@ -1,4 +1,4 @@
import{w as s,ie as T,v as l,_ as I,ig as R,aa as V,ih as z,ii as j,ij as D,ik as F,il as G,im as W,io as K,az as H,ip as U,iq as Y}from"./index-c553e366.js";import{M as Z}from"./MantineProvider-094ba0de.js";var P=String.raw,E=P`
import{I as s,ie as T,v as l,$ as A,ig as R,aa as V,ih as z,ii as j,ij as D,ik as F,il as G,im as W,io as K,az as H,ip as U,iq as Y}from"./index-f820e2e3.js";import{M as Z}from"./MantineProvider-a6a1d85c.js";var P=String.raw,E=P`
:root,
:host {
  --chakra-vh: 100vh;
@@ -277,4 +277,4 @@ import{w as s,ie as T,v as l,_ as I,ig as R,aa as V,ih as z,ii as j,ij as D,ik a
}

${E}
`}),g={light:"chakra-ui-light",dark:"chakra-ui-dark"};function Q(e={}){const{preventTransition:o=!0}=e,n={setDataset:r=>{const t=o?n.preventTransition():void 0;document.documentElement.dataset.theme=r,document.documentElement.style.colorScheme=r,t==null||t()},setClassName(r){document.body.classList.add(r?g.dark:g.light),document.body.classList.remove(r?g.light:g.dark)},query(){return window.matchMedia("(prefers-color-scheme: dark)")},getSystemTheme(r){var t;return((t=n.query().matches)!=null?t:r==="dark")?"dark":"light"},addListener(r){const t=n.query(),i=a=>{r(a.matches?"dark":"light")};return typeof t.addListener=="function"?t.addListener(i):t.addEventListener("change",i),()=>{typeof t.removeListener=="function"?t.removeListener(i):t.removeEventListener("change",i)}},preventTransition(){const r=document.createElement("style");return r.appendChild(document.createTextNode("*{-webkit-transition:none!important;-moz-transition:none!important;-o-transition:none!important;-ms-transition:none!important;transition:none!important}")),document.head.appendChild(r),()=>{window.getComputedStyle(document.body),requestAnimationFrame(()=>{requestAnimationFrame(()=>{document.head.removeChild(r)})})}}};return n}var X="chakra-ui-color-mode";function L(e){return{ssr:!1,type:"localStorage",get(o){if(!(globalThis!=null&&globalThis.document))return o;let n;try{n=localStorage.getItem(e)||o}catch{}return n||o},set(o){try{localStorage.setItem(e,o)}catch{}}}}var ee=L(X),M=()=>{};function S(e,o){return e.type==="cookie"&&e.ssr?e.get(o):o}function O(e){const{value:o,children:n,options:{useSystemColorMode:r,initialColorMode:t,disableTransitionOnChange:i}={},colorModeManager:a=ee}=e,d=t==="dark"?"dark":"light",[u,p]=l.useState(()=>S(a,d)),[y,b]=l.useState(()=>S(a)),{getSystemTheme:w,setClassName:k,setDataset:x,addListener:$}=l.useMemo(()=>Q({preventTransition:i}),[i]),v=t==="system"&&!u?y:u,c=l.useCallback(m=>{const f=m==="system"?w():m;p(f),k(f==="dark"),x(f),a.set(f)},[a,w,k,x]);I(()=>{t==="system"&&b(w())},[]),l.useEffect(()=>{const m=a.get();if(m){c(m);return}if(t==="system"){c("system");return}c(d)},[a,d,t,c]);const C=l.useCallback(()=>{c(v==="dark"?"light":"dark")},[v,c]);l.useEffect(()=>{if(r)return $(c)},[r,$,c]);const A=l.useMemo(()=>({colorMode:o??v,toggleColorMode:o?M:C,setColorMode:o?M:c,forced:o!==void 0}),[v,C,c,o]);return s.jsx(R.Provider,{value:A,children:n})}O.displayName="ColorModeProvider";var te=["borders","breakpoints","colors","components","config","direction","fonts","fontSizes","fontWeights","letterSpacings","lineHeights","radii","shadows","sizes","space","styles","transition","zIndices"];function re(e){return V(e)?te.every(o=>Object.prototype.hasOwnProperty.call(e,o)):!1}function h(e){return typeof e=="function"}function oe(...e){return o=>e.reduce((n,r)=>r(n),o)}var ne=e=>function(...n){let r=[...n],t=n[n.length-1];return re(t)&&r.length>1?r=r.slice(0,r.length-1):t=e,oe(...r.map(i=>a=>h(i)?i(a):ae(a,i)))(t)},ie=ne(j);function ae(...e){return z({},...e,_)}function _(e,o,n,r){if((h(e)||h(o))&&Object.prototype.hasOwnProperty.call(r,n))return(...t)=>{const i=h(e)?e(...t):e,a=h(o)?o(...t):o;return z({},i,a,_)}}var q=l.createContext({getDocument(){return document},getWindow(){return window}});q.displayName="EnvironmentContext";function N(e){const{children:o,environment:n,disabled:r}=e,t=l.useRef(null),i=l.useMemo(()=>n||{getDocument:()=>{var d,u;return(u=(d=t.current)==null?void 0:d.ownerDocument)!=null?u:document},getWindow:()=>{var d,u;return(u=(d=t.current)==null?void 
0:d.ownerDocument.defaultView)!=null?u:window}},[n]),a=!r||!n;return s.jsxs(q.Provider,{value:i,children:[o,a&&s.jsx("span",{id:"__chakra_env",hidden:!0,ref:t})]})}N.displayName="EnvironmentProvider";var se=e=>{const{children:o,colorModeManager:n,portalZIndex:r,resetScope:t,resetCSS:i=!0,theme:a={},environment:d,cssVarsRoot:u,disableEnvironment:p,disableGlobalStyle:y}=e,b=s.jsx(N,{environment:d,disabled:p,children:o});return s.jsx(D,{theme:a,cssVarsRoot:u,children:s.jsxs(O,{colorModeManager:n,options:a.config,children:[i?s.jsx(J,{scope:t}):s.jsx(B,{}),!y&&s.jsx(F,{}),r?s.jsx(G,{zIndex:r,children:b}):b]})})},le=e=>function({children:n,theme:r=e,toastOptions:t,...i}){return s.jsxs(se,{theme:r,...i,children:[s.jsx(W,{value:t==null?void 0:t.defaultOptions,children:n}),s.jsx(K,{...t})]})},de=le(j);const ue=()=>l.useMemo(()=>({colorScheme:"dark",fontFamily:"'Inter Variable', sans-serif",components:{ScrollArea:{defaultProps:{scrollbarSize:10},styles:{scrollbar:{"&:hover":{backgroundColor:"var(--invokeai-colors-baseAlpha-300)"}},thumb:{backgroundColor:"var(--invokeai-colors-baseAlpha-300)"}}}}}),[]),ce=L("@@invokeai-color-mode");function me({children:e}){const{i18n:o}=H(),n=o.dir(),r=l.useMemo(()=>ie({...U,direction:n}),[n]);l.useEffect(()=>{document.body.dir=n},[n]);const t=ue();return s.jsx(Z,{theme:t,children:s.jsx(de,{theme:r,colorModeManager:ce,toastOptions:Y,children:e})})}const ve=l.memo(me);export{ve as default};
`}),g={light:"chakra-ui-light",dark:"chakra-ui-dark"};function Q(e={}){const{preventTransition:o=!0}=e,n={setDataset:r=>{const t=o?n.preventTransition():void 0;document.documentElement.dataset.theme=r,document.documentElement.style.colorScheme=r,t==null||t()},setClassName(r){document.body.classList.add(r?g.dark:g.light),document.body.classList.remove(r?g.light:g.dark)},query(){return window.matchMedia("(prefers-color-scheme: dark)")},getSystemTheme(r){var t;return((t=n.query().matches)!=null?t:r==="dark")?"dark":"light"},addListener(r){const t=n.query(),i=a=>{r(a.matches?"dark":"light")};return typeof t.addListener=="function"?t.addListener(i):t.addEventListener("change",i),()=>{typeof t.removeListener=="function"?t.removeListener(i):t.removeEventListener("change",i)}},preventTransition(){const r=document.createElement("style");return r.appendChild(document.createTextNode("*{-webkit-transition:none!important;-moz-transition:none!important;-o-transition:none!important;-ms-transition:none!important;transition:none!important}")),document.head.appendChild(r),()=>{window.getComputedStyle(document.body),requestAnimationFrame(()=>{requestAnimationFrame(()=>{document.head.removeChild(r)})})}}};return n}var X="chakra-ui-color-mode";function L(e){return{ssr:!1,type:"localStorage",get(o){if(!(globalThis!=null&&globalThis.document))return o;let n;try{n=localStorage.getItem(e)||o}catch{}return n||o},set(o){try{localStorage.setItem(e,o)}catch{}}}}var ee=L(X),M=()=>{};function S(e,o){return e.type==="cookie"&&e.ssr?e.get(o):o}function O(e){const{value:o,children:n,options:{useSystemColorMode:r,initialColorMode:t,disableTransitionOnChange:i}={},colorModeManager:a=ee}=e,d=t==="dark"?"dark":"light",[u,p]=l.useState(()=>S(a,d)),[y,b]=l.useState(()=>S(a)),{getSystemTheme:w,setClassName:k,setDataset:x,addListener:$}=l.useMemo(()=>Q({preventTransition:i}),[i]),v=t==="system"&&!u?y:u,c=l.useCallback(m=>{const f=m==="system"?w():m;p(f),k(f==="dark"),x(f),a.set(f)},[a,w,k,x]);A(()=>{t==="system"&&b(w())},[]),l.useEffect(()=>{const m=a.get();if(m){c(m);return}if(t==="system"){c("system");return}c(d)},[a,d,t,c]);const C=l.useCallback(()=>{c(v==="dark"?"light":"dark")},[v,c]);l.useEffect(()=>{if(r)return $(c)},[r,$,c]);const N=l.useMemo(()=>({colorMode:o??v,toggleColorMode:o?M:C,setColorMode:o?M:c,forced:o!==void 0}),[v,C,c,o]);return s.jsx(R.Provider,{value:N,children:n})}O.displayName="ColorModeProvider";var te=["borders","breakpoints","colors","components","config","direction","fonts","fontSizes","fontWeights","letterSpacings","lineHeights","radii","shadows","sizes","space","styles","transition","zIndices"];function re(e){return V(e)?te.every(o=>Object.prototype.hasOwnProperty.call(e,o)):!1}function h(e){return typeof e=="function"}function oe(...e){return o=>e.reduce((n,r)=>r(n),o)}var ne=e=>function(...n){let r=[...n],t=n[n.length-1];return re(t)&&r.length>1?r=r.slice(0,r.length-1):t=e,oe(...r.map(i=>a=>h(i)?i(a):ae(a,i)))(t)},ie=ne(j);function ae(...e){return z({},...e,_)}function _(e,o,n,r){if((h(e)||h(o))&&Object.prototype.hasOwnProperty.call(r,n))return(...t)=>{const i=h(e)?e(...t):e,a=h(o)?o(...t):o;return z({},i,a,_)}}var q=l.createContext({getDocument(){return document},getWindow(){return window}});q.displayName="EnvironmentContext";function I(e){const{children:o,environment:n,disabled:r}=e,t=l.useRef(null),i=l.useMemo(()=>n||{getDocument:()=>{var d,u;return(u=(d=t.current)==null?void 0:d.ownerDocument)!=null?u:document},getWindow:()=>{var d,u;return(u=(d=t.current)==null?void 
0:d.ownerDocument.defaultView)!=null?u:window}},[n]),a=!r||!n;return s.jsxs(q.Provider,{value:i,children:[o,a&&s.jsx("span",{id:"__chakra_env",hidden:!0,ref:t})]})}I.displayName="EnvironmentProvider";var se=e=>{const{children:o,colorModeManager:n,portalZIndex:r,resetScope:t,resetCSS:i=!0,theme:a={},environment:d,cssVarsRoot:u,disableEnvironment:p,disableGlobalStyle:y}=e,b=s.jsx(I,{environment:d,disabled:p,children:o});return s.jsx(D,{theme:a,cssVarsRoot:u,children:s.jsxs(O,{colorModeManager:n,options:a.config,children:[i?s.jsx(J,{scope:t}):s.jsx(B,{}),!y&&s.jsx(F,{}),r?s.jsx(G,{zIndex:r,children:b}):b]})})},le=e=>function({children:n,theme:r=e,toastOptions:t,...i}){return s.jsxs(se,{theme:r,...i,children:[s.jsx(W,{value:t==null?void 0:t.defaultOptions,children:n}),s.jsx(K,{...t})]})},de=le(j);const ue=()=>l.useMemo(()=>({colorScheme:"dark",fontFamily:"'Inter Variable', sans-serif",components:{ScrollArea:{defaultProps:{scrollbarSize:10},styles:{scrollbar:{"&:hover":{backgroundColor:"var(--invokeai-colors-baseAlpha-300)"}},thumb:{backgroundColor:"var(--invokeai-colors-baseAlpha-300)"}}}}}),[]),ce=L("@@invokeai-color-mode");function me({children:e}){const{i18n:o}=H(),n=o.dir(),r=l.useMemo(()=>ie({...U,direction:n}),[n]);l.useEffect(()=>{document.body.dir=n},[n]);const t=ue();return s.jsx(Z,{theme:t,children:s.jsx(de,{theme:r,colorModeManager:ce,toastOptions:Y,children:e})})}const ve=l.memo(me);export{ve as default};
9	invokeai/frontend/web/dist/assets/ThemeLocaleProvider-f5f9aabf.css	vendored	Normal file
File diff suppressed because one or more lines are too long
156	invokeai/frontend/web/dist/assets/index-c553e366.js	vendored
File diff suppressed because one or more lines are too long
157	invokeai/frontend/web/dist/assets/index-f820e2e3.js	vendored	Normal file
File diff suppressed because one or more lines are too long
BIN	invokeai/frontend/web/dist/assets/inter-cyrillic-ext-wght-normal-1c3007b8.woff2	vendored	Normal file
Binary file not shown.
BIN	invokeai/frontend/web/dist/assets/inter-cyrillic-wght-normal-eba94878.woff2	vendored	Normal file
Binary file not shown.
BIN	invokeai/frontend/web/dist/assets/inter-greek-ext-wght-normal-81f77e51.woff2	vendored	Normal file
Binary file not shown.
BIN	invokeai/frontend/web/dist/assets/inter-greek-wght-normal-d92c6cbc.woff2	vendored	Normal file
Binary file not shown.
BIN	invokeai/frontend/web/dist/assets/inter-latin-ext-wght-normal-a2bfd9fe.woff2	vendored	Normal file
Binary file not shown.
BIN	invokeai/frontend/web/dist/assets/inter-latin-wght-normal-88df0b5a.woff2	vendored	Normal file
Binary file not shown.
BIN	invokeai/frontend/web/dist/assets/inter-vietnamese-wght-normal-15df7612.woff2	vendored	Normal file
Binary file not shown.
2	invokeai/frontend/web/dist/index.html	vendored
@@ -15,7 +15,7 @@
      margin: 0;
    }
  </style>
  <script type="module" crossorigin src="./assets/index-c553e366.js"></script>
  <script type="module" crossorigin src="./assets/index-f820e2e3.js"></script>
</head>

<body dir="ltr">
166	invokeai/frontend/web/dist/locales/de.json	vendored
@@ -113,7 +113,14 @@
    "images": "Bilder",
    "copy": "Kopieren",
    "download": "Runterladen",
    "setCurrentImage": "Setze aktuelle Bild"
    "setCurrentImage": "Setze aktuelle Bild",
    "featuresWillReset": "Wenn Sie dieses Bild löschen, werden diese Funktionen sofort zurückgesetzt.",
    "deleteImageBin": "Gelöschte Bilder werden an den Papierkorb Ihres Betriebssystems gesendet.",
    "unableToLoad": "Galerie kann nicht geladen werden",
    "downloadSelection": "Auswahl herunterladen",
    "currentlyInUse": "Dieses Bild wird derzeit in den folgenden Funktionen verwendet:",
    "deleteImagePermanent": "Gelöschte Bilder können nicht wiederhergestellt werden.",
    "autoAssignBoardOnClick": "Board per Klick automatisch zuweisen"
  },
  "hotkeys": {
    "keyboardShortcuts": "Tastenkürzel",
@@ -323,7 +330,8 @@
    },
    "nodesHotkeys": "Knoten Tastenkürzel",
    "addNodes": {
      "title": "Knotenpunkt hinzufügen"
      "title": "Knotenpunkt hinzufügen",
      "desc": "Öffnet das Menü zum Hinzufügen von Knoten"
    }
  },
  "modelManager": {
@@ -429,7 +437,43 @@
    "customConfigFileLocation": "Benutzerdefinierte Konfiguration Datei Speicherort",
    "baseModel": "Basis Modell",
    "convertToDiffusers": "Konvertiere zu Diffusers",
    "diffusersModels": "Diffusers"
    "diffusersModels": "Diffusers",
    "noCustomLocationProvided": "Kein benutzerdefinierter Standort angegeben",
    "onnxModels": "Onnx",
    "vaeRepoID": "VAE-Repo-ID",
    "weightedSum": "Gewichtete Summe",
    "syncModelsDesc": "Wenn Ihre Modelle nicht mit dem Backend synchronisiert sind, können Sie sie mit dieser Option aktualisieren. Dies ist im Allgemeinen praktisch, wenn Sie Ihre models.yaml-Datei manuell aktualisieren oder Modelle zum InvokeAI-Stammordner hinzufügen, nachdem die Anwendung gestartet wurde.",
    "vae": "VAE",
    "noModels": "Keine Modelle gefunden",
    "statusConverting": "Konvertieren",
    "sigmoid": "Sigmoid",
    "predictionType": "Vorhersagetyp (für Stable Diffusion 2.x-Modelle und gelegentliche Stable Diffusion 1.x-Modelle)",
    "selectModel": "Wählen Sie Modell aus",
    "repo_id": "Repo-ID",
    "modelSyncFailed": "Modellsynchronisierung fehlgeschlagen",
    "quickAdd": "Schnell hinzufügen",
    "simpleModelDesc": "Geben Sie einen Pfad zu einem lokalen Diffusers-Modell, einem lokalen Checkpoint-/Safetensors-Modell, einer HuggingFace-Repo-ID oder einer Checkpoint-/Diffusers-Modell-URL an.",
    "modelDeleted": "Modell gelöscht",
    "inpainting": "v1 Ausmalen",
    "modelUpdateFailed": "Modellaktualisierung fehlgeschlagen",
    "useCustomConfig": "Benutzerdefinierte Konfiguration verwenden",
    "settings": "Einstellungen",
    "modelConversionFailed": "Modellkonvertierung fehlgeschlagen",
    "syncModels": "Modelle synchronisieren",
    "mergedModelSaveLocation": "Speicherort",
    "modelType": "Modelltyp",
    "modelsMerged": "Modelle zusammengeführt",
    "modelsMergeFailed": "Modellzusammenführung fehlgeschlagen",
    "convertToDiffusersHelpText1": "Dieses Modell wird in das 🧨 Diffusers-Format konvertiert.",
    "modelsSynced": "Modelle synchronisiert",
    "vaePrecision": "VAE-Präzision",
    "mergeModels": "Modelle zusammenführen",
    "interpolationType": "Interpolationstyp",
    "oliveModels": "Olives",
    "variant": "Variante",
    "loraModels": "LoRAs",
    "modelDeleteFailed": "Modell konnte nicht gelöscht werden",
    "mergedModelName": "Zusammengeführter Modellname"
  },
  "parameters": {
    "images": "Bilder",
@@ -716,7 +760,33 @@
    "saveControlImage": "Speichere Referenz Bild",
    "safe": "Speichern",
    "ipAdapterImageFallback": "Kein IP Adapter Bild ausgewählt",
    "resetIPAdapterImage": "Zurücksetzen vom IP Adapter Bild"
    "resetIPAdapterImage": "Zurücksetzen vom IP Adapter Bild",
    "pidi": "PIDI",
    "normalBae": "Normales BAE",
    "mlsdDescription": "Minimalistischer Liniensegmentdetektor",
    "openPoseDescription": "Schätzung der menschlichen Pose mit Openpose",
    "control": "Kontrolle",
    "coarse": "Coarse",
    "crop": "Zuschneiden",
    "pidiDescription": "PIDI-Bildverarbeitung",
    "mediapipeFace": "Mediapipe Gesichter",
    "mlsd": "M-LSD",
    "controlMode": "Steuermodus",
    "cannyDescription": "Canny Ecken Erkennung",
    "lineart": "Lineart",
    "lineartAnimeDescription": "Lineart-Verarbeitung im Anime-Stil",
    "minConfidence": "Minimales Vertrauen",
    "megaControl": "Mega-Kontrolle",
    "autoConfigure": "Prozessor automatisch konfigurieren",
    "normalBaeDescription": "Normale BAE-Verarbeitung",
    "noneDescription": "Es wurde keine Verarbeitung angewendet",
    "openPose": "Openpose",
    "lineartAnime": "Lineart Anime",
    "mediapipeFaceDescription": "Gesichtserkennung mit Mediapipe",
    "canny": "Canny",
    "hedDescription": "Ganzheitlich verschachtelte Kantenerkennung",
    "scribble": "Scribble",
    "maxFaces": "Maximal Anzahl Gesichter"
  },
  "queue": {
    "status": "Status",
@@ -758,7 +828,19 @@
    "enqueueing": "Stapel in der Warteschlange",
    "queueMaxExceeded": "Maximum von {{max_queue_size}} Elementen erreicht, würde {{skip}} Elemente überspringen",
    "cancelBatchFailed": "Problem beim Abbruch vom Stapel",
    "clearQueueAlertDialog2": "bist du sicher die Warteschlange zu leeren?"
    "clearQueueAlertDialog2": "bist du sicher die Warteschlange zu leeren?",
    "pruneSucceeded": "{{item_count}} abgeschlossene Elemente aus der Warteschlange entfernt",
    "pauseSucceeded": "Prozessor angehalten",
    "cancelFailed": "Problem beim Stornieren des Auftrags",
    "pauseFailed": "Problem beim Anhalten des Prozessors",
    "front": "Vorne",
    "pruneTooltip": "Bereinigen Sie {{item_count}} abgeschlossene Aufträge",
    "resumeFailed": "Problem beim wieder aufnehmen von Prozessor",
    "pruneFailed": "Problem beim leeren der Warteschlange",
    "pauseTooltip": "Pause von Prozessor",
    "back": "Hinten",
    "resumeSucceeded": "Prozessor wieder aufgenommen",
    "resumeTooltip": "Prozessor wieder aufnehmen"
  },
  "metadata": {
    "negativePrompt": "Negativ Beschreibung",
@@ -773,7 +855,20 @@
    "noMetaData": "Keine Meta-Data gefunden",
    "width": "Breite",
    "createdBy": "Erstellt von",
    "steps": "Schritte"
    "steps": "Schritte",
    "seamless": "Nahtlos",
    "positivePrompt": "Positiver Prompt",
    "generationMode": "Generierungsmodus",
    "Threshold": "Noise Schwelle",
    "seed": "Samen",
    "perlin": "Perlin Noise",
    "hiresFix": "Optimierung für hohe Auflösungen",
    "initImage": "Erstes Bild",
    "variations": "Samengewichtspaare",
    "vae": "VAE",
    "workflow": "Arbeitsablauf",
    "scheduler": "Scheduler",
    "noRecallParameters": "Es wurden keine Parameter zum Abrufen gefunden"
  },
  "popovers": {
    "noiseUseCPU": {
@@ -811,11 +906,68 @@
    "misses": "Cache Nötig",
    "hits": "Cache Treffer",
    "enable": "Aktivieren",
    "clear": "Leeren"
    "clear": "Leeren",
    "maxCacheSize": "Maximale Cache Größe",
    "cacheSize": "Cache Größe"
  },
  "embedding": {
|
||||
"noMatchingEmbedding": "Keine passenden Embeddings",
|
||||
"addEmbedding": "Embedding hinzufügen",
|
||||
"incompatibleModel": "Inkompatibles Basismodell:"
|
||||
},
|
||||
"nodes": {
|
||||
"booleanPolymorphicDescription": "Eine Sammlung boolescher Werte.",
|
||||
"colorFieldDescription": "Eine RGBA-Farbe.",
|
||||
"conditioningCollection": "Konditionierungssammlung",
|
||||
"addNode": "Knoten hinzufügen",
|
||||
"conditioningCollectionDescription": "Konditionierung kann zwischen Knoten weitergegeben werden.",
|
||||
"colorPolymorphic": "Farbpolymorph",
|
||||
"colorCodeEdgesHelp": "Farbkodieren Sie Kanten entsprechend ihren verbundenen Feldern",
|
||||
"animatedEdges": "Animierte Kanten",
|
||||
"booleanCollectionDescription": "Eine Sammlung boolescher Werte.",
|
||||
"colorField": "Farbe",
|
||||
"collectionItem": "Objekt in Sammlung",
|
||||
"animatedEdgesHelp": "Animieren Sie ausgewählte Kanten und Kanten, die mit ausgewählten Knoten verbunden sind",
|
||||
"cannotDuplicateConnection": "Es können keine doppelten Verbindungen erstellt werden",
|
||||
"booleanPolymorphic": "Boolesche Polymorphie",
|
||||
"colorPolymorphicDescription": "Eine Sammlung von Farben.",
|
||||
"clipFieldDescription": "Tokenizer- und text_encoder-Untermodelle.",
|
||||
"clipField": "Clip",
|
||||
"colorCollection": "Eine Sammlung von Farben.",
|
||||
"boolean": "Boolesche Werte",
|
||||
"currentImage": "Aktuelles Bild",
|
||||
"booleanDescription": "Boolesche Werte sind wahr oder falsch.",
|
||||
"collection": "Sammlung",
|
||||
"cannotConnectInputToInput": "Eingang kann nicht mit Eingang verbunden werden",
|
||||
"conditioningField": "Konditionierung",
|
||||
"cannotConnectOutputToOutput": "Ausgang kann nicht mit Ausgang verbunden werden",
|
||||
"booleanCollection": "Boolesche Werte Sammlung",
|
||||
"cannotConnectToSelf": "Es kann keine Verbindung zu sich selbst hergestellt werden",
|
||||
"colorCodeEdges": "Farbkodierte Kanten",
|
||||
"addNodeToolTip": "Knoten hinzufügen (Umschalt+A, Leertaste)"
|
||||
},
|
||||
"hrf": {
|
||||
"enableHrf": "Aktivieren Sie die Korrektur für hohe Auflösungen",
|
||||
"upscaleMethod": "Vergrößerungsmethoden",
|
||||
"enableHrfTooltip": "Generieren Sie mit einer niedrigeren Anfangsauflösung, skalieren Sie auf die Basisauflösung hoch und führen Sie dann Image-to-Image aus.",
|
||||
"metadata": {
|
||||
"strength": "Hochauflösender Fix Stärke",
|
||||
"enabled": "Hochauflösender Fix aktiviert",
|
||||
"method": "Hochauflösender Fix Methode"
|
||||
},
|
||||
"hrf": "Hochauflösender Fix",
|
||||
"hrfStrength": "Hochauflösende Fix Stärke",
|
||||
"strengthTooltip": "Niedrigere Werte führen zu weniger Details, wodurch potenzielle Artefakte reduziert werden können."
|
||||
},
|
||||
"models": {
|
||||
"noMatchingModels": "Keine passenden Modelle",
|
||||
"loading": "lade",
|
||||
"noMatchingLoRAs": "Keine passenden LoRAs",
|
||||
"noLoRAsAvailable": "Keine LoRAs verfügbar",
|
||||
"noModelsAvailable": "Keine Modelle verfügbar",
|
||||
"selectModel": "Wählen ein Modell aus",
|
||||
"noRefinerModelsInstalled": "Keine SDXL Refiner-Modelle installiert",
|
||||
"noLoRAsInstalled": "Keine LoRAs installiert",
|
||||
"selectLoRA": "Wählen ein LoRA aus"
|
||||
}
|
||||
}
|
||||
|
||||
invokeai/frontend/web/dist/locales/en.json (vendored, 53 changes)
@@ -6,6 +6,7 @@
"flipVertically": "Flip Vertically",
"invokeProgressBar": "Invoke progress bar",
"menu": "Menu",
"mode": "Mode",
"modelSelect": "Model Select",
"modifyConfig": "Modify Config",
"nextImage": "Next Image",
@@ -30,6 +31,10 @@
"cancel": "Cancel",
"changeBoard": "Change Board",
"clearSearch": "Clear Search",
"deleteBoard": "Delete Board",
"deleteBoardAndImages": "Delete Board and Images",
"deleteBoardOnly": "Delete Board Only",
"deletedBoardsCannotbeRestored": "Deleted boards cannot be restored",
"loading": "Loading...",
"menuItemAutoAdd": "Auto-add to this Board",
"move": "Move",
@@ -51,9 +56,12 @@
"cancel": "Cancel",
"close": "Close",
"on": "On",
"checkpoint": "Checkpoint",
"communityLabel": "Community",
"controlNet": "ControlNet",
"controlAdapter": "Control Adapter",
"data": "Data",
"details": "Details",
"ipAdapter": "IP Adapter",
"t2iAdapter": "T2I Adapter",
"darkMode": "Dark Mode",
@@ -65,6 +73,7 @@
"imagePrompt": "Image Prompt",
"imageFailedToLoad": "Unable to Load Image",
"img2img": "Image To Image",
"inpaint": "inpaint",
"langArabic": "العربية",
"langBrPortuguese": "Português do Brasil",
"langDutch": "Nederlands",
@@ -93,6 +102,8 @@
"nodes": "Workflow Editor",
"nodesDesc": "A node based system for the generation of images is under development currently. Stay tuned for updates about this amazing feature.",
"openInNewTab": "Open in New Tab",
"outpaint": "outpaint",
"outputs": "Outputs",
"postProcessDesc1": "Invoke AI offers a wide variety of post processing features. Image Upscaling and Face Restoration are already available in the WebUI. You can access them from the Advanced Options menu of the Text To Image and Image To Image tabs. You can also process images directly, using the image action buttons above the current image display or in the viewer.",
"postProcessDesc2": "A dedicated UI will be released soon to facilitate more advanced post processing workflows.",
"postProcessDesc3": "The Invoke AI Command Line Interface offers various other features including Embiggen.",
@@ -100,7 +111,9 @@
"postProcessing": "Post Processing",
"random": "Random",
"reportBugLabel": "Report Bug",
"safetensors": "Safetensors",
"settingsLabel": "Settings",
"simple": "Simple",
"statusConnected": "Connected",
"statusConvertingModel": "Converting Model",
"statusDisconnected": "Disconnected",
@@ -127,6 +140,7 @@
"statusSavingImage": "Saving Image",
"statusUpscaling": "Upscaling",
"statusUpscalingESRGAN": "Upscaling (ESRGAN)",
"template": "Template",
"training": "Training",
"trainingDesc1": "A dedicated workflow for training your own embeddings and checkpoints using Textual Inversion and Dreambooth from the web interface.",
"trainingDesc2": "InvokeAI already supports training custom embeddourings using Textual Inversion using the main script.",
|
||||
@@ -214,6 +228,7 @@
"setControlImageDimensions": "Set Control Image Dimensions To W/H",
"showAdvanced": "Show Advanced",
"toggleControlNet": "Toggle this ControlNet",
"unstarImage": "Unstar Image",
"w": "W",
"weight": "Weight",
"enableIPAdapter": "Enable IP Adapter",
@@ -279,6 +294,7 @@
"next": "Next",
"status": "Status",
"total": "Total",
"time": "Time",
"pending": "Pending",
"in_progress": "In Progress",
"completed": "Completed",
@@ -286,6 +302,7 @@
"canceled": "Canceled",
"completedIn": "Completed in",
"batch": "Batch",
"batchFieldValues": "Batch Field Values",
"item": "Item",
"session": "Session",
"batchValues": "Batch Values",
@@ -335,6 +352,7 @@
"loading": "Loading",
"loadMore": "Load More",
"maintainAspectRatio": "Maintain Aspect Ratio",
"noImageSelected": "No Image Selected",
"noImagesInGallery": "No Images to Display",
"setCurrentImage": "Set as Current Image",
"showGenerations": "Show Generations",
@@ -583,7 +601,7 @@
"strength": "Image to image strength",
"Threshold": "Noise Threshold",
"variations": "Seed-weight pairs",
"vae": "VAE",
"vae": "VAE",
"width": "Width",
"workflow": "Workflow"
},
@@ -606,6 +624,7 @@
"cannotUseSpaces": "Cannot Use Spaces",
"checkpointFolder": "Checkpoint Folder",
"checkpointModels": "Checkpoints",
"checkpointOrSafetensors": "$t(common.checkpoint) / $t(common.safetensors)",
"clearCheckpointFolder": "Clear Checkpoint Folder",
"closeAdvanced": "Close Advanced",
"config": "Config",
@@ -685,6 +704,7 @@
"nameValidationMsg": "Enter a name for your model",
"noCustomLocationProvided": "No Custom Location Provided",
"noModels": "No Models Found",
"noModelSelected": "No Model Selected",
"noModelsFound": "No Models Found",
"none": "none",
"notLoaded": "not loaded",
@@ -730,6 +750,8 @@
"widthValidationMsg": "Default width of your model."
},
"models": {
"addLora": "Add LoRA",
"esrganModel": "ESRGAN Model",
"loading": "loading",
"noLoRAsAvailable": "No LoRAs available",
"noMatchingLoRAs": "No matching LoRAs",
@@ -1010,6 +1032,7 @@
"maskAdjustmentsHeader": "Mask Adjustments",
"maskBlur": "Blur",
"maskBlurMethod": "Blur Method",
"maskEdge": "Mask Edge",
"negativePromptPlaceholder": "Negative Prompt",
"noiseSettings": "Noise",
"noiseThreshold": "Noise Threshold",
@@ -1057,6 +1080,7 @@
"upscale": "Upscale (Shift + U)",
"upscaleImage": "Upscale Image",
"upscaling": "Upscaling",
"unmasked": "Unmasked",
"useAll": "Use All",
"useCpuNoise": "Use CPU Noise",
"cpuNoise": "CPU Noise",
@@ -1078,6 +1102,7 @@
"dynamicPrompts": "Dynamic Prompts",
"enableDynamicPrompts": "Enable Dynamic Prompts",
"maxPrompts": "Max Prompts",
"promptsPreview": "Prompts Preview",
"promptsWithCount_one": "{{count}} Prompt",
"promptsWithCount_other": "{{count}} Prompts",
"seedBehaviour": {
@@ -1117,7 +1142,10 @@
"displayHelpIcons": "Display Help Icons",
"displayInProgress": "Display Progress Images",
"enableImageDebugging": "Enable Image Debugging",
"enableInformationalPopovers": "Enable Informational Popovers",
"enableInvisibleWatermark": "Enable Invisible Watermark",
"enableNodesEditor": "Enable Nodes Editor",
"enableNSFWChecker": "Enable NSFW Checker",
"experimental": "Experimental",
"favoriteSchedulers": "Favorite Schedulers",
"favoriteSchedulersPlaceholder": "No schedulers favorited",
@@ -1217,7 +1245,8 @@
"sentToImageToImage": "Sent To Image To Image",
"sentToUnifiedCanvas": "Sent to Unified Canvas",
"serverError": "Server Error",
"setCanvasInitialImage": "Set as canvas initial image",
"setAsCanvasInitialImage": "Set as canvas initial image",
"setCanvasInitialImage": "Set canvas initial image",
"setControlImage": "Set as control image",
"setIPAdapterImage": "Set as IP Adapter Image",
"setInitialImage": "Set as initial image",
@@ -1275,11 +1304,15 @@
},
"compositingBlur": {
"heading": "Blur",
"paragraphs": ["The blur radius of the mask."]
"paragraphs": [
"The blur radius of the mask."
]
},
"compositingBlurMethod": {
"heading": "Blur Method",
"paragraphs": ["The method of blur applied to the masked area."]
"paragraphs": [
"The method of blur applied to the masked area."
]
},
"compositingCoherencePass": {
"heading": "Coherence Pass",
@@ -1289,7 +1322,9 @@
},
"compositingCoherenceMode": {
"heading": "Mode",
"paragraphs": ["The mode of the Coherence Pass."]
"paragraphs": [
"The mode of the Coherence Pass."
]
},
"compositingCoherenceSteps": {
"heading": "Steps",
@@ -1307,7 +1342,9 @@
},
"compositingMaskAdjustments": {
"heading": "Mask Adjustments",
"paragraphs": ["Adjust the mask."]
"paragraphs": [
"Adjust the mask."
]
},
"controlNetBeginEnd": {
"heading": "Begin / End Step Percentage",
@@ -1365,7 +1402,9 @@
},
"infillMethod": {
"heading": "Infill Method",
"paragraphs": ["Method to infill the selected area."]
"paragraphs": [
"Method to infill the selected area."
]
},
"lora": {
"heading": "LoRA Weight",
@@ -1,87 +0,0 @@
# Generated axios API client

- [Generated axios API client](#generated-axios-api-client)
  - [Generation](#generation)
    - [Generate the API client from the nodes web server](#generate-the-api-client-from-the-nodes-web-server)
    - [Generate the API client from JSON](#generate-the-api-client-from-json)
      - [Getting the JSON from the nodes web server](#getting-the-json-from-the-nodes-web-server)
      - [Getting the JSON with a python script](#getting-the-json-with-a-python-script)
      - [Generate the API client](#generate-the-api-client)
  - [The generated client](#the-generated-client)
  - [API client customisation](#api-client-customisation)

This API client is generated by an [openapi code generator](https://github.com/ferdikoomen/openapi-typescript-codegen).

All files in `invokeai/frontend/web/src/services/api/` are made by the generator.

## Generation

The axios client may be generated from the OpenAPI schema served by the nodes web server, or from JSON.

### Generate the API client from the nodes web server

We need to start the nodes web server, which serves the OpenAPI schema to the generator.

1. Start the nodes web server.

```bash
# from the repo root
python scripts/invokeai-web.py
```

2. Generate the API client.

```bash
# from invokeai/frontend/web/
yarn api:web
```

### Generate the API client from JSON

The JSON can be acquired from the nodes web server, or with a python script.

#### Getting the JSON from the nodes web server

Start the nodes web server as described above, then download the file.

```bash
# from invokeai/frontend/web/
curl http://localhost:9090/openapi.json -o openapi.json
```

#### Getting the JSON with a python script

Run this python script from the repo root, so it can access the nodes server modules.

The script will output `openapi.json` in the repo root. Then we need to move it to `invokeai/frontend/web/`.

```bash
# from the repo root
python invokeai/app/util/generate_openapi_json.py
mv invokeai/app/util/openapi.json invokeai/frontend/web/services/fixtures/
```

#### Generate the API client

Now we can generate the API client from the JSON.

```bash
# from invokeai/frontend/web/
yarn api:file
```

## The generated client

The client will be written to `invokeai/frontend/web/services/api/`:

- `axios` client
- TS types
- An easily parseable schema, which we can use to generate UI

## API client customisation

The generator has a default `request.ts` file that implements a base `axios` client. The generated client uses this base client.

One shortcoming of this base client is that it does not provide response headers unless the response body is empty. To fix this, we provide our own lightly-patched `request.ts`.

To access the headers, call `getHeaders(response)` on any response from the generated api client. This function is exported from `invokeai/frontend/web/src/services/util/getHeaders.ts`.
@@ -1,21 +0,0 @@
# Events

Events via `socket.io`

## `actions.ts`

Redux actions for all socket events. Payloads all include a timestamp, and optionally some other data.

Any reducer (or middleware) can respond to the actions.

## `middleware.ts`

Redux middleware for events.

Handles dispatching the event actions. Only put logic here if it can't really go anywhere else.

For example, on connect we want to load images to the gallery if it's not populated. This requires dispatching a thunk, so we need to directly dispatch this in the middleware.

## `types.ts`

Hand-written types for the socket events. Cannot generate these from the server, but fortunately they are few and simple.
@@ -1,17 +0,0 @@
# Node Editor Design

WIP

nodes

everything in `src/features/nodes/`

have a look at `state.nodes.invocation`

- on socket connect, if no schema saved, fetch `localhost:9090/openapi.json`, save JSON to `state.nodes.schema`
- on fulfilled schema fetch, `parseSchema()` the schema. this outputs a `Record<string, Invocation>` which is saved to `state.nodes.invocations` - `Invocation` is like a template for the node
- when you add a node, the `Invocation` template is passed to `InvocationComponent.tsx` to build the UI component for that node
- inputs/outputs have field types - and each field type gets a `FieldComponent` which includes a dispatcher to write state changes to redux `nodesSlice`
- `reactflow` sends changes to nodes/edges to redux
- to invoke, `buildNodesGraph()` the state, then send this
- changed onClick Invoke button actions to build the schema, then when schema builds it dispatches the actual network request to create the session - see `session.ts`
@@ -1,17 +0,0 @@
# Package Scripts

WIP walkthrough of `package.json` scripts.

## `theme` & `theme:watch`

These run the Chakra CLI to generate types for the theme, or watch for code changes and re-generate the types.

The CLI essentially monkeypatches Chakra's files in `node_modules`.

## `postinstall`

The `postinstall` script patches a few packages and runs the Chakra CLI to generate types for the theme.

### Patch `@chakra-ui/cli`

See: <https://github.com/chakra-ui/chakra-ui/issues/7394>
@@ -1,43 +1,118 @@
# InvokeAI Web UI

<!-- @import "[TOC]" {cmd="toc" depthFrom=1 depthTo=6 orderedList=false} -->

<!-- code_chunk_output -->

- [InvokeAI Web UI](#invokeai-web-ui)
  - [Stack](#stack)
  - [Core Libraries](#core-libraries)
    - [Redux Toolkit](#redux-toolkit)
    - [Socket\.IO](#socketio)
    - [Chakra UI](#chakra-ui)
    - [KonvaJS](#konvajs)
    - [Vite](#vite)
    - [i18next & Weblate](#i18next--weblate)
    - [openapi-typescript](#openapi-typescript)
    - [reactflow](#reactflow)
    - [zod](#zod)
  - [Client Types Generation](#client-types-generation)
  - [Package Scripts](#package-scripts)
  - [Contributing](#contributing)
    - [Dev Environment](#dev-environment)
      - [VSCode Remote Dev](#vscode-remote-dev)
    - [Production builds](#production-builds)

The UI is a fairly straightforward Typescript React app. The only really fancy stuff is the Unified Canvas.
<!-- /code_chunk_output -->

Code in `invokeai/frontend/web/` if you want to have a look.
The UI is a fairly straightforward Typescript React app.

## Stack
## Core Libraries

State management is Redux via [Redux Toolkit](https://github.com/reduxjs/redux-toolkit). We lean heavily on RTK:
- `createAsyncThunk` for HTTP requests
- `createEntityAdapter` for fetching images and models
- `createListenerMiddleware` for workflows
InvokeAI's UI is made possible by a number of excellent open-source libraries. The most heavily-used are listed below, but there are many others.

The API client and associated types are generated from the OpenAPI schema. See API_CLIENT.md.
### Redux Toolkit

Communication with the server is a mix of HTTP and [socket.io](https://github.com/socketio/socket.io-client) (with a simple socket.io redux middleware to help).
[Redux Toolkit] is used for state management and fetching/caching:

[Chakra-UI](https://github.com/chakra-ui/chakra-ui) for components and styling.
- `RTK-Query` for data fetching and caching
- `createAsyncThunk` for a couple other HTTP requests
- `createEntityAdapter` to normalize things like images and models
- `createListenerMiddleware` for async workflows

[Konva](https://github.com/konvajs/react-konva) for the canvas, but we are pushing the limits of what is feasible with it (and HTML canvas in general). We plan to rebuild it with [PixiJS](https://github.com/pixijs/pixijs) to take advantage of WebGL's improved raster handling.
We use [redux-remember] for persistence.

[Vite](https://vitejs.dev/) for bundling.
### Socket\.IO

Localisation is via [i18next](https://github.com/i18next/react-i18next), but translation happens on our [Weblate](https://hosted.weblate.org/engage/invokeai/) project. Only the English source strings should be changed on this repo.
[Socket.IO] is used for server-to-client events, like generation process and queue state changes.

### Chakra UI

[Chakra UI] is our primary UI library, but we also use a few components from [Mantine v6].

### KonvaJS

[KonvaJS] powers the canvas. In the future, we'd like to explore [PixiJS] or WebGPU.

### Vite

[Vite] is our bundler.

### i18next & Weblate

We use [i18next] for localization, but translation to languages other than English happens on our [Weblate] project. **Only the English source strings should be changed on this repo.**

### openapi-typescript

[openapi-typescript] is used to generate types from the server's OpenAPI schema. See TYPES_CODEGEN.md.

### reactflow

[reactflow] powers the Workflow Editor.

### zod

[zod] schemas are used to model data structures and provide runtime validation.

## Client Types Generation

We use [openapi-typescript] to generate types from the app's OpenAPI schema.

The generated types are written to `invokeai/frontend/web/src/services/api/schema.d.ts`. This file is committed to the repo.

The server must be started and available at <http://127.0.0.1:9090>.

```sh
# from the repo root, start the server
python scripts/invokeai-web.py
# from invokeai/frontend/web/, run the script
yarn typegen
```

## Package Scripts

See `package.json` for all scripts.

Run with `yarn <script name>`.

- `dev`: run the frontend in dev mode, enabling hot reloading
- `build`: run all checks (madge, eslint, prettier, tsc) and then build the frontend
- `typegen`: generate types from the OpenAPI schema (see [Client Types Generation](#client-types-generation))
- `lint:madge`: check frontend for circular dependencies
- `lint:eslint`: check frontend for code quality
- `lint:prettier`: check frontend for code formatting
- `lint:tsc`: check frontend for type issues
- `lint`: run all checks concurrently
- `fix`: run `eslint` and `prettier`, fixing fixable issues

## Contributing

Thanks for your interest in contributing to the InvokeAI Web UI!

We encourage you to ping @psychedelicious and @blessedcoolant on [Discord](https://discord.gg/ZmtBAhwWhy) if you want to contribute, just to touch base and ensure your work doesn't conflict with anything else going on. The project is very active.
We encourage you to ping @psychedelicious and @blessedcoolant on [discord] if you want to contribute, just to touch base and ensure your work doesn't conflict with anything else going on. The project is very active.

### Dev Environment

Install [node](https://nodejs.org/en/download/) and [yarn classic](https://classic.yarnpkg.com/lang/en/).
Install [node] and [yarn classic].

From `invokeai/frontend/web/` run `yarn install` to get everything set up.

@@ -60,3 +135,20 @@ For a number of technical and logistical reasons, we need to commit UI build art
If you submit a PR, there is a good chance we will ask you to include a separate commit with a build of the app.

To build for production, run `yarn build`.

[node]: https://nodejs.org/en/download/
[yarn classic]: https://classic.yarnpkg.com/lang/en/
[discord]: https://discord.gg/ZmtBAhwWhy
[Redux Toolkit]: https://github.com/reduxjs/redux-toolkit
[redux-remember]: https://github.com/zewish/redux-remember
[Socket.IO]: https://github.com/socketio/socket.io
[Chakra UI]: https://github.com/chakra-ui/chakra-ui
[Mantine v6]: https://v6.mantine.dev/
[KonvaJS]: https://github.com/konvajs/react-konva
[PixiJS]: https://github.com/pixijs/pixijs
[Vite]: https://github.com/vitejs/vite
[i18next]: https://github.com/i18next/react-i18next
[Weblate]: https://hosted.weblate.org/engage/invokeai/
[openapi-typescript]: https://github.com/drwpow/openapi-typescript
[reactflow]: https://github.com/xyflow/xyflow
[zod]: https://github.com/colinhacks/zod
invokeai/frontend/web/docs/WORKFLOWS_DESIGN_IMPLEMENTATION.md (new file, 350 lines)
@@ -0,0 +1,350 @@
# Workflows - Design and Implementation

<!-- @import "[TOC]" {cmd="toc" depthFrom=1 depthTo=6 orderedList=false} -->

<!-- code_chunk_output -->

- [Workflows - Design and Implementation](#workflows---design-and-implementation)
  - [Design](#design)
    - [Linear UI](#linear-ui)
    - [Workflow Editor](#workflow-editor)
      - [Workflows](#workflows)
        - [Workflow -> reactflow state -> InvokeAI graph](#workflow---reactflow-state---invokeai-graph)
        - [Nodes vs Invocations](#nodes-vs-invocations)
        - [Workflow Linear View](#workflow-linear-view)
      - [OpenAPI Schema](#openapi-schema)
        - [Field Instances and Templates](#field-instances-and-templates)
        - [Stateful vs Stateless Fields](#stateful-vs-stateless-fields)
        - [Collection and Polymorphic Fields](#collection-and-polymorphic-fields)
  - [Implementation](#implementation)
    - [zod Schemas and Types](#zod-schemas-and-types)
    - [OpenAPI Schema Parsing](#openapi-schema-parsing)
      - [Parsing Field Types](#parsing-field-types)
        - [Primitive Types](#primitive-types)
        - [Complex Types](#complex-types)
        - [Collection Types](#collection-types)
        - [Collection or Scalar Types](#collection-or-scalar-types)
        - [Optional Fields](#optional-fields)
      - [Building Field Input Templates](#building-field-input-templates)
      - [Building Field Output Templates](#building-field-output-templates)
    - [Managing reactflow State](#managing-reactflow-state)
      - [Building Nodes and Edges](#building-nodes-and-edges)
      - [Building a Workflow](#building-a-workflow)
      - [Loading a Workflow](#loading-a-workflow)
    - [Workflow Migrations](#workflow-migrations)

<!-- /code_chunk_output -->

> This document describes, at a high level, the design and implementation of workflows in the InvokeAI frontend. There are a substantial number of implementation details not included, but which are hopefully clear from the code.

InvokeAI's backend uses graphs, composed of **nodes** and **edges**, to process data and generate images.

Nodes have any number of **input fields** and **output fields**. Edges connect nodes together via their inputs and outputs. Fields have data types which dictate how they may be connected.

During execution, a node's outputs may be passed along to any number of other nodes' inputs.

Workflows are an enriched abstraction over a graph.

## Design

InvokeAI provides two ways to build graphs in the frontend: the [Linear UI](#linear-ui) and [Workflow Editor](#workflow-editor).

To better understand the use case and challenges related to workflows, we will review both of these modes.

### Linear UI

This includes the **Text to Image**, **Image to Image** and **Unified Canvas** tabs.

The user-managed parameters on these tabs are stored as simple objects in the application state. When the user invokes, adding a generation to the queue, we internally build a graph from these parameters.

This logic can be fairly complex due to the range of features available and their interactions. Depending on the parameters selected, the graph may be very different. Building graphs in code can be challenging - you are trying to construct a non-linear structure in a linear context.

The simplest graph building logic is for **Text to Image** with an SD1.5 model: [buildLinearTextToImageGraph.ts]

There are many other graph builders in the same directory for different tabs or base models (e.g. SDXL). Some are pretty hairy.

In the Linear UI, we go straight from **simple application state** to **graph** via these builders.

### Workflow Editor

The Workflow Editor is a visual graph editor, allowing users to draw edges from node to node to construct a graph. This is a _far_ more approachable way to create complex graphs.

InvokeAI uses the [reactflow] library to power the Workflow Editor. It provides a graph editor UI and manages its own internal graph state.

#### Workflows

A workflow is a representation of a graph plus additional metadata:

- Name
- Description
- Version
- Notes
- [Exposed fields](#workflow-linear-view)
- Author, tags, category, etc.

Workflows should have other qualities:

- Portable: you should be able to load a workflow created by another person.
- Resilient: you should be able to "upgrade" a workflow as the application changes.
- Abstract: as much as is possible, workflows should not be married to the specific implementation details of the application.

To support these qualities, workflows are serializable, have versioned schemas, and represent graphs as minimally as possible. Fortunately, the reactflow state for nodes and edges works perfectly for this.

##### Workflow -> reactflow state -> InvokeAI graph

Given a workflow, we need to be able to derive reactflow state and/or an InvokeAI graph from it.

The first step - workflow to reactflow state - is very simple. The logic is in [nodesSlice.ts], in the `workflowLoaded` reducer.

The reactflow state is, however, structurally incompatible with our backend's graph structure. When a user invokes on a Workflow, we need to convert the reactflow state into an InvokeAI graph. This is far simpler than the graph building logic from the Linear UI: [buildNodesGraph.ts]
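
As a rough sketch of the conversion (hypothetical names and simplified shapes, not the actual code in [buildNodesGraph.ts]), each reactflow node becomes an invocation carrying the values of its stateful input fields, and each reactflow edge becomes a source/destination field pair:

```ts
// Simplified, hypothetical shapes - the real logic lives in buildNodesGraph.ts.
type RFNode = { id: string; data: { type: string; inputs: Record<string, { value?: unknown }> } };
type RFEdge = { source: string; sourceHandle: string; target: string; targetHandle: string };

// Gather the values of fields that actually hold one (i.e. stateful fields).
const collectFieldValues = (inputs: Record<string, { value?: unknown }>) =>
  Object.fromEntries(
    Object.entries(inputs)
      .filter(([, field]) => field.value !== undefined)
      .map(([name, field]) => [name, field.value])
  );

const buildGraphSketch = (nodes: RFNode[], edges: RFEdge[]) => ({
  // One invocation per node: its type plus the values of its stateful inputs.
  nodes: Object.fromEntries(
    nodes.map((n) => [n.id, { id: n.id, type: n.data.type, ...collectFieldValues(n.data.inputs) }])
  ),
  // Each edge names a (node, field) pair on both ends.
  edges: edges.map((e) => ({
    source: { node_id: e.source, field: e.sourceHandle },
    destination: { node_id: e.target, field: e.targetHandle },
  })),
});
```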

##### Nodes vs Invocations

We often use the terms "node" and "invocation" interchangeably, but they may refer to different things in the frontend.

reactflow [has its own definitions][reactflow-concepts] of "node", "edge" and "handle" which are closely related to InvokeAI graph concepts.

- A reactflow node is related to an InvokeAI invocation. It has a "data" property, which holds the InvokeAI-specific invocation data.
- A reactflow edge is roughly equivalent to an InvokeAI edge.
- A reactflow handle is roughly equivalent to an InvokeAI input or output field.

##### Workflow Linear View

Graphs are very capable data structures, but not everyone wants to work with them all the time.

To allow less technical users - or anyone who wants a less visually noisy workspace - to benefit from the power of nodes, InvokeAI has a workflow feature called the Linear View.

A workflow input field can be added to this Linear View, and its input component can be presented similarly to the Linear UI tabs. Internally, we add the field to the workflow's list of exposed fields.

#### OpenAPI Schema

OpenAPI is a schema specification that can represent complex data structures and relationships. The backend is capable of generating an OpenAPI schema for all invocations.

When the UI connects, it requests this schema and parses each invocation into an **invocation template**. Invocation templates have a number of properties, like title, description and type, but the most important ones are their input and output **field templates**.

Invocation and field templates are the "source of truth" for graphs, because they indicate what the backend is able to process.

When a user adds a new node to their workflow, these templates are used to instantiate a node, with field instances created from the input and output field templates.

##### Field Instances and Templates

Field templates consist of:

- Name: the identifier of the field, its variable name in python
- Type: derived from the field's type annotation in python (e.g. IntegerField, ImageField, MainModelField)
- Constraints: derived from the field's creation args in python (e.g. minimum value for an integer)
- Default value: optionally provided in the field's creation args (e.g. 42 for an integer)

Field instances are created from the templates and have a name, a type and optionally a value.

The type of the field determines the UI components that are rendered for it.

A field instance's name associates it with its template.

##### Stateful vs Stateless Fields

**Stateful** fields store their value in the frontend graph. Think primitives, model identifiers, images, etc. Fields are only stateful if the frontend allows the user to directly input a value for them.

Many field types, however, are **stateless**. An example is a `UNetField`, which contains some data describing a UNet. Users cannot directly provide this data - it is created and consumed in the backend.

Stateless fields do not store their value in the node, so their field instances do not have values.

"Custom" fields will always be treated as stateless fields.
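
For illustration (simplified shapes, not the real types), a stateful and a stateless field instance might look like this:

```ts
// A stateful field stores a user-provided value in the frontend graph:
const imageFieldInstance = {
  name: 'image',
  type: { name: 'ImageField', isCollection: false, isCollectionOrScalar: false },
  value: { image_name: 'some-image.png' }, // hypothetical value shape
};

// A stateless field has no value - its data is created and consumed in the backend:
const unetFieldInstance = {
  name: 'unet',
  type: { name: 'UNetField', isCollection: false, isCollectionOrScalar: false },
};
```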

##### Collection and Polymorphic Fields

Field types have a name and two flags which may identify a field as a **collection** or **polymorphic** field.

If a field is annotated in python as a list, its field type is parsed and flagged as a collection type (e.g. `list[int]`).

If it is annotated as a union of a type and a list of that type, the type will be flagged as a polymorphic type (e.g. `Union[int, list[int]]`). Fields may not be unions of different types (e.g. `Union[int, list[str]]` and `Union[int, str]` are not allowed).
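
Concretely, using the `FieldType` shape shown later in [Parsing Field Types](#parsing-field-types), these annotations would parse roughly as follows (illustrative values only):

```ts
// Illustrative only: how a few python annotations map to parsed field types.
const parsedFieldTypes = {
  'int': { name: 'IntegerField', isCollection: false, isCollectionOrScalar: false },
  'list[int]': { name: 'IntegerField', isCollection: true, isCollectionOrScalar: false },
  'Union[int, list[int]]': { name: 'IntegerField', isCollection: false, isCollectionOrScalar: true },
};
```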

## Implementation

The majority of data structures in the backend are [pydantic] models. Pydantic provides OpenAPI schemas for all models and we then generate TypeScript types from those.

The OpenAPI schema is parsed at runtime into our invocation templates.

Workflows and all related data are modeled in the frontend using [zod]. Related types are inferred from the zod schemas.

> In python, invocations are pydantic models with fields. These fields become node inputs. The invocation's `invoke()` function returns a pydantic model - its output. Like the invocation itself, the output model has any number of fields, which become node outputs.

### zod Schemas and Types

The zod schemas, inferred types, and type guards are in [types/].

Roughly ordered from lowest level to highest:

- `common.ts`: stateful field data, and a couple of other misc types
- `field.ts`: fields - types, values, instances, templates
- `invocation.ts`: invocations and other node types
- `workflow.ts`: workflows and constituents

We customize the OpenAPI schema to include additional properties on invocation and field schemas. To facilitate parsing this schema into templates, we modify/wrap the types from [openapi-types] in `openapi.ts`.

### OpenAPI Schema Parsing

The entrypoint for OpenAPI schema parsing is [parseSchema.ts].

General logic flow:

- Iterate over all invocation schema objects
  - Extract relevant invocation-level attributes (e.g. title, type, version, etc)
  - Iterate over the invocation's input fields
    - [Parse each field's type](#parsing-field-types)
    - [Build a field input template](#building-field-input-templates) from the type - either a stateful template or "generic" stateless template
  - Iterate over the invocation's output fields
    - Parse the field's type (same as inputs)
    - [Build a field output template](#building-field-output-templates)
  - Assemble the attributes and fields into an invocation template

Most of these involve very straightforward `reduce`s, but the less intuitive steps are detailed below.

#### Parsing Field Types

Field types are represented as structured objects:

```ts
type FieldType = {
  name: string;
  isCollection: boolean;
  isCollectionOrScalar: boolean;
};
```

The parsing logic is in `parseFieldType.ts`.

There are 4 general cases for field type parsing.

##### Primitive Types

When a field is annotated as a primitive value (e.g. `int`, `str`, `float`), the field type parsing is fairly straightforward. The field is represented by a simple OpenAPI **schema object**, which has a `type` property.

We create a field type name from this `type` string (e.g. `string` -> `StringField`).
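
A minimal sketch of that mapping (hypothetical helper, not the real `parseFieldType.ts` code):

```ts
// Hypothetical sketch: map an OpenAPI primitive `type` to a field type name.
const PRIMITIVE_TYPE_NAMES: Record<string, string> = {
  integer: 'IntegerField',
  number: 'FloatField',
  string: 'StringField',
  boolean: 'BooleanField',
};

const parsePrimitiveSketch = (schema: { type: string }) => ({
  name: PRIMITIVE_TYPE_NAMES[schema.type],
  isCollection: false,
  isCollectionOrScalar: false,
});
```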

##### Complex Types

When a field is annotated as a pydantic model (e.g. `ImageField`, `MainModelField`, `ControlField`), it is represented as a **reference object**. Reference objects are pointers to another schema or reference object within the schema.

We need to **dereference** the schema to pull these out. Dereferencing may require recursion. We use the reference object's name directly for the field type name.

> Unfortunately, at this time, we've had limited success using external libraries to dereference at runtime, so we do this ourselves.

##### Collection Types

When a field is annotated as a list of a single type, the schema object has an `items` property. This may be a schema object or a reference object and must be parsed to determine the item type.

We use the item type for the field type name, adding `isCollection: true` to the field type.

##### Collection or Scalar Types

When a field is annotated as a union of a type and a list of that type, the schema object has an `anyOf` property, which holds a list of valid types for the union.

After verifying that the union has two members (a type and a list of the same type), we use the type for the field type name, adding `isCollectionOrScalar: true` to the field type.

##### Optional Fields

In OpenAPI v3.1, when an object is optional, it is put into an `anyOf` along with a primitive schema object with `type: 'null'`.

Handling this adds a fair bit of complexity, as we now must filter out the `'null'` types and work with the remaining types as described above.

If there is a single remaining schema object, we recursively call `parseFieldType()` to parse it.
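
A minimal sketch of the null-filtering step (hypothetical names):

```ts
// Hypothetical sketch: an optional field arrives as anyOf: [T, { type: 'null' }].
// Drop the null member, then parse what remains with the logic described above.
type SchemaOrRef = { type?: string; $ref?: string; anyOf?: SchemaOrRef[] };

const withoutNull = (anyOf: SchemaOrRef[]): SchemaOrRef[] =>
  anyOf.filter((member) => member.type !== 'null');
```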

#### Building Field Input Templates

Now that we have a field type, we can build an input template for the field.

Stateful fields all get a function to build their template, while stateless fields are constructed directly. This is possible because stateless fields have no default value or constraints.

See [buildFieldInputTemplate.ts].
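
A sketch of that split (hypothetical names; the real builders also apply defaults and constraints from the schema):

```ts
// Hypothetical sketch: stateful field types get a dedicated builder,
// stateless ones share a generic template shape.
type FieldType = { name: string; isCollection: boolean; isCollectionOrScalar: boolean };

const STATEFUL_BUILDERS: Record<string, (t: FieldType) => object> = {
  IntegerField: (t) => ({ type: t, default: 0 }), // plus min/max etc. in reality
  StringField: (t) => ({ type: t, default: '' }),
};

const buildInputTemplateSketch = (fieldType: FieldType) =>
  STATEFUL_BUILDERS[fieldType.name]?.(fieldType) ?? { type: fieldType }; // generic stateless template
```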

#### Building Field Output Templates

Field outputs are similar to stateless fields - they do not have any value in the frontend. When building their templates, we don't need a special function for each field type.

See [buildFieldOutputTemplate.ts].

### Managing reactflow State

As described above, the workflow editor state is essentially the reactflow state, plus some extra metadata.

We provide reactflow with an array of nodes and edges via redux, and a number of [event handlers][reactflow-events]. These handlers dispatch redux actions, managing nodes and edges.

The pieces of redux state relevant to workflows are:

- `state.nodes.nodes`: the reactflow nodes state
- `state.nodes.edges`: the reactflow edges state
- `state.nodes.workflow`: the workflow metadata

#### Building Nodes and Edges

A reactflow node has a few important top-level properties:

- `id`: unique identifier
- `type`: a string that maps to a react component to render the node
- `position`: XY coordinates
- `data`: arbitrary data

When the user adds a node, we build **invocation node data**, storing it in `data`. Invocation properties (e.g. type, version, label, etc.) are copied from the invocation template. Inputs and outputs are built from the invocation template's field templates.

See [buildInvocationNode.ts].
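
Putting that together, an invocation node in reactflow state looks roughly like this (illustrative values; field instances elided):

```ts
// Illustrative only - not the exact shape of the real types.
const invocationNodeExample = {
  id: 'a1b2c3',           // unique identifier
  type: 'invocation',     // maps to the react component used to render it
  position: { x: 100, y: 200 },
  data: {
    type: 'resize',       // hypothetical InvokeAI invocation type
    version: '1.0.0',
    label: '',
    inputs: {},           // field instances built from the template's field templates
    outputs: {},          // likewise
  },
};
```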

Edges are managed by reactflow, but briefly, they consist of:

- `source`: id of the source node
- `sourceHandle`: id of the source node handle (output field)
- `target`: id of the target node
- `targetHandle`: id of the target node handle (input field)

> Edge creation is gated behind validation logic. This validation compares the input and output field types and overall graph state.
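
An edge, then, is just four ids tying an output field to an input field (illustrative values):

```ts
const edgeExample = {
  source: 'a1b2c3',       // id of the source node
  sourceHandle: 'image',  // output field name on the source node
  target: 'd4e5f6',       // id of the target node
  targetHandle: 'image',  // input field name on the target node
};
```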

#### Building a Workflow

Building a workflow entity is as simple as dropping the nodes, edges and metadata into an object.

Each node and edge is parsed with a zod schema, which serves to strip out any unneeded data.

See [buildWorkflow.ts].
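
For example, because zod's `parse()` strips unknown keys by default, running a node through even a minimal schema drops transient reactflow state (a sketch, not the real schemas):

```ts
import { z } from 'zod';

const zNodeSketch = z.object({
  id: z.string(),
  type: z.string(),
  position: z.object({ x: z.number(), y: z.number() }),
  data: z.object({ type: z.string(), version: z.string() }), // heavily simplified
});

const cleanNode = zNodeSketch.parse({
  id: 'a1b2c3',
  type: 'invocation',
  position: { x: 0, y: 0 },
  data: { type: 'resize', version: '1.0.0' },
  selected: true, // unknown key - stripped by parse()
});
```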

#### Loading a Workflow

Workflows may be loaded from external sources or the user's local instance. In all cases, the workflow needs to be handled with care, as an untrusted object.

Loading has a few stages which may throw or warn if there are problems:

- Parsing the workflow data structure itself, [migrating](#workflow-migrations) it if necessary (throws)
- Check for a template for each node (warns)
- Check each node's version against its template (warns)
- Validate the source and target of each edge (warns)

This validation occurs in [validateWorkflow.ts].

If there are no fatal errors, the workflow is then stored in redux state.

### Workflow Migrations

When the workflow schema changes, we may need to perform some data migrations. This occurs as workflows are loaded. The zod schemas for each workflow schema version are retained to facilitate migrations.

Previous schemas are in folders in `invokeai/frontend/web/src/features/nodes/types/`, e.g. `v1/`.

Migration logic is in [migrations.ts].
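
A sketch of the general shape (hypothetical; the real logic in [migrations.ts] validates with the retained zod schemas before transforming):

```ts
// Hypothetical sketch: step a workflow forward one schema version at a time.
type AnyWorkflow = { meta: { version: string } } & Record<string, unknown>;

const migrateSketch = (workflow: AnyWorkflow): AnyWorkflow => {
  let wf = workflow;
  if (wf.meta.version === '1.0.0') {
    // validate with the retained v1 schema, then transform into the v2 shape
    wf = { ...wf, meta: { ...wf.meta, version: '2.0.0' } };
  }
  return wf; // already current
};
```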

<!-- links -->

[pydantic]: https://github.com/pydantic/pydantic 'pydantic'
[zod]: https://github.com/colinhacks/zod 'zod'
[openapi-types]: https://github.com/kogosoftwarellc/open-api/tree/main/packages/openapi-types 'openapi-types'
[reactflow]: https://github.com/xyflow/xyflow 'reactflow'
[reactflow-concepts]: https://reactflow.dev/learn/concepts/terms-and-definitions
[reactflow-events]: https://reactflow.dev/api-reference/react-flow#event-handlers
[buildWorkflow.ts]: ../src/features/nodes/util/workflow/buildWorkflow.ts
[nodesSlice.ts]: ../src/features/nodes/store/nodesSlice.ts
[buildLinearTextToImageGraph.ts]: ../src/features/nodes/util/graph/buildLinearTextToImageGraph.ts
[buildNodesGraph.ts]: ../src/features/nodes/util/graph/buildNodesGraph.ts
[buildInvocationNode.ts]: ../src/features/nodes/util/node/buildInvocationNode.ts
[validateWorkflow.ts]: ../src/features/nodes/util/workflow/validateWorkflow.ts
[migrations.ts]: ../src/features/nodes/util/workflow/migrations.ts
[parseSchema.ts]: ../src/features/nodes/util/schema/parseSchema.ts
[buildFieldInputTemplate.ts]: ../src/features/nodes/util/schema/buildFieldInputTemplate.ts
[buildFieldOutputTemplate.ts]: ../src/features/nodes/util/schema/buildFieldOutputTemplate.ts
@@ -19,7 +19,6 @@
"dist"
],
"scripts": {
"prepare": "cd ../../../ && husky install invokeai/frontend/web/.husky",
"dev": "concurrently \"vite dev\" \"yarn run theme:watch\"",
"dev:host": "concurrently \"vite dev --host\" \"yarn run theme:watch\"",
"build": "yarn run lint && vite build",
@@ -30,7 +29,7 @@
"lint:prettier": "prettier --check .",
"lint:tsc": "tsc --noEmit",
"lint": "concurrently -g -n eslint,prettier,tsc,madge -c cyan,green,magenta,yellow \"yarn run lint:eslint\" \"yarn run lint:prettier\" \"yarn run lint:tsc\" \"yarn run lint:madge\"",
"fix": "eslint --fix . && prettier --loglevel warn --write . && tsc --noEmit",
"fix": "eslint --fix . && prettier --log-level warn --write .",
"lint-staged": "lint-staged",
"postinstall": "patch-package && yarn run theme",
"theme": "chakra-cli tokens src/theme/theme.ts",
@@ -80,7 +79,6 @@
"lodash-es": "^4.17.21",
"nanostores": "^0.9.4",
"new-github-issue-url": "^1.0.0",
"openapi-fetch": "^0.8.1",
"overlayscrollbars": "^2.4.4",
"overlayscrollbars-react": "^0.5.3",
"patch-package": "^8.0.0",
@@ -133,6 +131,8 @@
"concurrently": "^8.2.2",
"eslint": "^8.53.0",
"eslint-config-prettier": "^9.0.0",
"eslint-plugin-i18next": "^6.0.3",
"eslint-plugin-path": "^1.2.2",
"eslint-plugin-react": "^7.33.2",
"eslint-plugin-react-hooks": "^4.6.0",
"husky": "^8.0.3",
@@ -90,7 +90,16 @@
"openInNewTab": "In einem neuem Tab öffnen",
"statusProcessing": "wird bearbeitet",
"linear": "Linear",
"imagePrompt": "Bild Prompt"
"imagePrompt": "Bild Prompt",
"checkpoint": "Checkpoint",
"inpaint": "inpaint",
"simple": "Einfach",
"template": "Vorlage",
"outputs": "Ausgabe",
"data": "Daten",
"safetensors": "Safetensors",
"outpaint": "outpaint",
"details": "Details"
},
"gallery": {
"generations": "Erzeugungen",
@@ -110,7 +119,6 @@
"preparingDownload": "bereite Download vor",
"preparingDownloadFailed": "Problem beim Download vorbereiten",
"deleteImage": "Lösche Bild",
"images": "Bilder",
"copy": "Kopieren",
"download": "Runterladen",
"setCurrentImage": "Setze aktuelle Bild",
@@ -120,7 +128,8 @@
"downloadSelection": "Auswahl herunterladen",
"currentlyInUse": "Dieses Bild wird derzeit in den folgenden Funktionen verwendet:",
"deleteImagePermanent": "Gelöschte Bilder können nicht wiederhergestellt werden.",
"autoAssignBoardOnClick": "Board per Klick automatisch zuweisen"
"autoAssignBoardOnClick": "Board per Klick automatisch zuweisen",
"noImageSelected": "Kein Bild ausgewählt"
},
"hotkeys": {
"keyboardShortcuts": "Tastenkürzel",
@@ -454,7 +463,7 @@
"quickAdd": "Schnell hinzufügen",
"simpleModelDesc": "Geben Sie einen Pfad zu einem lokalen Diffusers-Modell, einem lokalen Checkpoint-/Safetensors-Modell, einer HuggingFace-Repo-ID oder einer Checkpoint-/Diffusers-Modell-URL an.",
"modelDeleted": "Modell gelöscht",
"inpainting": "v1 Ausmalen",
"inpainting": "v1 Inpainting",
"modelUpdateFailed": "Modellaktualisierung fehlgeschlagen",
"useCustomConfig": "Benutzerdefinierte Konfiguration verwenden",
"settings": "Einstellungen",
@@ -473,7 +482,10 @@
"variant": "Variante",
"loraModels": "LoRAs",
"modelDeleteFailed": "Modell konnte nicht gelöscht werden",
"mergedModelName": "Zusammengeführter Modellname"
"mergedModelName": "Zusammengeführter Modellname",
"checkpointOrSafetensors": "$t(common.checkpoint) / $t(common.safetensors)",
"formMessageDiffusersModelLocation": "Diffusers Modell Speicherort",
"noModelSelected": "Kein Modell ausgewählt"
},
"parameters": {
"images": "Bilder",
@@ -683,7 +695,8 @@
"exitViewer": "Betrachten beenden",
"menu": "Menü",
"loadMore": "Mehr laden",
"invokeProgressBar": "Invoke Fortschrittsanzeige"
"invokeProgressBar": "Invoke Fortschrittsanzeige",
"mode": "Modus"
},
"boards": {
"autoAddBoard": "Automatisches Hinzufügen zum Ordner",
@@ -701,7 +714,11 @@
"changeBoard": "Ordner wechseln",
"loading": "Laden...",
"clearSearch": "Suche leeren",
"bottomMessage": "Durch das Löschen dieses Ordners und seiner Bilder werden alle Funktionen zurückgesetzt, die sie derzeit verwenden."
"bottomMessage": "Durch das Löschen dieses Ordners und seiner Bilder werden alle Funktionen zurückgesetzt, die sie derzeit verwenden.",
"deleteBoardOnly": "Nur Ordner löschen",
"deleteBoard": "Löschen Ordner",
"deleteBoardAndImages": "Löschen Ordner und Bilder",
"deletedBoardsCannotbeRestored": "Gelöschte Ordner könnte nicht wiederhergestellt werden"
},
"controlnet": {
"showAdvanced": "Zeige Erweitert",
@@ -786,7 +803,8 @@
"canny": "Canny",
"hedDescription": "Ganzheitlich verschachtelte Kantenerkennung",
"scribble": "Scribble",
"maxFaces": "Maximal Anzahl Gesichter"
"maxFaces": "Maximal Anzahl Gesichter",
"unstarImage": "Markierung aufheben"
},
"queue": {
"status": "Status",
@@ -840,7 +858,8 @@
"pauseTooltip": "Pause von Prozessor",
"back": "Hinten",
"resumeSucceeded": "Prozessor wieder aufgenommen",
"resumeTooltip": "Prozessor wieder aufnehmen"
"resumeTooltip": "Prozessor wieder aufnehmen",
"time": "Zeit"
},
"metadata": {
"negativePrompt": "Negativ Beschreibung",
@@ -868,7 +887,8 @@
"vae": "VAE",
"workflow": "Arbeitsablauf",
"scheduler": "Scheduler",
"noRecallParameters": "Es wurden keine Parameter zum Abrufen gefunden"
"noRecallParameters": "Es wurden keine Parameter zum Abrufen gefunden",
"recallParameters": "Recall Parameters"
},
"popovers": {
"noiseUseCPU": {
@@ -944,7 +964,9 @@
"booleanCollection": "Boolesche Werte Sammlung",
"cannotConnectToSelf": "Es kann keine Verbindung zu sich selbst hergestellt werden",
"colorCodeEdges": "Farbkodierte Kanten",
"addNodeToolTip": "Knoten hinzufügen (Umschalt+A, Leertaste)"
"addNodeToolTip": "Knoten hinzufügen (Umschalt+A, Leertaste)",
"boardField": "Ordner",
"boardFieldDescription": "Ein Galerie Ordner"
},
"hrf": {
"enableHrf": "Aktivieren Sie die Korrektur für hohe Auflösungen",
@@ -968,6 +990,8 @@
"selectModel": "Wählen ein Modell aus",
"noRefinerModelsInstalled": "Keine SDXL Refiner-Modelle installiert",
"noLoRAsInstalled": "Keine LoRAs installiert",
"selectLoRA": "Wählen ein LoRA aus"
"selectLoRA": "Wählen ein LoRA aus",
"esrganModel": "ESRGAN Modell",
"addLora": "LoRA hinzufügen"
}
}
@@ -1,16 +1,19 @@
{
"accessibility": {
"copyMetadataJson": "Copy metadata JSON",
"createIssue": "Create Issue",
"exitViewer": "Exit Viewer",
"flipHorizontally": "Flip Horizontally",
"flipVertically": "Flip Vertically",
"invokeProgressBar": "Invoke progress bar",
"menu": "Menu",
"mode": "Mode",
"modelSelect": "Model Select",
"modifyConfig": "Modify Config",
"nextImage": "Next Image",
"previousImage": "Previous Image",
"reset": "Reset",
"resetUI": "$t(accessibility.reset) UI",
"rotateClockwise": "Rotate Clockwise",
"rotateCounterClockwise": "Rotate Counter-Clockwise",
"showGalleryPanel": "Show Gallery Panel",
@@ -30,9 +33,15 @@
"cancel": "Cancel",
"changeBoard": "Change Board",
"clearSearch": "Clear Search",
"deleteBoard": "Delete Board",
"deleteBoardAndImages": "Delete Board and Images",
"deleteBoardOnly": "Delete Board Only",
"deletedBoardsCannotbeRestored": "Deleted boards cannot be restored",
"loading": "Loading...",
"menuItemAutoAdd": "Auto-add to this Board",
"move": "Move",
"movingImagesToBoard_one": "Moving {{count}} image to board:",
"movingImagesToBoard_other": "Moving {{count}} images to board:",
"myBoard": "My Board",
"noMatching": "No matching Boards",
"searchBoard": "Search Boards...",
@@ -44,27 +53,39 @@
"common": {
"accept": "Accept",
"advanced": "Advanced",
"ai": "ai",
"areYouSure": "Are you sure?",
"auto": "Auto",
"back": "Back",
"batch": "Batch Manager",
"cancel": "Cancel",
"copyError": "$t(gallery.copy) Error",
"close": "Close",
"on": "On",
"checkpoint": "Checkpoint",
"communityLabel": "Community",
"controlNet": "ControlNet",
"controlAdapter": "Control Adapter",
"data": "Data",
"details": "Details",
"ipAdapter": "IP Adapter",
"t2iAdapter": "T2I Adapter",
"darkMode": "Dark Mode",
"discordLabel": "Discord",
"dontAskMeAgain": "Don't ask me again",
"error": "Error",
"file": "File",
"folder": "Folder",
"format": "format",
"generate": "Generate",
"githubLabel": "Github",
"hotkeysLabel": "Hotkeys",
"imagePrompt": "Image Prompt",
"imageFailedToLoad": "Unable to Load Image",
"img2img": "Image To Image",
"inpaint": "inpaint",
"input": "Input",
"installed": "Installed",
"langArabic": "العربية",
"langBrPortuguese": "Português do Brasil",
"langDutch": "Nederlands",
@@ -92,7 +113,10 @@
"nodeEditor": "Node Editor",
"nodes": "Workflow Editor",
"nodesDesc": "A node based system for the generation of images is under development currently. Stay tuned for updates about this amazing feature.",
"notInstalled": "Not $t(common.installed)",
"openInNewTab": "Open in New Tab",
"outpaint": "outpaint",
"outputs": "Outputs",
"postProcessDesc1": "Invoke AI offers a wide variety of post processing features. Image Upscaling and Face Restoration are already available in the WebUI. You can access them from the Advanced Options menu of the Text To Image and Image To Image tabs. You can also process images directly, using the image action buttons above the current image display or in the viewer.",
"postProcessDesc2": "A dedicated UI will be released soon to facilitate more advanced post processing workflows.",
"postProcessDesc3": "The Invoke AI Command Line Interface offers various other features including Embiggen.",
|
||||
@@ -100,7 +124,10 @@
|
||||
"postProcessing": "Post Processing",
|
||||
"random": "Random",
|
||||
"reportBugLabel": "Report Bug",
|
||||
"safetensors": "Safetensors",
|
||||
"settingsLabel": "Settings",
|
||||
"simple": "Simple",
|
||||
"somethingWentWrong": "Something went wrong",
|
||||
"statusConnected": "Connected",
|
||||
"statusConvertingModel": "Converting Model",
|
||||
"statusDisconnected": "Disconnected",
|
||||
@@ -127,11 +154,13 @@
|
||||
"statusSavingImage": "Saving Image",
|
||||
"statusUpscaling": "Upscaling",
|
||||
"statusUpscalingESRGAN": "Upscaling (ESRGAN)",
|
||||
"template": "Template",
|
||||
"training": "Training",
|
||||
"trainingDesc1": "A dedicated workflow for training your own embeddings and checkpoints using Textual Inversion and Dreambooth from the web interface.",
|
||||
"trainingDesc2": "InvokeAI already supports training custom embeddourings using Textual Inversion using the main script.",
|
||||
"txt2img": "Text To Image",
|
||||
"unifiedCanvas": "Unified Canvas",
|
||||
"unknown": "Unknown",
|
||||
"upload": "Upload"
|
||||
},
|
||||
"controlnet": {
|
||||
@@ -214,6 +243,7 @@
|
||||
"setControlImageDimensions": "Set Control Image Dimensions To W/H",
|
||||
"showAdvanced": "Show Advanced",
|
||||
"toggleControlNet": "Toggle this ControlNet",
|
||||
"unstarImage": "Unstar Image",
|
||||
"w": "W",
|
||||
"weight": "Weight",
|
||||
"enableIPAdapter": "Enable IP Adapter",
|
||||
@@ -237,6 +267,7 @@
|
||||
"embedding": {
|
||||
"addEmbedding": "Add Embedding",
|
||||
"incompatibleModel": "Incompatible base model:",
|
||||
"noEmbeddingsLoaded": "No Embeddings Loaded",
|
||||
"noMatchingEmbedding": "No matching Embeddings"
|
||||
},
|
||||
"queue": {
|
||||
@@ -279,6 +310,7 @@
|
||||
"next": "Next",
|
||||
"status": "Status",
|
||||
"total": "Total",
|
||||
"time": "Time",
|
||||
"pending": "Pending",
|
||||
"in_progress": "In Progress",
|
||||
"completed": "Completed",
|
||||
@@ -286,6 +318,7 @@
|
||||
"canceled": "Canceled",
|
||||
"completedIn": "Completed in",
|
||||
"batch": "Batch",
|
||||
"batchFieldValues": "Batch Field Values",
|
||||
"item": "Item",
|
||||
"session": "Session",
|
||||
"batchValues": "Batch Values",
|
||||
@@ -313,7 +346,8 @@
|
||||
"enableFailed": "Problem Enabling Invocation Cache",
|
||||
"disable": "Disable",
|
||||
"disableSucceeded": "Invocation Cache Disabled",
|
||||
"disableFailed": "Problem Disabling Invocation Cache"
|
||||
"disableFailed": "Problem Disabling Invocation Cache",
|
||||
"useCache": "Use Cache"
|
||||
},
|
||||
"gallery": {
|
||||
"allImagesLoaded": "All Images Loaded",
|
||||
@@ -322,6 +356,9 @@
|
||||
"autoSwitchNewImages": "Auto-Switch to New Images",
|
||||
"copy": "Copy",
|
||||
"currentlyInUse": "This image is currently in use in the following features:",
|
||||
"drop": "Drop",
|
||||
"dropOrUpload": "$t(gallery.drop) or Upload",
|
||||
"dropToUpload": "$t(gallery.drop) to Upload",
|
||||
"deleteImage": "Delete Image",
|
||||
"deleteImageBin": "Deleted images will be sent to your operating system's Bin.",
|
||||
"deleteImagePermanent": "Deleted images cannot be restored.",
|
||||
@@ -331,10 +368,11 @@
|
||||
"galleryImageSize": "Image Size",
|
||||
"gallerySettings": "Gallery Settings",
|
||||
"generations": "Generations",
|
||||
"images": "Images",
|
||||
"image": "image",
|
||||
"loading": "Loading",
|
||||
"loadMore": "Load More",
|
||||
"maintainAspectRatio": "Maintain Aspect Ratio",
|
||||
"noImageSelected": "No Image Selected",
|
||||
"noImagesInGallery": "No Images to Display",
|
||||
"setCurrentImage": "Set as Current Image",
|
||||
"showGenerations": "Show Generations",
|
||||
@@ -342,6 +380,7 @@
|
||||
"singleColumnLayout": "Single Column Layout",
|
||||
"unableToLoad": "Unable to load Gallery",
|
||||
"uploads": "Uploads",
|
||||
"deleteSelection": "Delete Selection",
|
||||
"downloadSelection": "Download Selection",
|
||||
"preparingDownload": "Preparing Download",
|
||||
"preparingDownloadFailed": "Problem Preparing Download"
|
||||
@@ -560,6 +599,7 @@
|
||||
},
|
||||
"metadata": {
|
||||
"cfgScale": "CFG scale",
|
||||
"cfgRescaleMultiplier": "$t(parameters.cfgRescaleMultiplier)",
|
||||
"createdBy": "Created By",
|
||||
"fit": "Image to image fit",
|
||||
"generationMode": "Generation Mode",
|
||||
@@ -583,7 +623,7 @@
|
||||
"strength": "Image to image strength",
|
||||
"Threshold": "Noise Threshold",
|
||||
"variations": "Seed-weight pairs",
|
||||
"vae": "VAE",
|
||||
"vae": "VAE",
|
||||
"width": "Width",
|
||||
"workflow": "Workflow"
|
||||
},
|
||||
@@ -606,10 +646,12 @@
|
||||
"cannotUseSpaces": "Cannot Use Spaces",
|
||||
"checkpointFolder": "Checkpoint Folder",
|
||||
"checkpointModels": "Checkpoints",
|
||||
"checkpointOrSafetensors": "$t(common.checkpoint) / $t(common.safetensors)",
|
||||
"clearCheckpointFolder": "Clear Checkpoint Folder",
|
||||
"closeAdvanced": "Close Advanced",
|
||||
"config": "Config",
|
||||
"configValidationMsg": "Path to the config file of your model.",
|
||||
"conversionNotSupported": "Conversion Not Supported",
|
||||
"convert": "Convert",
|
||||
"convertingModelBegin": "Converting Model. Please wait.",
|
||||
"convertToDiffusers": "Convert To Diffusers",
|
||||
@@ -685,6 +727,7 @@
|
||||
"nameValidationMsg": "Enter a name for your model",
|
||||
"noCustomLocationProvided": "No Custom Location Provided",
|
||||
"noModels": "No Models Found",
|
||||
"noModelSelected": "No Model Selected",
|
||||
"noModelsFound": "No Models Found",
|
||||
"none": "none",
|
||||
"notLoaded": "not loaded",
|
||||
@@ -730,8 +773,11 @@
|
||||
"widthValidationMsg": "Default width of your model."
|
||||
},
|
||||
"models": {
|
||||
"addLora": "Add LoRA",
|
||||
"esrganModel": "ESRGAN Model",
|
||||
"loading": "loading",
|
||||
"noLoRAsAvailable": "No LoRAs available",
|
||||
"noLoRAsLoaded": "No LoRAs Loaded",
|
||||
"noMatchingLoRAs": "No matching LoRAs",
|
||||
"noMatchingModels": "No matching Models",
|
||||
"noModelsAvailable": "No models available",
|
||||
@@ -743,6 +789,7 @@
|
||||
"nodes": {
|
||||
"addNode": "Add Node",
|
||||
"addNodeToolTip": "Add Node (Shift+A, Space)",
|
||||
"addLinearView": "Add to Linear View",
|
||||
"animatedEdges": "Animated Edges",
|
||||
"animatedEdgesHelp": "Animate selected edges and edges connected to selected nodes",
|
||||
"boardField": "Board",
|
||||
@@ -757,9 +804,12 @@
|
||||
"cannotConnectOutputToOutput": "Cannot connect output to output",
|
||||
"cannotConnectToSelf": "Cannot connect to self",
|
||||
"cannotDuplicateConnection": "Cannot create duplicate connections",
|
||||
"nodePack": "Node pack",
|
||||
"clipField": "Clip",
|
||||
"clipFieldDescription": "Tokenizer and text_encoder submodels.",
|
||||
"collection": "Collection",
|
||||
"collectionFieldType": "{{name}} Collection",
|
||||
"collectionOrScalarFieldType": "{{name}} Collection|Scalar",
|
||||
"collectionDescription": "TODO",
|
||||
"collectionItem": "Collection Item",
|
||||
"collectionItemDescription": "TODO",
|
||||
@@ -846,10 +896,15 @@
|
||||
"mainModelField": "Model",
|
||||
"mainModelFieldDescription": "TODO",
|
||||
"maybeIncompatible": "May be Incompatible With Installed",
|
||||
"mismatchedVersion": "Has Mismatched Version",
|
||||
"mismatchedVersion": "Invalid node: node {{node}} of type {{type}} has mismatched version (try updating?)",
|
||||
"missingCanvaInitImage": "Missing canvas init image",
|
||||
"missingCanvaInitMaskImages": "Missing canvas init and mask images",
|
||||
"missingTemplate": "Missing Template",
|
||||
"missingTemplate": "Invalid node: node {{node}} of type {{type}} missing template (not installed?)",
|
||||
"sourceNodeDoesNotExist": "Invalid edge: source/output node {{node}} does not exist",
|
||||
"targetNodeDoesNotExist": "Invalid edge: target/input node {{node}} does not exist",
|
||||
"sourceNodeFieldDoesNotExist": "Invalid edge: source/output field {{node}}.{{field}} does not exist",
|
||||
"targetNodeFieldDoesNotExist": "Invalid edge: target/input field {{node}}.{{field}} does not exist",
|
||||
"deletedInvalidEdge": "Deleted invalid edge {{source}} -> {{target}}",
|
||||
"noConnectionData": "No connection data",
|
||||
"noConnectionInProgress": "No connection in progress",
|
||||
"node": "Node",
|
||||
@@ -863,6 +918,7 @@
|
||||
"noMatchingNodes": "No matching nodes",
|
||||
"noNodeSelected": "No node selected",
|
||||
"nodeOpacity": "Node Opacity",
|
||||
"nodeVersion": "Node Version",
|
||||
"noOutputRecorded": "No outputs recorded",
|
||||
"noOutputSchemaName": "No output schema name found in ref object",
|
||||
"notes": "Notes",
|
||||
@@ -870,6 +926,7 @@
|
||||
"oNNXModelField": "ONNX Model",
|
||||
"oNNXModelFieldDescription": "ONNX model field.",
|
||||
"outputField": "Output Field",
|
||||
"outputFieldInInput": "Output field in input",
|
||||
"outputFields": "Output Fields",
|
||||
"outputNode": "Output node",
|
||||
"outputSchemaNotFound": "Output schema not found",
|
||||
@@ -907,21 +964,36 @@
|
||||
"stringDescription": "Strings are text.",
|
||||
"stringPolymorphic": "String Polymorphic",
|
||||
"stringPolymorphicDescription": "A collection of strings.",
|
||||
"unableToLoadWorkflow": "Unable to Validate Workflow",
|
||||
"unableToLoadWorkflow": "Unable to Load Workflow",
|
||||
"unableToParseEdge": "Unable to parse edge",
|
||||
"unableToParseNode": "Unable to parse node",
|
||||
"unableToUpdateNode": "Unable to update node",
|
||||
"unableToValidateWorkflow": "Unable to Validate Workflow",
|
||||
"unableToMigrateWorkflow": "Unable to Migrate Workflow",
|
||||
"unknownErrorValidatingWorkflow": "Unknown error validating workflow",
|
||||
"inputFieldTypeParseError": "Unable to parse type of input field {{node}}.{{field}} ({{message}})",
|
||||
"outputFieldTypeParseError": "Unable to parse type of output field {{node}}.{{field}} ({{message}})",
|
||||
"unableToExtractSchemaNameFromRef": "unable to extract schema name from ref",
|
||||
"unsupportedArrayItemType": "unsupported array item type \"{{type}}\"",
|
||||
"unsupportedAnyOfLength": "too many union members ({{count}})",
|
||||
"unsupportedMismatchedUnion": "mismatched CollectionOrScalar type with base types {{firstType}} and {{secondType}}",
|
||||
"unableToParseFieldType": "unable to parse field type",
|
||||
"uNetField": "UNet",
|
||||
"uNetFieldDescription": "UNet submodel.",
|
||||
"unhandledInputProperty": "Unhandled input property",
|
||||
"unhandledOutputProperty": "Unhandled output property",
|
||||
"unknownField": "Unknown Field",
|
||||
"unknownField": "Unknown field",
|
||||
"unknownFieldType": "$t(nodes.unknownField) type: {{type}}",
|
||||
"unknownNode": "Unknown Node",
|
||||
"unknownNodeType": "Unknown node type",
|
||||
"unknownTemplate": "Unknown Template",
|
||||
"unknownInput": "Unknown input: {{name}}",
|
||||
"unkownInvocation": "Unknown Invocation type",
|
||||
"unknownOutput": "Unknown output: {{name}}",
|
||||
"updateNode": "Update Node",
|
||||
"updateAllNodes": "Update All Nodes",
|
||||
"updateApp": "Update App",
|
||||
"updateAllNodes": "Update Nodes",
|
||||
"allNodesUpdated": "All Nodes Updated",
|
||||
"unableToUpdateNodes_one": "Unable to update {{count}} node",
|
||||
"unableToUpdateNodes_other": "Unable to update {{count}} nodes",
|
||||
"vaeField": "Vae",
|
||||
@@ -930,6 +1002,8 @@
|
||||
"vaeModelFieldDescription": "TODO",
|
||||
"validateConnections": "Validate Connections and Graph",
|
||||
"validateConnectionsHelp": "Prevent invalid connections from being made, and invalid graphs from being invoked",
|
||||
"unableToGetWorkflowVersion": "Unable to get workflow schema version",
|
||||
"unrecognizedWorkflowVersion": "Unrecognized workflow schema version {{version}}",
|
||||
"version": "Version",
|
||||
"versionUnknown": " Version Unknown",
|
||||
"workflow": "Workflow",
|
||||
@@ -959,6 +1033,8 @@
|
||||
"setType": "Set cancel type"
|
||||
},
|
||||
"cfgScale": "CFG Scale",
|
||||
"cfgRescaleMultiplier": "CFG Rescale Multiplier",
|
||||
"cfgRescale": "CFG Rescale",
|
||||
"clipSkip": "CLIP Skip",
|
||||
"clipSkipWithLayerCount": "CLIP Skip {{layerCount}}",
|
||||
"closeViewer": "Close Viewer",
|
||||
@@ -1010,6 +1086,7 @@
|
||||
"maskAdjustmentsHeader": "Mask Adjustments",
|
||||
"maskBlur": "Blur",
|
||||
"maskBlurMethod": "Blur Method",
|
||||
"maskEdge": "Mask Edge",
|
||||
"negativePromptPlaceholder": "Negative Prompt",
|
||||
"noiseSettings": "Noise",
|
||||
"noiseThreshold": "Noise Threshold",
|
||||
@@ -1057,7 +1134,9 @@
|
||||
"upscale": "Upscale (Shift + U)",
|
||||
"upscaleImage": "Upscale Image",
|
||||
"upscaling": "Upscaling",
|
||||
"unmasked": "Unmasked",
|
||||
"useAll": "Use All",
|
||||
"useSize": "Use Size",
|
||||
"useCpuNoise": "Use CPU Noise",
|
||||
"cpuNoise": "CPU Noise",
|
||||
"gpuNoise": "GPU Noise",
|
||||
@@ -1078,6 +1157,7 @@
|
||||
"dynamicPrompts": "Dynamic Prompts",
|
||||
"enableDynamicPrompts": "Enable Dynamic Prompts",
|
||||
"maxPrompts": "Max Prompts",
|
||||
"promptsPreview": "Prompts Preview",
|
||||
"promptsWithCount_one": "{{count}} Prompt",
|
||||
"promptsWithCount_other": "{{count}} Prompts",
|
||||
"seedBehaviour": {
|
||||
@@ -1117,7 +1197,10 @@
|
||||
"displayHelpIcons": "Display Help Icons",
|
||||
"displayInProgress": "Display Progress Images",
|
||||
"enableImageDebugging": "Enable Image Debugging",
|
||||
"enableInformationalPopovers": "Enable Informational Popovers",
|
||||
"enableInvisibleWatermark": "Enable Invisible Watermark",
|
||||
"enableNodesEditor": "Enable Nodes Editor",
|
||||
"enableNSFWChecker": "Enable NSFW Checker",
|
||||
"experimental": "Experimental",
|
||||
"favoriteSchedulers": "Favorite Schedulers",
|
||||
"favoriteSchedulersPlaceholder": "No schedulers favorited",
|
||||
@@ -1143,7 +1226,8 @@
|
||||
"clearIntermediatesWithCount_other": "Clear {{count}} Intermediates",
|
||||
"intermediatesCleared_one": "Cleared {{count}} Intermediate",
|
||||
"intermediatesCleared_other": "Cleared {{count}} Intermediates",
|
||||
"intermediatesClearedFailed": "Problem Clearing Intermediates"
|
||||
"intermediatesClearedFailed": "Problem Clearing Intermediates",
|
||||
"reloadingIn": "Reloading in"
|
||||
},
|
||||
"toast": {
|
||||
"addedToBoard": "Added to board",
|
||||
@@ -1171,6 +1255,7 @@
|
||||
"initialImageNotSet": "Initial Image Not Set",
|
||||
"initialImageNotSetDesc": "Could not load initial image",
|
||||
"initialImageSet": "Initial Image Set",
|
||||
"invalidUpload": "Invalid Upload",
|
||||
"loadedWithWarnings": "Workflow Loaded with Warnings",
|
||||
"maskSavedAssets": "Mask Saved to Assets",
|
||||
"maskSentControlnetAssets": "Mask Sent to ControlNet & Assets",
|
||||
@@ -1217,7 +1302,8 @@
|
||||
"sentToImageToImage": "Sent To Image To Image",
|
||||
"sentToUnifiedCanvas": "Sent to Unified Canvas",
|
||||
"serverError": "Server Error",
|
||||
"setCanvasInitialImage": "Set as canvas initial image",
|
||||
"setAsCanvasInitialImage": "Set as canvas initial image",
|
||||
"setCanvasInitialImage": "Set canvas initial image",
|
||||
"setControlImage": "Set as control image",
|
||||
"setIPAdapterImage": "Set as IP Adapter Image",
|
||||
"setInitialImage": "Set as initial image",
|
||||
@@ -1387,6 +1473,12 @@
|
||||
"Controls how much your prompt influences the generation process."
|
||||
]
|
||||
},
|
||||
"paramCFGRescaleMultiplier": {
|
||||
"heading": "CFG Rescale Multiplier",
|
||||
"paragraphs": [
|
||||
"Rescale multiplier for CFG guidance, used for models trained using zero-terminal SNR (ztsnr). Suggested value 0.7."
|
||||
]
|
||||
},
|
||||
"paramDenoisingStrength": {
|
||||
"heading": "Denoising Strength",
|
||||
"paragraphs": [
|
||||
@@ -1478,7 +1570,7 @@
|
||||
"clearCanvasHistoryConfirm": "Are you sure you want to clear the canvas history?",
|
||||
"clearCanvasHistoryMessage": "Clearing the canvas history leaves your current canvas intact, but irreversibly clears the undo and redo history.",
|
||||
"clearHistory": "Clear History",
|
||||
"clearMask": "Clear Mask",
|
||||
"clearMask": "Clear Mask (Shift+C)",
|
||||
"colorPicker": "Color Picker",
|
||||
"copyToClipboard": "Copy to Clipboard",
|
||||
"cursorPosition": "Cursor Position",
|
||||
@@ -1505,6 +1597,7 @@
|
||||
"redo": "Redo",
|
||||
"resetView": "Reset View",
|
||||
"saveBoxRegionOnly": "Save Box Region Only",
|
||||
"saveMask": "Save $t(unifiedCanvas.mask)",
|
||||
"saveToGallery": "Save To Gallery",
|
||||
"scaledBoundingBox": "Scaled Bounding Box",
|
||||
"showCanvasDebugInfo": "Show Additional Canvas Info",
|
||||
|
||||
@@ -98,7 +98,6 @@
|
||||
"deleteImage": "Eliminar Imagen",
|
||||
"deleteImageBin": "Las imágenes eliminadas se enviarán a la papelera de tu sistema operativo.",
|
||||
"deleteImagePermanent": "Las imágenes eliminadas no se pueden restaurar.",
|
||||
"images": "Imágenes",
|
||||
"assets": "Activos",
|
||||
"autoAssignBoardOnClick": "Asignación automática de tableros al hacer clic"
|
||||
},
|
||||
|
||||
@@ -89,7 +89,9 @@
|
||||
"t2iAdapter": "Adattatore T2I",
|
||||
"controlAdapter": "Adattatore di Controllo",
|
||||
"controlNet": "ControlNet",
|
||||
"auto": "Automatico"
|
||||
"auto": "Automatico",
|
||||
"simple": "Semplice",
|
||||
"details": "Dettagli"
|
||||
},
|
||||
"gallery": {
|
||||
"generations": "Generazioni",
|
||||
@@ -108,7 +110,6 @@
|
||||
"deleteImage": "Elimina l'immagine",
|
||||
"deleteImagePermanent": "Le immagini eliminate non possono essere ripristinate.",
|
||||
"deleteImageBin": "Le immagini eliminate verranno spostate nel Cestino del tuo sistema operativo.",
|
||||
"images": "Immagini",
|
||||
"assets": "Risorse",
|
||||
"autoAssignBoardOnClick": "Assegna automaticamente la bacheca al clic",
|
||||
"featuresWillReset": "Se elimini questa immagine, quelle funzionalità verranno immediatamente ripristinate.",
|
||||
@@ -120,7 +121,8 @@
|
||||
"setCurrentImage": "Imposta come immagine corrente",
|
||||
"preparingDownload": "Preparazione del download",
|
||||
"preparingDownloadFailed": "Problema durante la preparazione del download",
|
||||
"downloadSelection": "Scarica gli elementi selezionati"
|
||||
"downloadSelection": "Scarica gli elementi selezionati",
|
||||
"noImageSelected": "Nessuna immagine selezionata"
|
||||
},
|
||||
"hotkeys": {
|
||||
"keyboardShortcuts": "Tasti rapidi",
|
||||
@@ -395,7 +397,7 @@
|
||||
"deleteModel": "Elimina modello",
|
||||
"deleteConfig": "Elimina configurazione",
|
||||
"deleteMsg1": "Sei sicuro di voler eliminare questo modello da InvokeAI?",
|
||||
"deleteMsg2": "Questo eliminerà il modello dal disco se si trova nella cartella principale di InvokeAI. Se utilizzi una cartella personalizzata, il modello NON verrà eliminato dal disco.",
|
||||
"deleteMsg2": "Questo eliminerà il modello dal disco se si trova nella cartella principale di InvokeAI. Se invece utilizzi una cartella personalizzata, il modello NON verrà eliminato dal disco.",
|
||||
"formMessageDiffusersModelLocation": "Ubicazione modelli diffusori",
|
||||
"formMessageDiffusersModelLocationDesc": "Inseriscine almeno uno.",
|
||||
"formMessageDiffusersVAELocation": "Ubicazione file VAE",
|
||||
@@ -429,7 +431,7 @@
|
||||
"mergedModelSaveLocation": "Ubicazione salvataggio",
|
||||
"convertToDiffusersHelpText1": "Questo modello verrà convertito nel formato 🧨 Diffusore.",
|
||||
"custom": "Personalizzata",
|
||||
"convertToDiffusersHelpText3": "Il file checkpoint su disco SARÀ eliminato se si trova nella cartella principale di InvokeAI. Se si trova in una posizione personalizzata, NON verrà eliminato.",
|
||||
"convertToDiffusersHelpText3": "Il file Checkpoint su disco verrà eliminato se si trova nella cartella principale di InvokeAI. Se si trova invece in una posizione personalizzata, NON verrà eliminato.",
|
||||
"v1": "v1",
|
||||
"pathToCustomConfig": "Percorso alla configurazione personalizzata",
|
||||
"modelThree": "Modello 3",
|
||||
@@ -456,7 +458,7 @@
|
||||
"modelDeleteFailed": "Impossibile eliminare il modello",
|
||||
"noCustomLocationProvided": "Nessuna posizione personalizzata fornita",
|
||||
"convertingModelBegin": "Conversione del modello. Attendere prego.",
|
||||
"importModels": "Importa modelli",
|
||||
"importModels": "Importa Modelli",
|
||||
"modelsSynced": "Modelli sincronizzati",
|
||||
"modelSyncFailed": "Sincronizzazione modello non riuscita",
|
||||
"settings": "Impostazioni",
|
||||
@@ -474,7 +476,8 @@
|
||||
"closeAdvanced": "Chiudi Avanzate",
|
||||
"modelType": "Tipo di modello",
|
||||
"customConfigFileLocation": "Posizione del file di configurazione personalizzato",
|
||||
"vaePrecision": "Precisione VAE"
|
||||
"vaePrecision": "Precisione VAE",
|
||||
"noModelSelected": "Nessun modello selezionato"
|
||||
},
|
||||
"parameters": {
|
||||
"images": "Immagini",
|
||||
@@ -601,7 +604,9 @@
|
||||
"seamlessX": "Senza cuciture X",
|
||||
"seamlessY": "Senza cuciture Y",
|
||||
"imageActions": "Azioni Immagine",
|
||||
"aspectRatioFree": "Libere"
|
||||
"aspectRatioFree": "Libere",
|
||||
"maskEdge": "Maschera i bordi",
|
||||
"unmasked": "No maschera"
|
||||
},
|
||||
"settings": {
|
||||
"models": "Modelli",
|
||||
@@ -642,7 +647,10 @@
|
||||
"clearIntermediatesWithCount_one": "Cancella {{count}} immagine intermedia",
|
||||
"clearIntermediatesWithCount_many": "Cancella {{count}} immagini intermedie",
|
||||
"clearIntermediatesWithCount_other": "Cancella {{count}} immagini intermedie",
|
||||
"clearIntermediatesDisabled": "La coda deve essere vuota per cancellare le immagini intermedie"
|
||||
"clearIntermediatesDisabled": "La coda deve essere vuota per cancellare le immagini intermedie",
|
||||
"enableNSFWChecker": "Abilita controllo NSFW",
|
||||
"enableInvisibleWatermark": "Abilita filigrana invisibile",
|
||||
"enableInformationalPopovers": "Abilita testo informativo a comparsa"
|
||||
},
|
||||
"toast": {
|
||||
"tempFoldersEmptied": "Cartella temporanea svuotata",
|
||||
@@ -727,7 +735,8 @@
|
||||
"setCanvasInitialImage": "Imposta come immagine iniziale della tela",
|
||||
"workflowLoaded": "Flusso di lavoro caricato",
|
||||
"setIPAdapterImage": "Imposta come immagine per l'Adattatore IP",
|
||||
"problemSavingMaskDesc": "Impossibile salvare la maschera"
|
||||
"problemSavingMaskDesc": "Impossibile salvare la maschera",
|
||||
"setAsCanvasInitialImage": "Imposta come immagine iniziale della tela"
|
||||
},
|
||||
"tooltip": {
|
||||
"feature": {
|
||||
@@ -828,7 +837,8 @@
|
||||
"modifyConfig": "Modifica configurazione",
|
||||
"menu": "Menu",
|
||||
"showGalleryPanel": "Mostra il pannello Galleria",
|
||||
"loadMore": "Carica altro"
|
||||
"loadMore": "Carica altro",
|
||||
"mode": "Modalità"
|
||||
},
|
||||
"ui": {
|
||||
"hideProgressImages": "Nascondi avanzamento immagini",
|
||||
@@ -1026,7 +1036,11 @@
|
||||
"unableToParseEdge": "Impossibile analizzare il bordo",
|
||||
"latentsCollectionDescription": "Le immagini latenti possono essere passate tra i nodi.",
|
||||
"imageCollection": "Raccolta Immagini",
|
||||
"loRAModelField": "LoRA"
|
||||
"loRAModelField": "LoRA",
|
||||
"updateAllNodes": "Aggiorna tutti i nodi",
|
||||
"unableToUpdateNodes_one": "Impossibile aggiornare {{count}} nodo",
|
||||
"unableToUpdateNodes_many": "Impossibile aggiornare {{count}} nodi",
|
||||
"unableToUpdateNodes_other": "Impossibile aggiornare {{count}} nodi"
|
||||
},
|
||||
"boards": {
|
||||
"autoAddBoard": "Aggiungi automaticamente bacheca",
|
||||
@@ -1044,7 +1058,11 @@
|
||||
"noMatching": "Nessuna bacheca corrispondente",
|
||||
"selectBoard": "Seleziona una Bacheca",
|
||||
"uncategorized": "Non categorizzato",
|
||||
"downloadBoard": "Scarica la bacheca"
|
||||
"downloadBoard": "Scarica la bacheca",
|
||||
"deleteBoardOnly": "Elimina solo la Bacheca",
|
||||
"deleteBoard": "Elimina Bacheca",
|
||||
"deleteBoardAndImages": "Elimina Bacheca e Immagini",
|
||||
"deletedBoardsCannotbeRestored": "Le bacheche eliminate non possono essere ripristinate"
|
||||
},
|
||||
"controlnet": {
|
||||
"contentShuffleDescription": "Rimescola il contenuto di un'immagine",
|
||||
@@ -1085,7 +1103,7 @@
|
||||
"none": "Nessuno",
|
||||
"incompatibleBaseModel": "Modello base incompatibile:",
|
||||
"pidiDescription": "Elaborazione immagini PIDI",
|
||||
"fill": "Riempire",
|
||||
"fill": "Riempie",
|
||||
"colorMapDescription": "Genera una mappa dei colori dall'immagine",
|
||||
"lineartAnimeDescription": "Elaborazione lineart in stile anime",
|
||||
"imageResolution": "Risoluzione dell'immagine",
|
||||
@@ -1179,7 +1197,9 @@
|
||||
"clearQueueAlertDialog2": "Sei sicuro di voler cancellare la coda?",
|
||||
"item": "Elemento",
|
||||
"graphFailedToQueue": "Impossibile mettere in coda il grafico",
|
||||
"queueMaxExceeded": "È stato superato il limite massimo di {{max_queue_size}} e {{skip}} elementi verrebbero saltati"
|
||||
"queueMaxExceeded": "È stato superato il limite massimo di {{max_queue_size}} e {{skip}} elementi verrebbero saltati",
|
||||
"batchFieldValues": "Valori Campi Lotto",
|
||||
"time": "Tempo"
|
||||
},
|
||||
"embedding": {
|
||||
"noMatchingEmbedding": "Nessun Incorporamento corrispondente",
|
||||
@@ -1195,7 +1215,9 @@
|
||||
"selectModel": "Seleziona un modello",
|
||||
"selectLoRA": "Seleziona un LoRA",
|
||||
"noRefinerModelsInstalled": "Nessun modello SDXL Refiner installato",
|
||||
"noLoRAsInstalled": "Nessun LoRA installato"
|
||||
"noLoRAsInstalled": "Nessun LoRA installato",
|
||||
"esrganModel": "Modello ESRGAN",
|
||||
"addLora": "Aggiungi LoRA"
|
||||
},
|
||||
"invocationCache": {
|
||||
"disable": "Disabilita",
|
||||
@@ -1227,7 +1249,8 @@
|
||||
"promptsWithCount_one": "{{count}} Prompt",
|
||||
"promptsWithCount_many": "{{count}} Prompt",
|
||||
"promptsWithCount_other": "{{count}} Prompt",
|
||||
"dynamicPrompts": "Prompt dinamici"
|
||||
"dynamicPrompts": "Prompt dinamici",
|
||||
"promptsPreview": "Anteprima dei prompt"
|
||||
},
|
||||
"popovers": {
|
||||
"paramScheduler": {
|
||||
|
||||
@@ -438,7 +438,15 @@
|
||||
"useSeed": "シード値を使用",
|
||||
"useAll": "すべてを使用",
|
||||
"info": "情報",
|
||||
"showOptionsPanel": "オプションパネルを表示"
|
||||
"showOptionsPanel": "オプションパネルを表示",
|
||||
"aspectRatioFree": "自由",
|
||||
"invoke": {
|
||||
"noControlImageForControlAdapter": "コントロールアダプター #{{number}} に画像がありません",
|
||||
"noModelForControlAdapter": "コントロールアダプター #{{number}} のモデルが選択されていません。"
|
||||
},
|
||||
"aspectRatio": "縦横比",
|
||||
"iterations": "生成回数",
|
||||
"general": "基本設定"
|
||||
},
|
||||
"settings": {
|
||||
"models": "モデル",
|
||||
@@ -603,7 +611,7 @@
|
||||
"delete": "削除",
|
||||
"controlAdapter_other": "コントロールアダプター",
|
||||
"colorMapTileSize": "タイルサイズ",
|
||||
"ipAdapterImageFallback": "IP Adapterの画像が選択されていません",
|
||||
"ipAdapterImageFallback": "IPアダプターの画像が選択されていません",
|
||||
"mediapipeFaceDescription": "Mediapipeを使用して顔を検出",
|
||||
"depthZoeDescription": "Zoeを使用して深度マップを生成",
|
||||
"setControlImageDimensions": "コントロール画像のサイズを幅と高さにセット",
|
||||
@@ -652,7 +660,7 @@
|
||||
"queueTotal": "合計 {{total}}",
|
||||
"resumeSucceeded": "処理が再開されました",
|
||||
"resumeTooltip": "処理を再開",
|
||||
"resume": "再会",
|
||||
"resume": "再開",
|
||||
"status": "ステータス",
|
||||
"pruneSucceeded": "キューから完了アイテム{{item_count}}件を削除しました",
|
||||
"cancelTooltip": "現在のアイテムをキャンセル",
|
||||
@@ -812,5 +820,13 @@
|
||||
"clear": "クリア",
|
||||
"maxCacheSize": "最大キャッシュサイズ",
|
||||
"cacheSize": "キャッシュサイズ"
|
||||
},
|
||||
"popovers": {
|
||||
"paramRatio": {
|
||||
"heading": "縦横比",
|
||||
"paragraphs": [
|
||||
"生成された画像の縦横比。"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -110,7 +110,6 @@
|
||||
"deleteImageBin": "Verwijderde afbeeldingen worden naar de prullenbak van je besturingssysteem gestuurd.",
|
||||
"deleteImagePermanent": "Verwijderde afbeeldingen kunnen niet worden hersteld.",
|
||||
"assets": "Eigen onderdelen",
|
||||
"images": "Afbeeldingen",
|
||||
"autoAssignBoardOnClick": "Ken automatisch bord toe bij klikken",
|
||||
"featuresWillReset": "Als je deze afbeelding verwijdert, dan worden deze functies onmiddellijk teruggezet.",
|
||||
"loading": "Bezig met laden",
|
||||
|
||||
@@ -101,7 +101,6 @@
|
||||
"deleteImagePermanent": "Удаленные изображения невозможно восстановить.",
|
||||
"deleteImageBin": "Удаленные изображения будут отправлены в корзину вашей операционной системы.",
|
||||
"deleteImage": "Удалить изображение",
|
||||
"images": "Изображения",
|
||||
"assets": "Ресурсы",
|
||||
"autoAssignBoardOnClick": "Авто-назначение доски по клику"
|
||||
},
|
||||
|
||||
@@ -90,7 +90,16 @@
|
||||
"controlAdapter": "Control Adapter",
|
||||
"controlNet": "ControlNet",
|
||||
"on": "开",
|
||||
"auto": "自动"
|
||||
"auto": "自动",
|
||||
"checkpoint": "Checkpoint",
|
||||
"inpaint": "内补重绘",
|
||||
"simple": "简单",
|
||||
"template": "模板",
|
||||
"outputs": "输出",
|
||||
"data": "数据",
|
||||
"safetensors": "Safetensors",
|
||||
"outpaint": "外扩绘制",
|
||||
"details": "详情"
|
||||
},
|
||||
"gallery": {
|
||||
"generations": "生成的图像",
|
||||
@@ -109,7 +118,6 @@
|
||||
"deleteImage": "删除图片",
|
||||
"deleteImageBin": "被删除的图片会发送到你操作系统的回收站。",
|
||||
"deleteImagePermanent": "删除的图片无法被恢复。",
|
||||
"images": "图片",
|
||||
"assets": "素材",
|
||||
"autoAssignBoardOnClick": "点击后自动分配面板",
|
||||
"featuresWillReset": "如果您删除该图像,这些功能会立即被重置。",
|
||||
@@ -121,7 +129,8 @@
|
||||
"setCurrentImage": "设为当前图像",
|
||||
"preparingDownload": "准备下载",
|
||||
"preparingDownloadFailed": "准备下载时出现问题",
|
||||
"downloadSelection": "下载所选内容"
|
||||
"downloadSelection": "下载所选内容",
|
||||
"noImageSelected": "无选中的图像"
|
||||
},
|
||||
"hotkeys": {
|
||||
"keyboardShortcuts": "键盘快捷键",
|
||||
@@ -475,7 +484,9 @@
|
||||
"oliveModels": "Olive",
|
||||
"loraModels": "LoRA",
|
||||
"alpha": "Alpha",
|
||||
"vaePrecision": "VAE 精度"
|
||||
"vaePrecision": "VAE 精度",
|
||||
"checkpointOrSafetensors": "$t(common.checkpoint) / $t(common.safetensors)",
|
||||
"noModelSelected": "无选中的模型"
|
||||
},
|
||||
"parameters": {
|
||||
"images": "图像",
|
||||
@@ -602,7 +613,9 @@
|
||||
"seamlessX&Y": "无缝 X & Y",
|
||||
"aspectRatioFree": "自由",
|
||||
"seamlessX": "无缝 X",
|
||||
"seamlessY": "无缝 Y"
|
||||
"seamlessY": "无缝 Y",
|
||||
"maskEdge": "遮罩边缘",
|
||||
"unmasked": "取消遮罩"
|
||||
},
|
||||
"settings": {
|
||||
"models": "模型",
|
||||
@@ -639,7 +652,10 @@
|
||||
"clearIntermediatesDesc1": "清除中间产物会重置您的画布和 ControlNet 状态。",
|
||||
"intermediatesClearedFailed": "清除中间产物时出现问题",
|
||||
"clearIntermediatesWithCount_other": "清除 {{count}} 个中间产物",
|
||||
"clearIntermediatesDisabled": "队列为空才能清理中间产物"
|
||||
"clearIntermediatesDisabled": "队列为空才能清理中间产物",
|
||||
"enableNSFWChecker": "启用成人内容检测器",
|
||||
"enableInvisibleWatermark": "启用不可见水印",
|
||||
"enableInformationalPopovers": "启用信息弹窗"
|
||||
},
|
||||
"toast": {
|
||||
"tempFoldersEmptied": "临时文件夹已清空",
|
||||
@@ -705,7 +721,7 @@
|
||||
"modelAddFailed": "模型添加失败",
|
||||
"problemDownloadingCanvas": "下载画布时出现问题",
|
||||
"problemMergingCanvas": "合并画布时出现问题",
|
||||
"setCanvasInitialImage": "设为画布初始图像",
|
||||
"setCanvasInitialImage": "设定画布初始图像",
|
||||
"imageUploaded": "图像已上传",
|
||||
"addedToBoard": "已添加到面板",
|
||||
"workflowLoaded": "工作流已加载",
|
||||
@@ -722,7 +738,8 @@
|
||||
"canvasSavedGallery": "画布已保存到图库",
|
||||
"imageUploadFailed": "图像上传失败",
|
||||
"problemImportingMask": "导入遮罩时出现问题",
|
||||
"baseModelChangedCleared_other": "基础模型已更改, 已清除或禁用 {{count}} 个不兼容的子模型"
|
||||
"baseModelChangedCleared_other": "基础模型已更改, 已清除或禁用 {{count}} 个不兼容的子模型",
|
||||
"setAsCanvasInitialImage": "设为画布初始图像"
|
||||
},
|
||||
"unifiedCanvas": {
|
||||
"layer": "图层",
|
||||
@@ -808,7 +825,8 @@
|
||||
"toggleAutoscroll": "切换自动缩放",
|
||||
"menu": "菜单",
|
||||
"showGalleryPanel": "显示图库浮窗",
|
||||
"loadMore": "加载更多"
|
||||
"loadMore": "加载更多",
|
||||
"mode": "模式"
|
||||
},
|
||||
"ui": {
|
||||
"showProgressImages": "显示处理中的图片",
|
||||
@@ -1031,7 +1049,9 @@
|
||||
"integerPolymorphic": "整数多态",
|
||||
"latentsPolymorphic": "Latents 多态",
|
||||
"conditioningField": "条件",
|
||||
"latentsField": "Latents"
|
||||
"latentsField": "Latents",
|
||||
"updateAllNodes": "更新所有节点",
|
||||
"unableToUpdateNodes_other": "{{count}} 个节点无法完成更新"
|
||||
},
|
||||
"controlnet": {
|
||||
"resize": "直接缩放",
|
||||
@@ -1117,7 +1137,8 @@
|
||||
"openPose": "Openpose",
|
||||
"controlAdapter_other": "Control Adapters",
|
||||
"lineartAnime": "Lineart Anime",
|
||||
"canny": "Canny"
|
||||
"canny": "Canny",
|
||||
"unstarImage": "取消收藏图像"
|
||||
},
|
||||
"queue": {
|
||||
"status": "状态",
|
||||
@@ -1176,7 +1197,9 @@
|
||||
"queueTotal": "总计 {{total}}",
|
||||
"enqueueing": "队列中的批次",
|
||||
"queueMaxExceeded": "超出最大值 {{max_queue_size}},将跳过 {{skip}}",
|
||||
"graphFailedToQueue": "节点图加入队列失败"
|
||||
"graphFailedToQueue": "节点图加入队列失败",
|
||||
"batchFieldValues": "批处理值",
|
||||
"time": "时间"
|
||||
},
|
||||
"sdxl": {
|
||||
"refinerStart": "Refiner 开始作用时机",
|
||||
@@ -1234,7 +1257,9 @@
|
||||
"selectModel": "选择一个模型",
|
||||
"selectLoRA": "选择一个 LoRA",
|
||||
"noRefinerModelsInstalled": "无已安装的 SDXL Refiner 模型",
|
||||
"noLoRAsInstalled": "无已安装的 LoRA"
|
||||
"noLoRAsInstalled": "无已安装的 LoRA",
|
||||
"esrganModel": "ESRGAN 模型",
|
||||
"addLora": "添加 LoRA"
|
||||
},
|
||||
"boards": {
|
||||
"autoAddBoard": "自动添加面板",
|
||||
@@ -1252,7 +1277,11 @@
|
||||
"changeBoard": "更改面板",
|
||||
"loading": "加载中...",
|
||||
"clearSearch": "清除检索",
|
||||
"downloadBoard": "下载面板"
|
||||
"downloadBoard": "下载面板",
|
||||
"deleteBoardOnly": "仅删除面板",
|
||||
"deleteBoard": "删除面板",
|
||||
"deleteBoardAndImages": "删除面板和图像",
|
||||
"deletedBoardsCannotbeRestored": "已删除的面板无法被恢复"
|
||||
},
|
||||
"embedding": {
|
||||
"noMatchingEmbedding": "不匹配的 Embedding",
|
||||
@@ -1271,7 +1300,8 @@
|
||||
"combinatorial": "组合生成",
|
||||
"maxPrompts": "最大提示词数",
|
||||
"dynamicPrompts": "动态提示词",
|
||||
"promptsWithCount_other": "{{count}} 个提示词"
|
||||
"promptsWithCount_other": "{{count}} 个提示词",
|
||||
"promptsPreview": "提示词预览"
|
||||
},
|
||||
"popovers": {
|
||||
"compositingMaskAdjustments": {
|
||||
|
||||
@@ -1,61 +1,19 @@
|
||||
import fs from 'node:fs';
|
||||
import openapiTS from 'openapi-typescript';
|
||||
import { COLORS } from './colors.js';
|
||||
|
||||
const OPENAPI_URL = 'http://127.0.0.1:9090/openapi.json';
|
||||
const OUTPUT_FILE = 'src/services/api/schema.d.ts';
|
||||
|
||||
async function main() {
|
||||
process.stdout.write(
|
||||
`Generating types "${OPENAPI_URL}" --> "${OUTPUT_FILE}"...\n\n`
|
||||
`Generating types "${OPENAPI_URL}" --> "${OUTPUT_FILE}"...`
|
||||
);
|
||||
const types = await openapiTS(OPENAPI_URL, {
|
||||
exportType: true,
|
||||
transform: (schemaObject, metadata) => {
|
||||
transform: (schemaObject) => {
|
||||
if ('format' in schemaObject && schemaObject.format === 'binary') {
|
||||
return schemaObject.nullable ? 'Blob | null' : 'Blob';
|
||||
}
|
||||
|
||||
/**
|
||||
* Because invocations may have required fields that accept connection input, the generated
|
||||
* types may be incorrect.
|
||||
*
|
||||
* For example, the ImageResizeInvocation has a required `image` field, but because it accepts
|
||||
* connection input, it should be optional on instantiation of the field.
|
||||
*
|
||||
* To handle this, the schema exposes an `input` property that can be used to determine if the
|
||||
* field accepts connection input. If it does, we can make the field optional.
|
||||
*/
|
||||
|
||||
if ('class' in schemaObject && schemaObject.class === 'invocation') {
|
||||
// We only want to make fields optional if they are required
|
||||
if (!Array.isArray(schemaObject?.required)) {
|
||||
schemaObject.required = [];
|
||||
}
|
||||
|
||||
schemaObject.required.forEach((prop) => {
|
||||
const acceptsConnection = ['any', 'connection'].includes(
|
||||
schemaObject.properties?.[prop]?.['input']
|
||||
);
|
||||
|
||||
if (acceptsConnection) {
|
||||
// remove this prop from the required array
|
||||
const invocationName = metadata.path.split('/').pop();
|
||||
console.log(
|
||||
`Making connectable field optional: ${COLORS.fg.green}${invocationName}.${COLORS.fg.cyan}${prop}${COLORS.reset}`
|
||||
);
|
||||
schemaObject.required = schemaObject.required.filter(
|
||||
(r) => r !== prop
|
||||
);
|
||||
}
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// Check if we are generating types for an invocation output
|
||||
if ('class' in schemaObject && schemaObject.class === 'output') {
|
||||
// modify output types
|
||||
}
|
||||
},
|
||||
});
|
||||
fs.writeFileSync(OUTPUT_FILE, types);
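
For reference, the connectable-field pass that this diff removes rewrote the generated invocation types roughly as sketched below. This is illustrative only: ImageResizeInvocation is the example named in the comment above, but the field shapes and the ImageField stub are assumptions, not copied from the real generated schema.

// Hypothetical stand-in for the generated ImageField type.
type ImageField = { image_name: string };

// Before the pass: a required field stays required in the generated type,
// even though a workflow may supply it via a graph connection instead.
export type ImageResizeInvocationBefore = {
  image: ImageField; // required on instantiation
  width?: number;
};

// After the pass: a required field whose schema `input` is 'any' or
// 'connection' is dropped from `required`, so it becomes optional.
export type ImageResizeInvocationAfter = {
  image?: ImageField; // may be provided by an edge at graph execution time
  width?: number;
};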

@@ -20,6 +20,7 @@ import AppErrorBoundaryFallback from './AppErrorBoundaryFallback';
import GlobalHotkeys from './GlobalHotkeys';
import PreselectedImage from './PreselectedImage';
import Toaster from './Toaster';
import { useSocketIO } from 'app/hooks/useSocketIO';

const DEFAULT_CONFIG = {};

@@ -33,10 +34,12 @@ interface Props {

const App = ({ config = DEFAULT_CONFIG, selectedImage }: Props) => {
const language = useAppSelector(languageSelector);

const logger = useLogger('system');
const dispatch = useAppDispatch();

// singleton!
useSocketIO();

const handleReset = useCallback(() => {
localStorage.clear();
location.reload();

@@ -2,6 +2,7 @@ import { Flex, Heading, Link, Text, useToast } from '@chakra-ui/react';
import IAIButton from 'common/components/IAIButton';
import newGithubIssueUrl from 'new-github-issue-url';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { FaCopy, FaExternalLinkAlt } from 'react-icons/fa';
import { FaArrowRotateLeft } from 'react-icons/fa6';
import { serializeError } from 'serialize-error';
@@ -13,6 +14,7 @@ type Props = {

const AppErrorBoundaryFallback = ({ error, resetErrorBoundary }: Props) => {
const toast = useToast();
const { t } = useTranslation();

const handleCopy = useCallback(() => {
const text = JSON.stringify(serializeError(error), null, 2);
@@ -53,7 +55,7 @@ const AppErrorBoundaryFallback = ({ error, resetErrorBoundary }: Props) => {
p: 16,
}}
>
<Heading>Something went wrong</Heading>
<Heading>{t('common.somethingWentWrong')}</Heading>
<Flex
layerStyle="second"
sx={{
@@ -80,13 +82,15 @@ const AppErrorBoundaryFallback = ({ error, resetErrorBoundary }: Props) => {
leftIcon={<FaArrowRotateLeft />}
onClick={resetErrorBoundary}
>
Reset UI
{t('accessibility.resetUI')}
</IAIButton>
<IAIButton leftIcon={<FaCopy />} onClick={handleCopy}>
Copy Error
{t('common.copyError')}
</IAIButton>
<Link href={url} isExternal>
<IAIButton leftIcon={<FaExternalLinkAlt />}>Create Issue</IAIButton>
<IAIButton leftIcon={<FaExternalLinkAlt />}>
{t('accessibility.createIssue')}
</IAIButton>
</Link>
</Flex>
</Flex>

@@ -1,26 +1,27 @@
import { Middleware } from '@reduxjs/toolkit';
import { $socketOptions } from 'app/hooks/useSocketIO';
import { $authToken } from 'app/store/nanostores/authToken';
import { $baseUrl } from 'app/store/nanostores/baseUrl';
import { $customStarUI, CustomStarUi } from 'app/store/nanostores/customStarUI';
import { $headerComponent } from 'app/store/nanostores/headerComponent';
import { $isDebugging } from 'app/store/nanostores/isDebugging';
import { $projectId } from 'app/store/nanostores/projectId';
import { $queueId, DEFAULT_QUEUE_ID } from 'app/store/nanostores/queueId';
import { store } from 'app/store/store';
import { PartialAppConfig } from 'app/types/invokeai';
import React, {
lazy,
memo,
PropsWithChildren,
ReactNode,
lazy,
memo,
useEffect,
} from 'react';
import { Provider } from 'react-redux';
import { addMiddleware, resetMiddlewares } from 'redux-dynamic-middlewares';
import { $authToken, $baseUrl, $projectId } from 'services/api/client';
import { socketMiddleware } from 'services/events/middleware';
import Loading from '../../common/components/Loading/Loading';
import '../../i18n';
import AppDndContext from '../../features/dnd/components/AppDndContext';
import { $customStarUI, CustomStarUi } from 'app/store/nanostores/customStarUI';
import { $headerComponent } from 'app/store/nanostores/headerComponent';
import {
$queueId,
DEFAULT_QUEUE_ID,
} from 'features/queue/store/queueNanoStore';
import { ManagerOptions, SocketOptions } from 'socket.io-client';
import Loading from 'common/components/Loading/Loading';
import AppDndContext from 'features/dnd/components/AppDndContext';
import 'i18n';

const App = lazy(() => import('./App'));
const ThemeLocaleProvider = lazy(() => import('./ThemeLocaleProvider'));
@@ -38,6 +39,8 @@ interface Props extends PropsWithChildren {
action: 'sendToImg2Img' | 'sendToCanvas' | 'useAllParameters';
};
customStarUi?: CustomStarUi;
socketOptions?: Partial<ManagerOptions & SocketOptions>;
isDebugging?: boolean;
}

const InvokeAIUI = ({
@@ -50,6 +53,8 @@ const InvokeAIUI = ({
queueId,
selectedImage,
customStarUi,
socketOptions,
isDebugging = false,
}: Props) => {
useEffect(() => {
// configure API client token
@@ -82,9 +87,7 @@ const InvokeAIUI = ({

// rebuild socket middleware with token and apiUrl
if (middleware && middleware.length > 0) {
addMiddleware(socketMiddleware(), ...middleware);
} else {
addMiddleware(socketMiddleware());
addMiddleware(...middleware);
}

return () => {
@@ -116,6 +119,24 @@ const InvokeAIUI = ({
};
}, [headerComponent]);

useEffect(() => {
if (socketOptions) {
$socketOptions.set(socketOptions);
}
return () => {
$socketOptions.set({});
};
}, [socketOptions]);

useEffect(() => {
if (isDebugging) {
$isDebugging.set(isDebugging);
}
return () => {
$isDebugging.set(false);
};
}, [isDebugging]);

return (
<React.StrictMode>
<Provider store={store}>

109
invokeai/frontend/web/src/app/hooks/useSocketIO.ts
Normal file
@@ -0,0 +1,109 @@
import { useStore } from '@nanostores/react';
import { $authToken } from 'app/store/nanostores/authToken';
import { $baseUrl } from 'app/store/nanostores/baseUrl';
import { $isDebugging } from 'app/store/nanostores/isDebugging';
import { useAppDispatch } from 'app/store/storeHooks';
import { MapStore, WritableAtom, atom, map } from 'nanostores';
import { useEffect } from 'react';
import {
ClientToServerEvents,
ServerToClientEvents,
} from 'services/events/types';
import { setEventListeners } from 'services/events/util/setEventListeners';
import { ManagerOptions, Socket, SocketOptions, io } from 'socket.io-client';

// Inject socket options and url into window for debugging
declare global {
interface Window {
$socketOptions?: MapStore<Partial<ManagerOptions & SocketOptions>>;
$socketUrl?: WritableAtom<string>;
}
}

const makeSocketOptions = (): Partial<ManagerOptions & SocketOptions> => {
const socketOptions: Parameters<typeof io>[0] = {
timeout: 60000,
path: '/ws/socket.io',
autoConnect: false, // achtung! removing this breaks the dynamic middleware
forceNew: true,
};

// if building in package mode, replace socket url with open api base url minus the http protocol
if (['nodes', 'package'].includes(import.meta.env.MODE)) {
const authToken = $authToken.get();
if (authToken) {
// TODO: handle providing jwt to socket.io
socketOptions.auth = { token: authToken };
}

socketOptions.transports = ['websocket', 'polling'];
}

return socketOptions;
};

const makeSocketUrl = (): string => {
const wsProtocol = window.location.protocol === 'https:' ? 'wss' : 'ws';
let socketUrl = `${wsProtocol}://${window.location.host}`;
if (['nodes', 'package'].includes(import.meta.env.MODE)) {
const baseUrl = $baseUrl.get();
if (baseUrl) {
//eslint-disable-next-line
socketUrl = baseUrl.replace(/^https?\:\/\//i, '');
}
}
return socketUrl;
};
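
As a quick sanity check of the URL derivation above (values are hypothetical examples, not taken from any deployment):

// package mode with a base URL set:
//   $baseUrl.get() === 'https://example.com'  ->  socketUrl === 'example.com'
//   (only the protocol prefix is stripped; socket.io resolves ws/wss when connecting)
// standalone app served over https at host 'localhost:9090':
//   socketUrl === 'wss://localhost:9090'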

const makeSocket = (): Socket<ServerToClientEvents, ClientToServerEvents> => {
const socketOptions = makeSocketOptions();
const socketUrl = $socketUrl.get();
const socket: Socket<ServerToClientEvents, ClientToServerEvents> = io(
socketUrl,
{ ...socketOptions, ...$socketOptions.get() }
);
return socket;
};

export const $socketOptions = map<Partial<ManagerOptions & SocketOptions>>({});
export const $socketUrl = atom<string>(makeSocketUrl());
export const $isSocketInitialized = atom<boolean>(false);

/**
* Initializes the socket.io connection and sets up event listeners.
*/
export const useSocketIO = () => {
const dispatch = useAppDispatch();
const socketOptions = useStore($socketOptions);
const socketUrl = useStore($socketUrl);
const baseUrl = useStore($baseUrl);
const authToken = useStore($authToken);

useEffect(() => {
if ($isSocketInitialized.get()) {
// Singleton!
return;
}
const socket = makeSocket();
setEventListeners({ dispatch, socket });
socket.connect();

if ($isDebugging.get()) {
window.$socketOptions = $socketOptions;
window.$socketUrl = $socketUrl;
console.log('Socket initialized', socket);
}

$isSocketInitialized.set(true);

return () => {
if ($isDebugging.get()) {
window.$socketOptions = undefined;
window.$socketUrl = undefined;
console.log('Socket teardown', socket);
}
socket.disconnect();
$isSocketInitialized.set(false);
};
}, [dispatch, socketOptions, socketUrl, baseUrl, authToken]);
};
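
A minimal mounting sketch for the hook (the component name below is hypothetical; in this branch the hook is called once in App.tsx, per the App diff above):

import { useSocketIO } from 'app/hooks/useSocketIO';

const SocketMount = () => {
  // Call exactly once near the root of the tree; the $isSocketInitialized
  // guard above turns any additional mounts into no-ops.
  useSocketIO();
  return null;
};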
|
||||
@@ -6,7 +6,7 @@ import {
|
||||
createListenerMiddleware,
|
||||
} from '@reduxjs/toolkit';
|
||||
|
||||
import type { AppDispatch, RootState } from '../../store';
|
||||
import type { AppDispatch, RootState } from 'app/store/store';
|
||||
import { addCommitStagingAreaImageListener } from './listeners/addCommitStagingAreaImageListener';
|
||||
import { addFirstListImagesListener } from './listeners/addFirstListImagesListener.ts';
|
||||
import { addAnyEnqueuedListener } from './listeners/anyEnqueued';
|
||||
@@ -71,7 +71,7 @@ import { addSocketUnsubscribedEventListener as addSocketUnsubscribedListener } f
|
||||
import { addStagingAreaImageSavedListener } from './listeners/stagingAreaImageSaved';
|
||||
import { addTabChangedListener } from './listeners/tabChanged';
|
||||
import { addUpscaleRequestedListener } from './listeners/upscaleRequested';
|
||||
import { addWorkflowLoadedListener } from './listeners/workflowLoaded';
|
||||
import { addWorkflowLoadRequestedListener } from './listeners/workflowLoadRequested';
|
||||
import { addUpdateAllNodesRequestedListener } from './listeners/updateAllNodesRequested';
|
||||
|
||||
export const listenerMiddleware = createListenerMiddleware();
|
||||
@@ -178,7 +178,7 @@ addBoardIdSelectedListener();
|
||||
addReceivedOpenAPISchemaListener();
|
||||
|
||||
// Workflows
|
||||
addWorkflowLoadedListener();
|
||||
addWorkflowLoadRequestedListener();
|
||||
addUpdateAllNodesRequestedListener();
|
||||
|
||||
// DND
|
||||
|
||||
@@ -12,10 +12,10 @@ import { addToast } from 'features/system/store/systemSlice';
|
||||
import { t } from 'i18next';
|
||||
import { imagesApi } from 'services/api/endpoints/images';
|
||||
import { queueApi } from 'services/api/endpoints/queue';
|
||||
import { isImageOutput } from 'services/api/guards';
|
||||
import { BatchConfig, ImageDTO } from 'services/api/types';
|
||||
import { socketInvocationComplete } from 'services/events/actions';
|
||||
import { startAppListening } from '..';
|
||||
import { isImageOutput } from 'features/nodes/types/common';
|
||||
|
||||
export const addControlNetImageProcessedListener = () => {
|
||||
startAppListening({
|
||||
|
||||
@@ -10,8 +10,8 @@ import { blobToDataURL } from 'features/canvas/util/blobToDataURL';
|
||||
import { getCanvasData } from 'features/canvas/util/getCanvasData';
|
||||
import { getCanvasGenerationMode } from 'features/canvas/util/getCanvasGenerationMode';
|
||||
import { canvasGraphBuilt } from 'features/nodes/store/actions';
|
||||
import { buildCanvasGraph } from 'features/nodes/util/graphBuilders/buildCanvasGraph';
|
||||
import { prepareLinearUIBatch } from 'features/nodes/util/graphBuilders/buildLinearBatchConfig';
|
||||
import { buildCanvasGraph } from 'features/nodes/util/graph/buildCanvasGraph';
|
||||
import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
|
||||
import { imagesApi } from 'services/api/endpoints/images';
|
||||
import { queueApi } from 'services/api/endpoints/queue';
|
||||
import { ImageDTO } from 'services/api/types';
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
import { enqueueRequested } from 'app/store/actions';
|
||||
import { prepareLinearUIBatch } from 'features/nodes/util/graphBuilders/buildLinearBatchConfig';
|
||||
import { buildLinearImageToImageGraph } from 'features/nodes/util/graphBuilders/buildLinearImageToImageGraph';
|
||||
import { buildLinearSDXLImageToImageGraph } from 'features/nodes/util/graphBuilders/buildLinearSDXLImageToImageGraph';
|
||||
import { buildLinearSDXLTextToImageGraph } from 'features/nodes/util/graphBuilders/buildLinearSDXLTextToImageGraph';
|
||||
import { buildLinearTextToImageGraph } from 'features/nodes/util/graphBuilders/buildLinearTextToImageGraph';
|
||||
import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
|
||||
import { buildLinearImageToImageGraph } from 'features/nodes/util/graph/buildLinearImageToImageGraph';
|
||||
import { buildLinearSDXLImageToImageGraph } from 'features/nodes/util/graph/buildLinearSDXLImageToImageGraph';
|
||||
import { buildLinearSDXLTextToImageGraph } from 'features/nodes/util/graph/buildLinearSDXLTextToImageGraph';
|
||||
import { buildLinearTextToImageGraph } from 'features/nodes/util/graph/buildLinearTextToImageGraph';
|
||||
import { queueApi } from 'services/api/endpoints/queue';
|
||||
import { startAppListening } from '..';
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { enqueueRequested } from 'app/store/actions';
|
||||
import { buildNodesGraph } from 'features/nodes/util/graphBuilders/buildNodesGraph';
|
||||
import { buildNodesGraph } from 'features/nodes/util/graph/buildNodesGraph';
|
||||
import { queueApi } from 'services/api/endpoints/queue';
|
||||
import { BatchConfig } from 'services/api/types';
|
||||
import { startAppListening } from '..';
|
||||
|
||||
@@ -5,19 +5,20 @@ import {
   controlAdapterProcessedImageChanged,
   selectControlAdapterAll,
 } from 'features/controlAdapters/store/controlAdaptersSlice';
+import { isControlNetOrT2IAdapter } from 'features/controlAdapters/store/types';
 import { imageDeletionConfirmed } from 'features/deleteImageModal/store/actions';
 import { isModalOpenChanged } from 'features/deleteImageModal/store/slice';
 import { selectListImagesBaseQueryArgs } from 'features/gallery/store/gallerySelectors';
 import { imageSelected } from 'features/gallery/store/gallerySlice';
 import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice';
-import { isInvocationNode } from 'features/nodes/types/types';
+import { isImageFieldInputInstance } from 'features/nodes/types/field';
+import { isInvocationNode } from 'features/nodes/types/invocation';
 import { clearInitialImage } from 'features/parameters/store/generationSlice';
 import { clamp, forEach } from 'lodash-es';
 import { api } from 'services/api';
 import { imagesApi } from 'services/api/endpoints/images';
 import { imagesAdapter } from 'services/api/util';
 import { startAppListening } from '..';
-import { isControlNetOrT2IAdapter } from 'features/controlAdapters/store/types';
 
 export const addRequestedSingleImageDeletionListener = () => {
   startAppListening({
@@ -121,7 +122,7 @@ export const addRequestedSingleImageDeletionListener = () => {
 
     forEach(node.data.inputs, (input) => {
       if (
-        input.type === 'ImageField' &&
+        isImageFieldInputInstance(input) &&
         input.value?.image_name === imageDTO.image_name
       ) {
         dispatch(
@@ -241,7 +242,7 @@ export const addRequestedMultipleImageDeletionListener = () => {
 
     forEach(node.data.inputs, (input) => {
       if (
-        input.type === 'ImageField' &&
+        isImageFieldInputInstance(input) &&
         input.value?.image_name === imageDTO.image_name
       ) {
         dispatch(
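The substantive change in both hunks is the same: a string comparison (input.type === 'ImageField') is replaced by the isImageFieldInputInstance guard, which also narrows the TypeScript type of input. A sketch of what such a guard might look like; the real definition in features/nodes/types/field is likely zod-derived and richer:

// Assumed minimal shape of an image field input instance.
type ImageFieldInputInstance = {
  type: 'ImageField';
  value?: { image_name: string };
};

// User-defined type guard: when it returns true, TS narrows `input`,
// making `input.value?.image_name` safe without a cast.
const isImageFieldInputInstance = (input: {
  type: string;
}): input is ImageFieldInputInstance => input.type === 'ImageField';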
@@ -12,7 +12,7 @@ import { t } from 'i18next';
 import { omit } from 'lodash-es';
 import { boardsApi } from 'services/api/endpoints/boards';
 import { startAppListening } from '..';
-import { imagesApi } from '../../../../../services/api/endpoints/images';
+import { imagesApi } from 'services/api/endpoints/images';
 
 export const addImageUploadedFulfilledListener = () => {
   startAppListening({
@@ -79,7 +79,7 @@ export const addImageUploadedFulfilledListener = () => {
       dispatch(
         addToast({
           ...DEFAULT_UPLOADED_TOAST,
-          description: t('toast.setCanvasInitialImage'),
+          description: t('toast.setAsCanvasInitialImage'),
         })
       );
       return;
@@ -1,7 +1,7 @@
 import { imagesApi } from 'services/api/endpoints/images';
 import { startAppListening } from '..';
-import { selectionChanged } from '../../../../../features/gallery/store/gallerySlice';
-import { ImageDTO } from '../../../../../services/api/types';
+import { selectionChanged } from 'features/gallery/store/gallerySlice';
+import { ImageDTO } from 'services/api/types';
 
 export const addImagesStarredListener = () => {
   startAppListening({
@@ -1,7 +1,7 @@
 import { imagesApi } from 'services/api/endpoints/images';
 import { startAppListening } from '..';
-import { selectionChanged } from '../../../../../features/gallery/store/gallerySlice';
-import { ImageDTO } from '../../../../../services/api/types';
+import { selectionChanged } from 'features/gallery/store/gallerySlice';
+import { ImageDTO } from 'services/api/types';
 
 export const addImagesUnstarredListener = () => {
   startAppListening({
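The starred/unstarred hunks only trade deep relative paths ('../../../../../...') for root-relative specifiers. That style depends on the bundler resolving 'features/...' and 'services/...' from the source root; a typical arrangement for a Vite project looks like the following hypothetical excerpt (not the repo's actual config):

// vite.config.ts (hypothetical excerpt): make 'features/*' and
// 'services/*' resolve from src/ so imports can drop the '../' chains.
import path from 'path';
import { defineConfig } from 'vite';

export default defineConfig({
  resolve: {
    alias: {
      features: path.resolve(__dirname, 'src/features'),
      services: path.resolve(__dirname, 'src/services'),
    },
  },
});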
@@ -12,12 +12,12 @@ import {
   setWidth,
   vaeSelected,
 } from 'features/parameters/store/generationSlice';
-import { zMainOrOnnxModel } from 'features/parameters/types/parameterSchemas';
 import { addToast } from 'features/system/store/systemSlice';
 import { makeToast } from 'features/system/util/makeToast';
 import { t } from 'i18next';
 import { forEach } from 'lodash-es';
 import { startAppListening } from '..';
+import { zParameterModel } from 'features/parameters/types/parameterSchemas';
 
 export const addModelSelectedListener = () => {
   startAppListening({
@@ -26,7 +26,7 @@ export const addModelSelectedListener = () => {
     const log = logger('models');
 
     const state = getState();
-    const result = zMainOrOnnxModel.safeParse(action.payload);
+    const result = zParameterModel.safeParse(action.payload);
 
     if (!result.success) {
       log.error(
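The safeParse pattern in this hunk is zod's non-throwing validation: the modelSelected payload is untrusted until it passes the schema. Spelled out with an assumed, simplified schema (the real zParameterModel has more fields and variants):

import { z } from 'zod';

// Assumed, simplified stand-in for zParameterModel.
const zParameterModel = z.object({
  model_name: z.string().min(1),
  base_model: z.enum(['sd-1', 'sd-2', 'sdxl']),
});
type ParameterModel = z.infer<typeof zParameterModel>;

const result = zParameterModel.safeParse(action.payload);
if (!result.success) {
  // result.error is a ZodError; nothing was thrown.
  log.error({ error: result.error.format() }, 'Failed to parse model');
  return;
}
const model: ParameterModel = result.data; // fully typed from here on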
@@ -11,9 +11,9 @@ import {
   vaeSelected,
 } from 'features/parameters/store/generationSlice';
 import {
-  zMainOrOnnxModel,
-  zSDXLRefinerModel,
-  zVaeModel,
+  zParameterModel,
+  zParameterSDXLRefinerModel,
+  zParameterVAEModel,
 } from 'features/parameters/types/parameterSchemas';
 import {
   refinerModelChanged,
@@ -67,7 +67,7 @@ export const addModelsLoadedListener = () => {
       return;
     }
 
-    const result = zMainOrOnnxModel.safeParse(models[0]);
+    const result = zParameterModel.safeParse(models[0]);
 
     if (!result.success) {
       log.error(
@@ -119,7 +119,7 @@ export const addModelsLoadedListener = () => {
       return;
     }
 
-    const result = zSDXLRefinerModel.safeParse(models[0]);
+    const result = zParameterSDXLRefinerModel.safeParse(models[0]);
 
     if (!result.success) {
       log.error(
@@ -170,7 +170,7 @@ export const addModelsLoadedListener = () => {
       return;
     }
 
-    const result = zVaeModel.safeParse(firstModel);
+    const result = zParameterVAEModel.safeParse(firstModel);
 
     if (!result.success) {
       log.error(
@@ -1,7 +1,7 @@
 import { logger } from 'app/logging/logger';
 import { parseify } from 'common/util/serialize';
 import { nodeTemplatesBuilt } from 'features/nodes/store/nodesSlice';
-import { parseSchema } from 'features/nodes/util/parseSchema';
+import { parseSchema } from 'features/nodes/util/schema/parseSchema';
 import { size } from 'lodash-es';
 import { receivedOpenAPISchema } from 'services/api/thunks/schema';
 import { startAppListening } from '..';
@@ -15,6 +15,7 @@ export const addReceivedOpenAPISchemaListener = () => {
 
     log.debug({ schemaJSON }, 'Received OpenAPI schema');
+    const { nodesAllowlist, nodesDenylist } = getState().config;
 
     const nodeTemplates = parseSchema(
       schemaJSON,
       nodesAllowlist,
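The added destructuring feeds user config into schema parsing, so a deployment can allow- or deny-list node types when templates are built from the OpenAPI schema. parseSchema's exact signature lives in the renamed module; a hedged sketch of the filtering it presumably performs:

// Sketch: filter parsed invocation templates by optional allow/deny lists.
// `templates` maps node type -> template; both lists hold node type strings.
const filterTemplates = <T>(
  templates: Record<string, T>,
  allowlist?: string[],
  denylist?: string[]
): Record<string, T> =>
  Object.fromEntries(
    Object.entries(templates).filter(
      ([type]) =>
        (!allowlist || allowlist.includes(type)) &&
        (!denylist || !denylist.includes(type))
    )
  );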
@@ -10,16 +10,16 @@ import { IMAGE_CATEGORIES } from 'features/gallery/store/types';
 import {
   LINEAR_UI_OUTPUT,
   nodeIDDenyList,
-} from 'features/nodes/util/graphBuilders/constants';
+} from 'features/nodes/util/graph/constants';
 import { boardsApi } from 'services/api/endpoints/boards';
 import { imagesApi } from 'services/api/endpoints/images';
-import { isImageOutput } from 'services/api/guards';
 import { imagesAdapter } from 'services/api/util';
 import {
   appSocketInvocationComplete,
   socketInvocationComplete,
 } from 'services/events/actions';
 import { startAppListening } from '../..';
+import { isImageOutput } from 'features/nodes/types/common';
 
 // These nodes output an image, but do not actually *save* an image, so we don't want to handle the gallery logic on them
 const nodeTypeDenylist = ['load_image', 'image'];
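The comment carried over into the new import layout explains the denylist's purpose. A sketch of how it is presumably consulted inside the invocation-complete effect; the socket event's field names are assumptions:

// Sketch only: skip gallery bookkeeping for node types that emit an
// image output without saving an image (per the comment above).
const { data } = action.payload; // assumed socket event shape
if (isImageOutput(data.result) && !nodeTypeDenylist.includes(data.node.type)) {
  // ...fetch the ImageDTO for data.result.image and upsert it into the
  // gallery cache via imagesAdapter / imagesApi...
}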
Some files were not shown because too many files have changed in this diff.