Mirror of https://github.com/invoke-ai/InvokeAI.git (synced 2026-01-18 03:28:05 -05:00)

Compare commits: 57 commits, `lstein/deb…` → `psyche/fea…`
Commits in this range: f3c6396339, c77eff8500, 6d366fb519, 0f02a72cb9, 37fd57d4d9, cf0c7d66ed, 5b016bf376, e7a096dec1, 281ecd5a9a, 9cbf78542c, 34f5259980, 2ecbb9f720, ab36d7c0f2, 05d6661877, eb558d72d8, 4687739319, 168b35f86d, 07fe0e8dc8, 45fc7d8054, eafc85cfe3, ddf917f68c, c90807ba33, 842b57e57c, f538ed54fb, d0a936ebd4, 27622dfd5e, 72b44f7ebc, 7726d312e1, 61520dfb86, 6e869e6038, 9eacc0c189, 23606d9e83, d4d0fea078, a5771f6120, 35f847d5b7, 3278497674, c9350f71be, b00e27b022, a6283b9fb6, 64fb15e117, 7019d93ff0, 7467768d48, e2d7b514e0, c36d12a50f, c7f8fe4d5e, ffb41c3616, 611006b692, ca496f0380, 01d8ab04a5, 7a4122235f, 75f4e27522, 8ae757334e, 2038064a34, 689cb9d31d, 0cab1d1e04, 9bd7dabed3, 30283a4767
.github/pull_request_template.md (vendored):
```diff
@@ -1,12 +1,10 @@
 <!--Thanks for contributing!-->
 
 ## Summary
 
 <!--A description of the changes in this PR. Include the kind of change (fix, feature, docs, etc), the "why" and the "how". Screenshots or videos are useful for frontend changes.-->
 
 ## Related Issues / Discussions
 
-<!--List any related issues or discussions on github or discord. If this PR closes an issue, please use the "Closes #1234" format, so that the issue will be automatically closed when the PR merges.-->
+<!--WHEN APPLICABLE: List any related issues or discussions on github or discord. If this PR closes an issue, please use the "Closes #1234" format, so that the issue will be automatically closed when the PR merges.-->
 
 ## QA Instructions
 
@@ -18,8 +16,6 @@
 ## Checklist
 
-<!--If any of these are not completed or not applicable to the change, please add a note.-->
-
-- [ ] The PR has a short but descriptive title
-- [ ] Tests added / updated
-- [ ] Documentation added / updated
+- [ ] _The PR has a short but descriptive title, suitable for a changelog_
+- [ ] _Tests added / updated (if applicable)_
+- [ ] _Documentation added / updated (if applicable)_
 
```
docker/.env.sample:

```diff
@@ -2,17 +2,25 @@
 ## Any environment variables supported by InvokeAI can be specified here,
 ## in addition to the examples below.
 
-# HOST_INVOKEAI_ROOT is the path on the docker host's filesystem where InvokeAI will store data.
-# Outputs will also be stored here by default.
-# If relative, it will be relative to the docker directory in which the docker-compose.yml file is located
-#HOST_INVOKEAI_ROOT=../../invokeai-data
-
-# INVOKEAI_ROOT is the path to the root of the InvokeAI repository within the container.
+## INVOKEAI_ROOT is the path *on the host system* where Invoke will store its data.
+## It is mounted into the container and allows both containerized and non-containerized usage of Invoke.
 # Usually this is the only variable you need to set. It can be relative or absolute.
 # INVOKEAI_ROOT=~/invokeai
 
-# Get this value from your HuggingFace account settings page.
-# HUGGING_FACE_HUB_TOKEN=
+## HOST_INVOKEAI_ROOT and CONTAINER_INVOKEAI_ROOT can be used to control the on-host
+## and in-container paths separately, if needed.
+## HOST_INVOKEAI_ROOT is the path on the docker host's filesystem where Invoke will store data.
+## If relative, it will be relative to the docker directory in which the docker-compose.yml file is located
+## CONTAINER_INVOKEAI_ROOT is the path within the container where Invoke will expect to find the runtime directory.
+## It MUST be absolute. There is usually no need to change this.
+# HOST_INVOKEAI_ROOT=../../invokeai-data
+# CONTAINER_INVOKEAI_ROOT=/invokeai
 
 ## optional variables specific to the docker setup.
+## INVOKEAI_PORT is the port on which the InvokeAI web interface will be available
+# INVOKEAI_PORT=9090
+
+## GPU_DRIVER can be set to either `nvidia` or `rocm` to enable GPU support in the container accordingly.
 # GPU_DRIVER=nvidia #| rocm
+
+## CONTAINER_UID can be set to the UID of the user on the host system that should own the files in the container.
 # CONTAINER_UID=1000
```
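To make the variables above concrete, here is a minimal example `.env` a user might place next to `docker-compose.yml`; every value is illustrative, not a requirement:

```bash
# .env (example values only)
INVOKEAI_ROOT=~/invokeai   # host path where Invoke stores its data
INVOKEAI_PORT=9090         # port for the web interface
GPU_DRIVER=nvidia          # or: rocm
CONTAINER_UID=1000         # host UID that should own created files
```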
docker/Dockerfile:

```diff
@@ -18,8 +18,6 @@ ENV INVOKEAI_SRC=/opt/invokeai
 ENV VIRTUAL_ENV=/opt/venv/invokeai
 
 ENV PATH="$VIRTUAL_ENV/bin:$PATH"
-ARG TORCH_VERSION=2.1.2
-ARG TORCHVISION_VERSION=0.16.2
 ARG GPU_DRIVER=cuda
 ARG TARGETPLATFORM="linux/amd64"
 # unused but available
@@ -27,7 +25,12 @@ ARG BUILDPLATFORM
 
 WORKDIR ${INVOKEAI_SRC}
 
-# Install pytorch before all other pip packages
+COPY invokeai ./invokeai
+COPY pyproject.toml ./
+
+# Editable mode helps use the same image for development:
+# the local working copy can be bind-mounted into the image
+# at path defined by ${INVOKEAI_SRC}
 # NOTE: there are no pytorch builds for arm64 + cuda, only cpu
 # x86_64/CUDA is default
 RUN --mount=type=cache,target=/root/.cache/pip \
@@ -39,20 +42,10 @@ RUN --mount=type=cache,target=/root/.cache/pip \
     else \
         extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu121"; \
     fi &&\
-    pip install $extra_index_url_arg \
-        torch==$TORCH_VERSION \
-        torchvision==$TORCHVISION_VERSION
-
-# Install the local package.
-# Editable mode helps use the same image for development:
-# the local working copy can be bind-mounted into the image
-# at path defined by ${INVOKEAI_SRC}
-COPY invokeai ./invokeai
-COPY pyproject.toml ./
-RUN --mount=type=cache,target=/root/.cache/pip \
     # xformers + triton fails to install on arm64
     if [ "$GPU_DRIVER" = "cuda" ] && [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
-        pip install -e ".[xformers]"; \
+        pip install $extra_index_url_arg -e ".[xformers]"; \
     else \
         pip install $extra_index_url_arg -e "."; \
     fi
@@ -101,6 +94,8 @@ RUN apt update && apt install -y --no-install-recommends \
 ENV INVOKEAI_SRC=/opt/invokeai
 ENV VIRTUAL_ENV=/opt/venv/invokeai
 ENV INVOKEAI_ROOT=/invokeai
+ENV INVOKEAI_HOST=0.0.0.0
+ENV INVOKEAI_PORT=9090
 ENV PATH="$VIRTUAL_ENV/bin:$INVOKEAI_SRC:$PATH"
 ENV CONTAINER_UID=${CONTAINER_UID:-1000}
 ENV CONTAINER_GID=${CONTAINER_GID:-1000}
@@ -125,4 +120,4 @@ RUN mkdir -p ${INVOKEAI_ROOT} && chown -R ${CONTAINER_UID}:${CONTAINER_GID} ${IN
 
 COPY docker/docker-entrypoint.sh ./
 ENTRYPOINT ["/opt/invokeai/docker-entrypoint.sh"]
-CMD ["invokeai-web", "--host", "0.0.0.0"]
+CMD ["invokeai-web"]
```
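As a sketch of how the `GPU_DRIVER` build argument might be exercised (the image tag and build context here are illustrative, not taken from the diff):

```bash
# Build for a ROCm host instead of the default CUDA target.
docker build -f docker/Dockerfile --build-arg GPU_DRIVER=rocm -t invokeai:rocm .
```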
docker/docker-compose.yml:

```diff
@@ -8,35 +8,28 @@ x-invokeai: &invokeai
       context: ..
       dockerfile: docker/Dockerfile
 
-    # variables without a default will automatically inherit from the host environment
-    environment:
-      - INVOKEAI_ROOT
-      - HF_HOME
-
     # Create a .env file in the same directory as this docker-compose.yml file
     # and populate it with environment variables. See .env.sample
     env_file:
      - .env
 
+    # variables without a default will automatically inherit from the host environment
+    environment:
+      # if set, CONTAINER_INVOKEAI_ROOT will override the Invoke runtime directory location *inside* the container
+      - INVOKEAI_ROOT=${CONTAINER_INVOKEAI_ROOT:-/invokeai}
+      - HF_HOME
+
     ports:
-      - "${INVOKEAI_PORT:-9090}:9090"
+      - "${INVOKEAI_PORT:-9090}:${INVOKEAI_PORT:-9090}"
     volumes:
       - type: bind
         source: ${HOST_INVOKEAI_ROOT:-${INVOKEAI_ROOT:-~/invokeai}}
-        target: ${INVOKEAI_ROOT:-/invokeai}
+        target: ${CONTAINER_INVOKEAI_ROOT:-/invokeai}
         bind:
           create_host_path: true
       - ${HF_HOME:-~/.cache/huggingface}:${HF_HOME:-/invokeai/.cache/huggingface}
-      # - ${INVOKEAI_MODELS_DIR:-${INVOKEAI_ROOT:-/invokeai/models}}
-      # - ${INVOKEAI_MODELS_CONFIG_PATH:-${INVOKEAI_ROOT:-/invokeai/configs/models.yaml}}
     tty: true
     stdin_open: true
 
-    # # Example of running alternative commands/scripts in the container
-    # command:
-    #   - bash
-    #   - -c
-    #   - |
-    #     invokeai-model-install --yes --default-only --config_file ${INVOKEAI_ROOT}/config_custom.yaml
-    #     invokeai-nodes-web --host 0.0.0.0
-
 services:
   invokeai-nvidia:
```
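A plausible end-to-end flow with the updated compose file, assuming the `invokeai-nvidia` service shown above (the copy step is illustrative):

```bash
cd docker
cp .env.sample .env        # then edit INVOKEAI_ROOT / INVOKEAI_PORT as needed
docker compose up invokeai-nvidia
```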
docker/docker-entrypoint.sh:

```diff
@@ -33,7 +33,8 @@ if [[ -v "PUBLIC_KEY" ]] && [[ ! -d "${HOME}/.ssh" ]]; then
     service ssh start
 fi
 
+mkdir -p "${INVOKEAI_ROOT}"
+chown --recursive ${USER} "${INVOKEAI_ROOT}"
 cd "${INVOKEAI_ROOT}"
 
 # Run the CMD as the Container User (not root).
```
docs/features/CONFIGURATION.md:

````diff
@@ -119,21 +119,21 @@ The provided token will be added as a `Bearer` token to the network requests to
 
 #### Model Hashing
 
-Models are hashed during installation, providing a stable identifier for models across all platforms. The default algorithm is `blake3`, with a multi-threaded implementation.
-
-If your models are stored on a spinning hard drive, we suggest using `blake3_single`, the single-threaded implementation. The hashes are the same, but it's much faster on spinning disks.
+Models are hashed during installation, providing a stable identifier for models across all platforms. Hashing is a one-time operation.
 
 ```yaml
-hashing_algorithm: blake3_single
+hashing_algorithm: blake3_single # default value
 ```
 
-Model hashing is a one-time operation, but it may take a couple minutes to hash a large model collection. You may opt out of model hashing entirely by setting the algorithm to `random`.
+You might want to change this setting, depending on your system:
 
-```yaml
-hashing_algorithm: random
-```
+- `blake3_single` (default): Single-threaded - best for spinning HDDs, still OK for SSDs
+- `blake3_multi`: Parallelized, memory-mapped implementation - best for SSDs, terrible for spinning disks
+- `random`: Skip hashing entirely - fastest but of course no hash
 
-Most common algorithms are supported, like `md5`, `sha256`, and `sha512`. These are typically much, much slower than `blake3`.
+Most common algorithms are supported, like `md5`, `sha256`, and `sha512`. These are typically much, much slower than either of the BLAKE3 variants.
+
+During the first startup after upgrading to v4, all of your models will be hashed. This can take a few minutes.
 
 #### Path Settings
 
@@ -190,5 +190,48 @@ The `log_format` option provides several alternative formats:
 - `syslog` - the log level and error message only, allowing the syslog system to attach the time and date
 - `legacy` - a format similar to the one used by the legacy 2.3 InvokeAI releases.
 
+### Model Cache
+
+#### `glibc` Memory Allocator Fragmentation
+
+Python (and PyTorch) relies on the memory allocator from the C Standard Library (`libc`). On Linux, with the GNU C Standard Library implementation (`glibc`), our memory access patterns have been observed to cause severe memory fragmentation. This fragmentation results in large amounts of memory that has been freed but can't be released back to the OS. Loading models from disk and moving them between CPU/CUDA seem to be the operations that contribute most to the fragmentation. This memory fragmentation issue can result in OOM crashes during frequent model switching, even if `max_cache_size` is set to a reasonable value (e.g. an OOM crash with `max_cache_size=16` on a system with 32GB of RAM).
+
+This problem may also exist on other OSes, and other `libc` implementations. But, at the time of writing, it has only been investigated on Linux with `glibc`.
+
+To better understand how the `glibc` memory allocator works, see these references:
+
+- Basics: <https://www.gnu.org/software/libc/manual/html_node/The-GNU-Allocator.html>
+- Details: <https://sourceware.org/glibc/wiki/MallocInternals>
+
+Note the differences between memory allocated as chunks in an arena vs. memory allocated with `mmap`. Under `glibc`'s default configuration, most model tensors get allocated as chunks in an arena, making them vulnerable to the problem of fragmentation.
+
+##### Workaround
+
+We can work around this memory fragmentation issue by setting the following env var:
+
+```bash
+# Force blocks >1MB to be allocated with `mmap` so that they are released to the system immediately when they are freed.
+MALLOC_MMAP_THRESHOLD_=1048576
+```
+
+If you use the `invoke.sh` launcher script, you do not need to set this env var, as we set it to `1048576` for you.
+
+##### Manual Configuration
+
+In case the default value causes performance issues, you can pass `--malloc_threshold` to `invoke.sh`:
+
+- Set the env var to a specific value: `./invoke.sh --malloc_threshold=0 # release _all_ blocks asap` or `./invoke.sh --malloc_threshold=16777216 # raise the limit to 16MB`
+- Unset the env var (let the OS handle the value dynamically; may create a memory leak): `./invoke.sh --malloc_threshold=unset`
+
+##### Supplementary Light Reading
+
+See the following references for more information about the `malloc` tunable parameters:
+
+- <https://www.gnu.org/software/libc/manual/html_node/Malloc-Tunable-Parameters.html>
+- <https://www.gnu.org/software/libc/manual/html_node/Memory-Allocation-Tunables.html>
+- <https://man7.org/linux/man-pages/man3/mallopt.3.html>
+
+The model cache emits debug logs that provide visibility into the state of the `libc` memory allocator. See the `LibcUtil` class for more info on how these `libc` malloc stats are collected.
+
 [basic guide to yaml files]: https://circleci.com/blog/what-is-yaml-a-beginner-s-guide/
 [Model Marketplace API Keys]: #model-marketplace-api-keys
````
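The `LibcUtil` internals are not shown in this diff, but on glibc the arena-vs-`mmap` split the new docs describe can be inspected directly with `mallinfo2` via `ctypes`. A minimal sketch, assuming Linux with glibc >= 2.33:

```python
import ctypes

class MallInfo2(ctypes.Structure):
    # Field layout of glibc's `struct mallinfo2` (glibc >= 2.33); all fields are size_t.
    _fields_ = [(name, ctypes.c_size_t) for name in (
        "arena", "ordblks", "smblks", "hblks", "hblkhd",
        "usmblks", "fsmblks", "uordblks", "fordblks", "keepcost")]

libc = ctypes.CDLL("libc.so.6")
libc.mallinfo2.restype = MallInfo2  # mallinfo2 returns the struct by value

info = libc.mallinfo2()
print(f"arena (non-mmap heap) bytes : {info.arena}")
print(f"mmap-allocated bytes        : {info.hblkhd}")
print(f"free-but-unreleased bytes   : {info.fordblks}")
```

Freed arena bytes (`fordblks`) that never shrink are the fragmentation symptom described above; with the `mmap` threshold lowered, large tensor allocations show up under `hblkhd` and are returned to the OS when freed.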
invoke.sh launcher script:

```diff
@@ -46,8 +46,31 @@ if [ "$(uname -s)" == "Darwin" ]; then
     export PYTORCH_ENABLE_MPS_FALLBACK=1
 fi
 
-# Avoid glibc memory fragmentation. See invokeai/backend/model_management/README.md for details.
-export MALLOC_MMAP_THRESHOLD_=1048576
+# Avoid glibc memory fragmentation. See #6007, #4784 and docs/features/CONFIGURATION.md for details.
+# Some systems may need this to be set to a different value, so we may override this via command-line argument below.
+export MALLOC_MMAP_THRESHOLD_=1048576 # 1MB
+
+# This will be passed on to `invokeai-web`
+PARAMS=()
+
+# Parse command-line arguments
+for arg in "$@"; do
+    if [[ $arg == --malloc_threshold=* ]]; then
+        # Re-set MALLOC_MMAP_THRESHOLD_ from the argument if provided
+        value="${arg#*=}"
+        if [[ $value == "unset" ]]; then
+            unset MALLOC_MMAP_THRESHOLD_
+        elif [[ $value =~ ^[0-9]+$ ]]; then
+            export MALLOC_MMAP_THRESHOLD_="$value"
+        else
+            echo "Invalid value for --malloc_threshold. Please provide a valid positive integer or 'unset'."
+            exit 1
+        fi
+    else
+        # Add other arguments to PARAMS
+        PARAMS+=("$arg")
+    fi
+done
 
 # Primary function for the case statement to determine user input
 do_choice() {
@@ -55,7 +78,7 @@ do_choice() {
     1)
         clear
         printf "Generate images with a browser-based interface\n"
-        invokeai-web $PARAMS
+        invokeai-web "${PARAMS[@]}"
         ;;
     2)
         clear
```
Model manager API router (Python):

```diff
@@ -21,10 +21,11 @@ from typing_extensions import Annotated
 
 from invokeai.app.services.model_install import ModelInstallJob
 from invokeai.app.services.model_records import (
     DuplicateModelException,
     InvalidModelException,
     ModelRecordChanges,
     UnknownModelException,
 )
+from invokeai.app.services.model_records.model_records_base import DuplicateModelException, ModelRecordChanges
 from invokeai.app.util.suppress_output import SuppressOutput
 from invokeai.backend.model_manager.config import (
     AnyModelConfig,
@@ -37,7 +38,7 @@ from invokeai.backend.model_manager.config import (
 from invokeai.backend.model_manager.metadata.fetch.huggingface import HuggingFaceMetadataFetch
 from invokeai.backend.model_manager.metadata.metadata_base import ModelMetadataWithFiles, UnknownMetadataException
 from invokeai.backend.model_manager.search import ModelSearch
-from invokeai.backend.model_manager.starter_models import STARTER_MODELS, StarterModel
+from invokeai.backend.model_manager.starter_models import STARTER_MODELS, StarterModel, StarterModelWithoutDependencies
 
 from ..dependencies import ApiDependencies
 
@@ -309,8 +310,10 @@ async def update_model_record(
     """Update a model's config."""
     logger = ApiDependencies.invoker.services.logger
     record_store = ApiDependencies.invoker.services.model_manager.store
+    installer = ApiDependencies.invoker.services.model_manager.install
     try:
-        model_response: AnyModelConfig = record_store.update_model(key, changes=changes)
+        record_store.update_model(key, changes=changes)
+        model_response: AnyModelConfig = installer.sync_model_path(key)
         logger.info(f"Updated model: {key}")
     except UnknownModelException as e:
         raise HTTPException(status_code=404, detail=str(e))
@@ -797,9 +800,9 @@ async def get_starter_models() -> list[StarterModel]:
         if model.source in installed_model_sources:
             model.is_installed = True
         # Remove already-installed dependencies
-        missing_deps: list[str] = []
+        missing_deps: list[StarterModelWithoutDependencies] = []
         for dep in model.dependencies or []:
-            if dep not in installed_model_sources:
+            if dep.source not in installed_model_sources:
                 missing_deps.append(dep)
         model.dependencies = missing_deps
```
ControlNet image processor invocations (Python):

```diff
@@ -7,12 +7,8 @@ from typing import Dict, List, Literal, Union
 import cv2
 import numpy as np
 from controlnet_aux import (
-    CannyDetector,
     ContentShuffleDetector,
-    HEDdetector,
     LeresDetector,
-    LineartAnimeDetector,
-    LineartDetector,
     MediapipeFaceDetector,
     MidasDetector,
     MLSDdetector,
@@ -39,8 +35,12 @@ from invokeai.app.invocations.model import ModelIdentifierField
 from invokeai.app.invocations.primitives import ImageOutput
 from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
 from invokeai.app.services.shared.invocation_context import InvocationContext
+from invokeai.backend.image_util.canny import get_canny_edges
 from invokeai.backend.image_util.depth_anything import DepthAnythingDetector
 from invokeai.backend.image_util.dw_openpose import DWOpenposeDetector
+from invokeai.backend.image_util.hed import HEDProcessor
+from invokeai.backend.image_util.lineart import LineartProcessor
+from invokeai.backend.image_util.lineart_anime import LineartAnimeProcessor
 
 from .baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
 
@@ -171,11 +171,12 @@ class ImageProcessorInvocation(BaseInvocation, WithMetadata, WithBoard):
     title="Canny Processor",
     tags=["controlnet", "canny"],
     category="controlnet",
-    version="1.3.1",
+    version="1.3.2",
 )
 class CannyImageProcessorInvocation(ImageProcessorInvocation):
     """Canny edge detection for ControlNet"""
 
+    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
+    image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
     low_threshold: int = InputField(
         default=100, ge=0, le=255, description="The low threshold of the Canny pixel gradient (0-255)"
@@ -188,12 +189,12 @@ class CannyImageProcessorInvocation(ImageProcessorInvocation):
         # Keep alpha channel for Canny processing to detect edges of transparent areas
         return context.images.get_pil(self.image.image_name, "RGBA")
 
-    def run_processor(self, image):
-        canny_processor = CannyDetector()
-        processed_image = canny_processor(
+    def run_processor(self, image: Image.Image) -> Image.Image:
+        processed_image = get_canny_edges(
             image,
             self.low_threshold,
             self.high_threshold,
             detect_resolution=self.detect_resolution,
             image_resolution=self.image_resolution,
         )
         return processed_image
@@ -215,9 +216,9 @@ class HedImageProcessorInvocation(ImageProcessorInvocation):
     # safe: bool = InputField(default=False, description=FieldDescriptions.safe_mode)
     scribble: bool = InputField(default=False, description=FieldDescriptions.scribble_mode)
 
-    def run_processor(self, image):
-        hed_processor = HEDdetector.from_pretrained("lllyasviel/Annotators")
-        processed_image = hed_processor(
+    def run_processor(self, image: Image.Image) -> Image.Image:
+        hed_processor = HEDProcessor()
+        processed_image = hed_processor.run(
             image,
             detect_resolution=self.detect_resolution,
             image_resolution=self.image_resolution,
@@ -242,9 +243,9 @@ class LineartImageProcessorInvocation(ImageProcessorInvocation):
     image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
     coarse: bool = InputField(default=False, description="Whether to use coarse mode")
 
-    def run_processor(self, image):
-        lineart_processor = LineartDetector.from_pretrained("lllyasviel/Annotators")
-        processed_image = lineart_processor(
+    def run_processor(self, image: Image.Image) -> Image.Image:
+        lineart_processor = LineartProcessor()
+        processed_image = lineart_processor.run(
             image, detect_resolution=self.detect_resolution, image_resolution=self.image_resolution, coarse=self.coarse
         )
         return processed_image
@@ -263,9 +264,9 @@ class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation):
     detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
     image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
 
-    def run_processor(self, image):
-        processor = LineartAnimeDetector.from_pretrained("lllyasviel/Annotators")
-        processed_image = processor(
+    def run_processor(self, image: Image.Image) -> Image.Image:
+        processor = LineartAnimeProcessor()
+        processed_image = processor.run(
             image,
             detect_resolution=self.detect_resolution,
             image_resolution=self.image_resolution,
@@ -278,13 +279,14 @@ class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation):
     title="Midas Depth Processor",
     tags=["controlnet", "midas"],
     category="controlnet",
-    version="1.2.2",
+    version="1.2.3",
 )
 class MidasDepthImageProcessorInvocation(ImageProcessorInvocation):
     """Applies Midas depth processing to image"""
 
     a_mult: float = InputField(default=2.0, ge=0, description="Midas parameter `a_mult` (a = a_mult * PI)")
     bg_th: float = InputField(default=0.1, ge=0, description="Midas parameter `bg_th`")
+    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
     image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
     # depth_and_normal not supported in controlnet_aux v0.0.3
     # depth_and_normal: bool = InputField(default=False, description="whether to use depth and normal mode")
@@ -296,6 +298,7 @@ class MidasDepthImageProcessorInvocation(ImageProcessorInvocation):
             a=np.pi * self.a_mult,
             bg_th=self.bg_th,
             image_resolution=self.image_resolution,
+            detect_resolution=self.detect_resolution,
             # dept_and_normal not supported in controlnet_aux v0.0.3
             # depth_and_normal=self.depth_and_normal,
         )
@@ -420,19 +423,24 @@ class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation):
     title="Mediapipe Face Processor",
     tags=["controlnet", "mediapipe", "face"],
     category="controlnet",
-    version="1.2.2",
+    version="1.2.3",
 )
 class MediapipeFaceProcessorInvocation(ImageProcessorInvocation):
     """Applies mediapipe face processing to image"""
 
     max_faces: int = InputField(default=1, ge=1, description="Maximum number of faces to detect")
     min_confidence: float = InputField(default=0.5, ge=0, le=1, description="Minimum confidence for face detection")
+    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
     image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
 
     def run_processor(self, image):
         mediapipe_face_processor = MediapipeFaceDetector()
         processed_image = mediapipe_face_processor(
-            image, max_faces=self.max_faces, min_confidence=self.min_confidence, image_resolution=self.image_resolution
+            image,
+            max_faces=self.max_faces,
+            min_confidence=self.min_confidence,
+            image_resolution=self.image_resolution,
+            detect_resolution=self.detect_resolution,
         )
         return processed_image
 
@@ -511,11 +519,12 @@ class TileResamplerProcessorInvocation(ImageProcessorInvocation):
     title="Segment Anything Processor",
     tags=["controlnet", "segmentanything"],
     category="controlnet",
-    version="1.2.2",
+    version="1.2.3",
 )
 class SegmentAnythingProcessorInvocation(ImageProcessorInvocation):
     """Applies segment anything processing to image"""
 
+    detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
     image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
 
     def run_processor(self, image):
@@ -524,7 +533,9 @@ class SegmentAnythingProcessorInvocation(ImageProcessorInvocation):
             "ybelkada/segment-anything", subfolder="checkpoints"
         )
         np_img = np.array(image, dtype=np.uint8)
-        processed_image = segment_anything_processor(np_img, image_resolution=self.image_resolution)
+        processed_image = segment_anything_processor(
+            np_img, image_resolution=self.image_resolution, detect_resolution=self.detect_resolution
+        )
        return processed_image
```
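For intuition, this is roughly what such a Canny "processor" does: detect edges at one resolution, then scale the edge map to the output resolution. A self-contained sketch using OpenCV directly; `canny_edges` is a stand-in, not InvokeAI's `get_canny_edges`:

```python
import cv2
import numpy as np
from PIL import Image

def canny_edges(image: Image.Image, low: int = 100, high: int = 200,
                detect_resolution: int = 512, image_resolution: int = 512) -> Image.Image:
    # Run detection on a grayscale copy at detect_resolution...
    rgb = np.array(image.convert("RGB").resize((detect_resolution, detect_resolution)))
    gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)
    edges = cv2.Canny(gray, low, high)
    # ...then resize the edge map to the requested output resolution.
    return Image.fromarray(edges).resize((image_resolution, image_resolution))

# Toy input: a black square on white yields edges along the square's border.
img = Image.new("RGB", (256, 256), "white")
img.paste((0, 0, 0), (64, 64, 192, 192))
edge_map = canny_edges(img)
```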
Image invocations, new "Mask from ID" node (Python):

```diff
@@ -967,3 +967,56 @@ class CanvasPasteBackInvocation(BaseInvocation, WithMetadata, WithBoard):
 
         image_dto = context.images.save(image=source_image)
         return ImageOutput.build(image_dto)
+
+
+@invocation(
+    "mask_from_id",
+    title="Mask from ID",
+    tags=["image", "mask", "id"],
+    category="image",
+    version="1.0.0",
+)
+class MaskFromIDInvocation(BaseInvocation, WithMetadata, WithBoard):
+    """Generate a mask for a particular color in an ID Map"""
+
+    image: ImageField = InputField(description="The image to create the mask from")
+    color: ColorField = InputField(description="ID color to mask")
+    threshold: int = InputField(default=100, description="Threshold for color detection")
+    invert: bool = InputField(default=False, description="Whether or not to invert the mask")
+
+    def rgba_to_hex(self, rgba_color: tuple[int, int, int, int]):
+        r, g, b, a = rgba_color
+        hex_code = "#{:02X}{:02X}{:02X}{:02X}".format(r, g, b, int(a * 255))
+        return hex_code
+
+    def id_to_mask(self, id_mask: Image.Image, color: tuple[int, int, int, int], threshold: int = 100):
+        if id_mask.mode != "RGB":
+            id_mask = id_mask.convert("RGB")
+
+        # Can directly just use the tuple but I'll leave this rgba_to_hex here
+        # in case anyone prefers using hex codes directly instead of the color picker
+        hex_color_str = self.rgba_to_hex(color)
+        rgb_color = numpy.array([int(hex_color_str[i : i + 2], 16) for i in (1, 3, 5)])
+
+        # Maybe there's a faster way to calculate this distance but I can't think of any right now.
+        color_distance = numpy.linalg.norm(id_mask - rgb_color, axis=-1)
+
+        # Create a mask based on the threshold and the distance calculated above
+        binary_mask = (color_distance < threshold).astype(numpy.uint8) * 255
+
+        # Convert the mask back to PIL
+        binary_mask_pil = Image.fromarray(binary_mask)
+
+        return binary_mask_pil
+
+    def invoke(self, context: InvocationContext) -> ImageOutput:
+        image = context.images.get_pil(self.image.image_name)
+
+        mask = self.id_to_mask(image, self.color.tuple(), self.threshold)
+
+        if self.invert:
+            mask = ImageOps.invert(mask)
+
+        image_dto = context.images.save(image=mask, image_category=ImageCategory.MASK)
+
+        return ImageOutput.build(image_dto)
```
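As a rough standalone illustration of the thresholding in `id_to_mask` (toy data, not the node's API): pixels whose Euclidean distance to the target ID color is below `threshold` become white.

```python
import numpy as np
from PIL import Image

# 2x2 "ID map": exact red, near-red, green, blue.
id_map = np.array(
    [[[255, 0, 0], [250, 5, 0]],
     [[0, 255, 0], [0, 0, 255]]], dtype=np.uint8)
target = np.array([255, 0, 0])  # ID color to mask
threshold = 100

# Euclidean distance in RGB space; the int cast avoids uint8 wraparound.
distance = np.linalg.norm(id_map.astype(np.int64) - target, axis=-1)
mask = Image.fromarray(((distance < threshold) * 255).astype(np.uint8))
print(np.asarray(mask))  # red-ish pixels -> 255, others -> 0
```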
Upscale invocations, ESRGAN model URL key fix (Python):

```diff
@@ -31,7 +31,7 @@ ESRGAN_MODELS = Literal[
 ESRGAN_MODEL_URLS: dict[str, str] = {
     "RealESRGAN_x4plus.pth": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
     "RealESRGAN_x4plus_anime_6B.pth": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
-    "ESRGAN_SRx4_DF2KOST_official.pth": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth",
+    "ESRGAN_SRx4_DF2KOST_official-ff704c30.pth": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth",
     "RealESRGAN_x2plus.pth": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
 }
```
App config service (Python):

```diff
@@ -13,7 +13,7 @@ from typing import Any, Literal, Optional
 import psutil
 import yaml
 from pydantic import BaseModel, Field, PrivateAttr, field_validator
-from pydantic_settings import BaseSettings, SettingsConfigDict
+from pydantic_settings import BaseSettings, PydanticBaseSettingsSource, SettingsConfigDict
 
 import invokeai.configs as model_configs
 from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS
@@ -115,7 +115,7 @@ class InvokeAIAppConfig(BaseSettings):
     allow_nodes: List of nodes to allow. Omit to allow all.
     deny_nodes: List of nodes to deny. Omit to deny none.
     node_cache_size: How many cached nodes to keep in memory.
-    hashing_algorithm: Model hashing algorthim for model installs. 'blake3' is best for SSDs. 'blake3_single' is best for spinning disk HDDs. 'random' disables hashing, instead assigning a UUID to models. Useful when using a memory db to reduce model installation time, or if you don't care about storing stable hashes for models. Alternatively, any other hashlib algorithm is accepted, though these are not nearly as performant as blake3.<br>Valid values: `md5`, `sha1`, `sha224`, `sha256`, `sha384`, `sha512`, `blake2b`, `blake2s`, `sha3_224`, `sha3_256`, `sha3_384`, `sha3_512`, `shake_128`, `shake_256`, `blake3`, `blake3_single`, `random`
+    hashing_algorithm: Model hashing algorthim for model installs. 'blake3_multi' is best for SSDs. 'blake3_single' is best for spinning disk HDDs. 'random' disables hashing, instead assigning a UUID to models. Useful when using a memory db to reduce model installation time, or if you don't care about storing stable hashes for models. Alternatively, any other hashlib algorithm is accepted, though these are not nearly as performant as blake3.<br>Valid values: `blake3_multi`, `blake3_single`, `random`, `md5`, `sha1`, `sha224`, `sha256`, `sha384`, `sha512`, `blake2b`, `blake2s`, `sha3_224`, `sha3_256`, `sha3_384`, `sha3_512`, `shake_128`, `shake_256`
     remote_api_tokens: List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided in as a Bearer token.
     """
 
@@ -191,7 +191,7 @@ class InvokeAIAppConfig(BaseSettings):
     node_cache_size: int = Field(default=512, description="How many cached nodes to keep in memory.")
 
     # MODEL INSTALL
-    hashing_algorithm: HASHING_ALGORITHMS = Field(default="blake3", description="Model hashing algorthim for model installs. 'blake3' is best for SSDs. 'blake3_single' is best for spinning disk HDDs. 'random' disables hashing, instead assigning a UUID to models. Useful when using a memory db to reduce model installation time, or if you don't care about storing stable hashes for models. Alternatively, any other hashlib algorithm is accepted, though these are not nearly as performant as blake3.")
+    hashing_algorithm: HASHING_ALGORITHMS = Field(default="blake3_single", description="Model hashing algorthim for model installs. 'blake3_multi' is best for SSDs. 'blake3_single' is best for spinning disk HDDs. 'random' disables hashing, instead assigning a UUID to models. Useful when using a memory db to reduce model installation time, or if you don't care about storing stable hashes for models. Alternatively, any other hashlib algorithm is accepted, though these are not nearly as performant as blake3.")
     remote_api_tokens: Optional[list[URLRegexTokenPair]] = Field(default=None, description="List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided in as a Bearer token.")
 
     # fmt: on
@@ -332,6 +332,27 @@ class InvokeAIAppConfig(BaseSettings):
         return root
 
 
+class DefaultInvokeAIAppConfig(InvokeAIAppConfig):
+    """A version of `InvokeAIAppConfig` that does not automatically parse any settings from environment variables
+    or any file.
+
+    This is useful for writing out a default config file.
+
+    Note that init settings are set if provided.
+    """
+
+    @classmethod
+    def settings_customise_sources(
+        cls,
+        settings_cls: type[BaseSettings],
+        init_settings: PydanticBaseSettingsSource,
+        env_settings: PydanticBaseSettingsSource,
+        dotenv_settings: PydanticBaseSettingsSource,
+        file_secret_settings: PydanticBaseSettingsSource,
+    ) -> tuple[PydanticBaseSettingsSource, ...]:
+        return (init_settings,)
+
+
 def migrate_v3_config_dict(config_dict: dict[str, Any]) -> InvokeAIAppConfig:
     """Migrate a v3 config dictionary to a current config object.
 
@@ -367,7 +388,8 @@ def migrate_v3_config_dict(config_dict: dict[str, Any]) -> InvokeAIAppConfig:
         elif k in InvokeAIAppConfig.model_fields:
             # skip unknown fields
             parsed_config_dict[k] = v
-    config = InvokeAIAppConfig.model_validate(parsed_config_dict)
+    # When migrating the config file, we should not include currently-set environment variables.
+    config = DefaultInvokeAIAppConfig.model_validate(parsed_config_dict)
 
     return config
 
@@ -391,14 +413,13 @@ def load_and_migrate_config(config_path: Path) -> InvokeAIAppConfig:
         # This is a v3 config file, attempt to migrate it
         shutil.copy(config_path, config_path.with_suffix(".yaml.bak"))
         try:
-            # This could be the wrong shape, but we will catch all exceptions below
-            config = migrate_v3_config_dict(loaded_config_dict)  # pyright: ignore [reportUnknownArgumentType]
+            # loaded_config_dict could be the wrong shape, but we will catch all exceptions below
+            migrated_config = migrate_v3_config_dict(loaded_config_dict)  # pyright: ignore [reportUnknownArgumentType]
         except Exception as e:
             shutil.copy(config_path.with_suffix(".yaml.bak"), config_path)
             raise RuntimeError(f"Failed to load and migrate v3 config file {config_path}: {e}") from e
-        # By excluding defaults, we ensure that the new config file only contains the settings that were explicitly set
-        config.write_file(config_path)
-        return config
+        migrated_config.write_file(config_path)
+        return migrated_config
     else:
         # Attempt to load as a v4 config file
         try:
@@ -426,6 +447,7 @@ def get_config() -> InvokeAIAppConfig:
 
     On subsequent calls, the object is returned from the cache.
     """
+    # This object includes environment variables, as parsed by pydantic-settings
     config = InvokeAIAppConfig()
 
     args = InvokeAIArgs.args
@@ -441,8 +463,8 @@ def get_config() -> InvokeAIAppConfig:
     if config_file := getattr(args, "config_file", None):
         config._config_file = Path(config_file)
 
-    # Create the example file from a deep copy, with some extra values provided
-    example_config = config.model_copy(deep=True)
+    # Create the example config file, with some extra example values provided
+    example_config = DefaultInvokeAIAppConfig()
     example_config.remote_api_tokens = [
         URLRegexTokenPair(url_regex="cool-models.com", token="my_secret_token"),
         URLRegexTokenPair(url_regex="nifty-models.com", token="some_other_token"),
@@ -454,10 +476,12 @@ def get_config() -> InvokeAIAppConfig:
         shutil.copytree(configs_src, config.legacy_conf_path, dirs_exist_ok=True)
 
     if config.config_file_path.exists():
-        incoming_config = load_and_migrate_config(config.config_file_path)
+        config_from_file = load_and_migrate_config(config.config_file_path)
         # Clobbering here will overwrite any settings that were set via environment variables
-        config.update_config(incoming_config, clobber=False)
+        config.update_config(config_from_file, clobber=False)
     else:
-        config.write_file(config.config_file_path)
+        # We should never write env vars to the config file
+        default_config = DefaultInvokeAIAppConfig()
+        default_config.write_file(config.config_file_path, as_example=False)
 
     return config
```
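The effect of overriding `settings_customise_sources` can be shown with a standalone pydantic-settings model. In this sketch, `AppConfig`, `DefaultAppConfig`, and the `INVOKEAI_` env prefix are stand-ins for illustration, not the real classes:

```python
import os
from pydantic_settings import BaseSettings, PydanticBaseSettingsSource, SettingsConfigDict

class AppConfig(BaseSettings):
    # Minimal stand-in for InvokeAIAppConfig; env prefix is an assumption.
    model_config = SettingsConfigDict(env_prefix="INVOKEAI_")
    hashing_algorithm: str = "blake3_single"

class DefaultAppConfig(AppConfig):
    # Same trick as DefaultInvokeAIAppConfig: keep only init settings, so
    # environment variables and dotenv files are ignored entirely.
    @classmethod
    def settings_customise_sources(cls, settings_cls, init_settings, env_settings,
                                   dotenv_settings, file_secret_settings):
        return (init_settings,)

os.environ["INVOKEAI_HASHING_ALGORITHM"] = "random"
print(AppConfig().hashing_algorithm)         # "random" - the env var wins
print(DefaultAppConfig().hashing_algorithm)  # "blake3_single" - env ignored
```

This is why the migration and first-run paths above construct a `DefaultInvokeAIAppConfig` before writing the config file: whatever is currently in the environment never leaks onto disk.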
Download queue service (Python):

```diff
@@ -85,8 +85,10 @@ class DownloadQueueService(DownloadQueueServiceBase):
         self._logger.info(f"Waiting for {len(active_jobs)} active download jobs to complete")
         with self._queue.mutex:
             self._queue.queue.clear()
-        self.join()  # wait for all active jobs to finish
+        self.cancel_all_jobs()
         self._stop_event.set()
+        for thread in self._worker_pool:
+            thread.join()
         self._worker_pool.clear()
 
     def submit_download_job(
```
Model install service base (Python):

```diff
@@ -468,6 +468,19 @@ class ModelInstallServiceBase(ABC):
     def sync_to_config(self) -> None:
         """Synchronize models on disk to those in the model record database."""
 
+    @abstractmethod
+    def sync_model_path(self, key: str) -> AnyModelConfig:
+        """
+        Move model into the location indicated by its basetype, type and name.
+
+        Call this after updating a model's attributes in order to move
+        the model's path into the location indicated by its basetype, type and
+        name. Applies only to models whose paths are within the root `models_dir`
+        directory.
+
+        May raise an UnknownModelException.
+        """
+
     @abstractmethod
     def download_and_cache(self, source: Union[str, AnyHttpUrl], access_token: Optional[str] = None) -> Path:
         """
```
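To make the destination rule concrete: per the implementation later in this diff, the installed layout is `<models_dir>/<base>/<type>/<name><suffix>`. A toy illustration with hypothetical values:

```python
from pathlib import Path

# Hypothetical attributes, mirroring model.base.value / model.type.value / model.name.
models_dir = Path("/invokeai/models")
base, model_type, name = "sd-1", "main", "dreamshaper-8"
old_path = Path("/invokeai/models/incoming/dreamshaper-8.safetensors")

new_path = (models_dir / base / model_type / name).with_suffix(old_path.suffix)
print(new_path)  # /invokeai/models/sd-1/main/dreamshaper-8.safetensors
```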
Model install service implementation (Python):

```diff
@@ -2,6 +2,7 @@
 
 import os
 import re
+import signal
 import threading
 import time
 from hashlib import sha256
@@ -34,6 +35,7 @@ from invokeai.backend.model_manager.config import (
 from invokeai.backend.model_manager.metadata import (
     AnyModelRepoMetadata,
     HuggingFaceMetadataFetch,
+    ModelMetadataFetchBase,
     ModelMetadataWithFiles,
     RemoteModelFile,
 )
@@ -92,6 +94,7 @@ class ModelInstallService(ModelInstallServiceBase):
         self._download_cache: Dict[AnyHttpUrl, ModelInstallJob] = {}
         self._running = False
         self._session = session
+        self._install_thread: Optional[threading.Thread] = None
         self._next_job_id = 0
 
     @property
@@ -110,6 +113,18 @@ class ModelInstallService(ModelInstallServiceBase):
     # makes the installer harder to use outside the web app
     def start(self, invoker: Optional[Invoker] = None) -> None:
         """Start the installer thread."""
+
+        # Yes, this is weird. When the installer thread is running, the
+        # thread masks the ^C signal. When we receive a
+        # sigINT, we stop the thread, reset sigINT, and send a new
+        # sigINT to the parent process.
+        def sigint_handler(signum, frame):
+            self.stop()
+            signal.signal(signal.SIGINT, signal.SIG_DFL)
+            signal.raise_signal(signal.SIGINT)
+
+        signal.signal(signal.SIGINT, sigint_handler)
+
         with self._lock:
             if self._running:
                 raise Exception("Attempt to start the installer service twice")
@@ -120,13 +135,15 @@ class ModelInstallService(ModelInstallServiceBase):
 
     def stop(self, invoker: Optional[Invoker] = None) -> None:
         """Stop the installer thread; after this the object can be deleted and garbage collected."""
-        with self._lock:
-            if not self._running:
-                raise Exception("Attempt to stop the install service before it was started")
-            self._stop_event.set()
-            self._clear_pending_jobs()
-            self._download_cache.clear()
-            self._running = False
+        if not self._running:
+            raise Exception("Attempt to stop the install service before it was started")
+        self._logger.debug("calling stop_event.set()")
+        self._stop_event.set()
+        self._clear_pending_jobs()
+        self._download_cache.clear()
+        assert self._install_thread is not None
+        self._install_thread.join()
+        self._running = False
 
     def _clear_pending_jobs(self) -> None:
         for job in self.list_jobs():
@@ -275,6 +292,7 @@ class ModelInstallService(ModelInstallServiceBase):
             if timeout > 0 and time.time() - start > timeout:
                 raise TimeoutError("Timeout exceeded")
+        self._install_queue.join()
 
         return self._install_jobs
 
     def cancel_job(self, job: ModelInstallJob) -> None:
@@ -345,9 +363,8 @@ class ModelInstallService(ModelInstallServiceBase):
         # Rename `models.yaml` to `models.yaml.bak` to prevent re-migration
         legacy_models_yaml_path.rename(legacy_models_yaml_path.with_suffix(".yaml.bak"))
 
-        # Remove `legacy_models_yaml_path` from the config file - we are done with it either way
+        # Unset the path - we are done with it either way
         self._app_config.legacy_models_yaml_path = None
-        self._app_config.write_file(self._app_config.config_file_path)
 
     def scan_directory(self, scan_dir: Path, install: bool = False) -> List[str]:  # noqa D102
         self._cached_model_paths = {Path(x.path).resolve() for x in self.record_store.all_models()}
@@ -373,10 +390,10 @@ class ModelInstallService(ModelInstallServiceBase):
     def unconditionally_delete(self, key: str) -> None:  # noqa D102
         model = self.record_store.get_model(key)
         model_path = self.app_config.models_path / model.path
-        if model_path.is_dir():
-            rmtree(model_path)
-        else:
+        if model_path.is_file() or model_path.is_symlink():
             model_path.unlink()
+        elif model_path.is_dir():
+            rmtree(model_path)
         self.unregister(key)
 
     def download_and_cache(
@@ -415,15 +432,16 @@ class ModelInstallService(ModelInstallServiceBase):
     # Internal functions that manage the installer threads
     # --------------------------------------------------------------------------------------------
     def _start_installer_thread(self) -> None:
-        threading.Thread(target=self._install_next_item, daemon=True).start()
+        self._install_thread = threading.Thread(target=self._install_next_item, daemon=True)
+        self._install_thread.start()
         self._running = True
 
     def _install_next_item(self) -> None:
-        done = False
-        while not done:
+        self._logger.debug(f"Installer thread {threading.get_ident()} starting")
+        while True:
             if self._stop_event.is_set():
-                done = True
-                continue
+                break
+            self._logger.debug(f"Installer thread {threading.get_ident()} polling")
             try:
                 job = self._install_queue.get(timeout=1)
             except Empty:
@@ -436,39 +454,14 @@ class ModelInstallService(ModelInstallServiceBase):
                 elif job.errored:
                     self._signal_job_errored(job)
 
-                elif (
-                    job.waiting or job.downloads_done
-                ):  # local jobs will be in waiting state, remote jobs will be downloading state
-                    job.total_bytes = self._stat_size(job.local_path)
-                    job.bytes = job.total_bytes
-                    self._signal_job_running(job)
-                    job.config_in["source"] = str(job.source)
-                    job.config_in["source_type"] = MODEL_SOURCE_TO_TYPE_MAP[job.source.__class__]
-                    # enter the metadata, if there is any
-                    if isinstance(job.source_metadata, (HuggingFaceMetadata)):
-                        job.config_in["source_api_response"] = job.source_metadata.api_response
-
-                    if job.inplace:
-                        key = self.register_path(job.local_path, job.config_in)
-                    else:
-                        key = self.install_path(job.local_path, job.config_in)
-                    job.config_out = self.record_store.get_model(key)
-                    self._signal_job_completed(job)
+                elif job.waiting or job.downloads_done:
+                    self._register_or_install(job)
 
             except InvalidModelConfigException as excp:
-                if any(x.content_type is not None and "text/html" in x.content_type for x in job.download_parts):
-                    job.set_error(
-                        InvalidModelConfigException(
-                            f"At least one file in {job.local_path} is an HTML page, not a model. This can happen when an access token is required to download."
-                        )
-                    )
-                else:
-                    job.set_error(excp)
-                self._signal_job_errored(job)
+                self._set_error(job, excp)
 
             except (OSError, DuplicateModelException) as excp:
-                job.set_error(excp)
-                self._signal_job_errored(job)
+                self._set_error(job, excp)
 
             finally:
                 # if this is an install of a remote file, then clean up the temporary directory
@@ -476,6 +469,36 @@ class ModelInstallService(ModelInstallServiceBase):
                     rmtree(job._install_tmpdir)
                 self._install_completed_event.set()
                 self._install_queue.task_done()
+        self._logger.info(f"Installer thread {threading.get_ident()} exiting")
+
+    def _register_or_install(self, job: ModelInstallJob) -> None:
+        # local jobs will be in waiting state, remote jobs will be downloading state
+        job.total_bytes = self._stat_size(job.local_path)
+        job.bytes = job.total_bytes
+        self._signal_job_running(job)
+        job.config_in["source"] = str(job.source)
+        job.config_in["source_type"] = MODEL_SOURCE_TO_TYPE_MAP[job.source.__class__]
+        # enter the metadata, if there is any
+        if isinstance(job.source_metadata, (HuggingFaceMetadata)):
+            job.config_in["source_api_response"] = job.source_metadata.api_response
+
+        if job.inplace:
+            key = self.register_path(job.local_path, job.config_in)
+        else:
+            key = self.install_path(job.local_path, job.config_in)
+        job.config_out = self.record_store.get_model(key)
+        self._signal_job_completed(job)
+
+    def _set_error(self, job: ModelInstallJob, excp: Exception) -> None:
+        if any(x.content_type is not None and "text/html" in x.content_type for x in job.download_parts):
+            job.set_error(
+                InvalidModelConfigException(
+                    f"At least one file in {job.local_path} is an HTML page, not a model. This can happen when an access token is required to download."
+                )
+            )
+        else:
+            job.set_error(excp)
+        self._signal_job_errored(job)
+
     # --------------------------------------------------------------------------------------------
     # Internal functions that manage the models directory
@@ -516,7 +539,7 @@ class ModelInstallService(ModelInstallServiceBase):
             installed.update(self.scan_directory(models_dir))
         self._logger.info(f"{len(installed)} new models registered; {len(defunct_models)} unregistered")
 
-    def _sync_model_path(self, key: str) -> AnyModelConfig:
+    def sync_model_path(self, key: str) -> AnyModelConfig:
         """
         Move model into the location indicated by its basetype, type and name.
 
@@ -528,16 +551,13 @@ class ModelInstallService(ModelInstallServiceBase):
         May raise an UnknownModelException.
         """
         model = self.record_store.get_model(key)
-        old_path = Path(model.path)
-        models_dir = self.app_config.models_path
+        old_path = Path(model.path).resolve()
+        models_dir = self.app_config.models_path.resolve()
 
-        try:
-            old_path.relative_to(models_dir)
-        except ValueError:
-            pass
+        if not old_path.is_relative_to(models_dir):
+            return model
 
-        new_path = models_dir / model.base.value / model.type.value / old_path.name
+        new_path = (models_dir / model.base.value / model.type.value / model.name).with_suffix(old_path.suffix)
 
         if old_path == new_path or new_path.exists() and old_path == new_path.resolve():
             return model
@@ -549,11 +569,11 @@ class ModelInstallService(ModelInstallServiceBase):
             return model
 
     def _scan_register(self, model: Path) -> bool:
-        if model in self._cached_model_paths:
+        if model.resolve() in self._cached_model_paths:
             return True
         try:
             id = self.register_path(model)
-            self._sync_model_path(id)  # possibly move it to right place in `models`
+            self.sync_model_path(id)  # possibly move it to right place in `models`
             self._logger.info(f"Registered {model.name} with id {id}")
             self._models_installed.add(id)
         except DuplicateModelException:
@@ -722,12 +742,13 @@ class ModelInstallService(ModelInstallServiceBase):
         install_job._install_tmpdir = tmpdir
         assert install_job.total_bytes is not None  # to avoid type checking complaints in the loop below
 
-        self._logger.info(f"Queuing {source} for downloading")
+        files_string = "file" if len(remote_files) == 1 else "files"
+        self._logger.info(f"Queuing model install: {source} ({len(remote_files)} {files_string})")
+        self._logger.debug(f"remote_files={remote_files}")
         for model_file in remote_files:
             url = model_file.url
             path = root / model_file.path.relative_to(subfolder)
-            self._logger.info(f"Downloading {url} => {path}")
+            self._logger.debug(f"Downloading {url} => {path}")
             install_job.total_bytes += model_file.size
             assert hasattr(source, "access_token")
             dest = tmpdir / path.parent
@@ -763,7 +784,7 @@ class ModelInstallService(ModelInstallServiceBase):
     # Callbacks are executed by the download queue in a separate thread
     # ------------------------------------------------------------------
     def _download_started_callback(self, download_job: DownloadJob) -> None:
-        self._logger.info(f"{download_job.source}: model download started")
+        self._logger.info(f"Model download started: {download_job.source}")
         with self._lock:
             install_job = self._download_cache[download_job.source]
             install_job.status = InstallStatus.DOWNLOADING
@@ -789,7 +810,7 @@ class ModelInstallService(ModelInstallServiceBase):
             self._signal_job_downloading(install_job)
 
     def _download_complete_callback(self, download_job: DownloadJob) -> None:
-        self._logger.info(f"{download_job.source}: model download complete")
+        self._logger.info(f"Model download complete: {download_job.source}")
         with self._lock:
             install_job = self._download_cache[download_job.source]
 
@@ -822,7 +843,7 @@ class ModelInstallService(ModelInstallServiceBase):
         if not install_job:
             return
         self._downloads_changed_event.set()
-        self._logger.warning(f"{download_job.source}: model download cancelled")
+        self._logger.warning(f"Model download canceled: {download_job.source}")
         # if install job has already registered an error, then do not replace its status with cancelled
         if not install_job.errored:
             install_job.cancel()
@@ -846,7 +867,7 @@ class ModelInstallService(ModelInstallServiceBase):
     # ------------------------------------------------------------------------------------------------
     def _signal_job_running(self, job: ModelInstallJob) -> None:
         job.status = InstallStatus.RUNNING
-        self._logger.info(f"{job.source}: model installation started")
+        self._logger.info(f"Model install started: {job.source}")
         if self._event_bus:
             self._event_bus.emit_model_install_running(str(job.source))
 
@@ -874,16 +895,15 @@ class ModelInstallService(ModelInstallServiceBase):
 
     def _signal_job_downloads_done(self, job: ModelInstallJob) -> None:
         job.status = InstallStatus.DOWNLOADS_DONE
-        self._logger.info(f"{job.source}: all parts of this model are downloaded")
+        self._logger.info(f"Model download complete: {job.source}")
         if self._event_bus:
             self._event_bus.emit_model_install_downloads_done(str(job.source))
 
     def _signal_job_completed(self, job: ModelInstallJob) -> None:
         job.status = InstallStatus.COMPLETED
         assert job.config_out
-        self._logger.info(
-            f"{job.source}: model installation completed. {job.local_path} registered key {job.config_out.key}"
-        )
+        self._logger.info(f"Model install complete: {job.source}")
+        self._logger.debug(f"{job.local_path} registered key {job.config_out.key}")
         if self._event_bus:
             assert job.local_path is not None
             assert job.config_out is not None
@@ -891,7 +911,7 @@ class ModelInstallService(ModelInstallServiceBase):
             self._event_bus.emit_model_install_completed(str(job.source), key, id=job.id)
 
     def _signal_job_errored(self, job: ModelInstallJob) -> None:
-        self._logger.info(f"{job.source}: model installation encountered an exception: {job.error_type}\n{job.error}")
+        self._logger.info(f"Model install error: {job.source}, {job.error_type}\n{job.error}")
         if self._event_bus:
             error_type = job.error_type
             error = job.error
@@ -900,12 +920,12 @@ class ModelInstallService(ModelInstallServiceBase):
             self._event_bus.emit_model_install_error(str(job.source), error_type, error, id=job.id)
 
     def _signal_job_cancelled(self, job: ModelInstallJob) -> None:
-        self._logger.info(f"{job.source}: model installation was cancelled")
+        self._logger.info(f"Model install canceled: {job.source}")
         if self._event_bus:
            self._event_bus.emit_model_install_cancelled(str(job.source), id=job.id)
 
     @staticmethod
-    def get_fetcher_from_url(url: str):
+    def get_fetcher_from_url(url: str) -> ModelMetadataFetchBase:
         if re.match(r"^https?://huggingface.co/[^/]+/[^/]+$", url.lower()):
             return HuggingFaceMetadataFetch
         raise ValueError(f"Unsupported model source: '{url}'")
```
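The SIGINT dance in `start()` generalizes to any service that runs its own worker thread: stop the thread first, restore the default handler, then re-raise the signal so the process still dies as the user expects. A minimal sketch of the same pattern (`Service` is illustrative, not the installer class):

```python
import signal
import threading

class Service:
    def __init__(self) -> None:
        self._stop = threading.Event()
        self._thread = threading.Thread(target=self._run, daemon=True)

    def _run(self) -> None:
        while not self._stop.wait(timeout=1):
            pass  # poll a work queue here, as the installer thread does

    def start(self) -> None:
        def handler(signum, frame):
            # Stop our thread, restore the default handler, then re-raise
            # SIGINT so the interpreter terminates normally.
            self.stop()
            signal.signal(signal.SIGINT, signal.SIG_DFL)
            signal.raise_signal(signal.SIGINT)

        signal.signal(signal.SIGINT, handler)
        self._thread.start()

    def stop(self) -> None:
        self._stop.set()
        self._thread.join()
```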
@@ -6,6 +6,7 @@ from .model_records_base import ( # noqa F401
    ModelRecordServiceBase,
    UnknownModelException,
    ModelSummary,
    ModelRecordChanges,
    ModelRecordOrderBy,
)
from .model_records_sql import ModelRecordServiceSQL # noqa F401
@@ -17,5 +18,6 @@ __all__ = [
    "InvalidModelException",
    "UnknownModelException",
    "ModelSummary",
    "ModelRecordChanges",
    "ModelRecordOrderBy",
]
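Hedged example of what the re-export above enables: ModelRecordChanges can now be imported from the package root. The import path is assumed from the repo layout shown in the hunk header, and the `name` field is an assumption based on the class's purpose; consult model_records_base for the real schema.

from invokeai.app.services.model_records import ModelRecordChanges

changes = ModelRecordChanges(name="my-renamed-model")  # `name` field assumed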
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -2,7 +2,7 @@
"name": "Prompt from File",
"author": "InvokeAI",
"description": "Sample workflow using Prompt from File node",
"version": "0.1.0",
"version": "2.0.0",
"contact": "invoke@invoke.ai",
"tags": "text2image, prompt from file, default",
"notes": "",
@@ -14,11 +14,31 @@
{
"nodeId": "1b7e0df8-8589-4915-a4ea-c0088f15d642",
"fieldName": "file_path"
},
{
"nodeId": "1b7e0df8-8589-4915-a4ea-c0088f15d642",
"fieldName": "pre_prompt"
},
{
"nodeId": "1b7e0df8-8589-4915-a4ea-c0088f15d642",
"fieldName": "post_prompt"
},
{
"nodeId": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77",
"fieldName": "width"
},
{
"nodeId": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77",
"fieldName": "height"
},
{
"nodeId": "491ec988-3c77-4c37-af8a-39a0c4e7a2a1",
"fieldName": "board"
}
],
"meta": {
"category": "default",
"version": "2.0.0"
"version": "3.0.0"
},
"nodes": [
{
@@ -26,847 +46,361 @@
"type": "invocation",
"data": {
"id": "c2eaf1ba-5708-4679-9e15-945b8b432692",
"type": "compel",
"label": "",
"isOpen": false,
"notes": "",
"isIntermediate": true,
"useCache": true,
"version": "1.0.0",
"version": "1.1.1",
"nodePack": "invokeai",
"label": "",
"notes": "",
"type": "compel",
"inputs": {
"prompt": {
"id": "dcdf3f6d-9b96-4bcd-9b8d-f992fefe4f62",
"name": "prompt",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "StringField"
},
"value": ""
},
"clip": {
"id": "3f1981c9-d8a9-42eb-a739-4f120eb80745",
"name": "clip",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ClipField"
}
"label": ""
}
},
"outputs": {
"conditioning": {
"id": "46205e6c-c5e2-44cb-9c82-1cd20b95674a",
"name": "conditioning",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ConditioningField"
}
}
}
"isOpen": false,
"isIntermediate": true,
"useCache": true
},
"position": {
"x": 925,
"y": -200
},
"width": 320,
"height": 24
}
},
{
"id": "1b7e0df8-8589-4915-a4ea-c0088f15d642",
"type": "invocation",
"data": {
"id": "1b7e0df8-8589-4915-a4ea-c0088f15d642",
"type": "prompt_from_file",
"label": "Prompts from File",
"isOpen": true,
"notes": "",
"isIntermediate": true,
"useCache": true,
"version": "1.0.1",
"version": "1.0.2",
"nodePack": "invokeai",
"label": "Prompts from File",
"notes": "",
"type": "prompt_from_file",
"inputs": {
"file_path": {
"id": "37e37684-4f30-4ec8-beae-b333e550f904",
"name": "file_path",
"fieldKind": "input",
"label": "Prompts File Path",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "StringField"
},
"value": ""
},
"pre_prompt": {
"id": "7de02feb-819a-4992-bad3-72a30920ddea",
"name": "pre_prompt",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "StringField"
},
"value": ""
},
"post_prompt": {
"id": "95f191d8-a282-428e-bd65-de8cb9b7513a",
"name": "post_prompt",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "StringField"
},
"value": ""
},
"start_line": {
"id": "efee9a48-05ab-4829-8429-becfa64a0782",
"name": "start_line",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
},
"value": 1
},
"max_prompts": {
"id": "abebb428-3d3d-49fd-a482-4e96a16fff08",
"name": "max_prompts",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
},
"value": 1
}
},
"outputs": {
"collection": {
"id": "77d5d7f1-9877-4ab1-9a8c-33e9ffa9abf3",
"name": "collection",
"fieldKind": "output",
"type": {
"isCollection": true,
"isCollectionOrScalar": false,
"name": "StringField"
}
}
}
"isOpen": true,
"isIntermediate": true,
"useCache": true
},
"position": {
"x": 475,
"y": -400
},
"width": 320,
"height": 506
}
},
{
"id": "1b89067c-3f6b-42c8-991f-e3055789b251",
"type": "invocation",
"data": {
"id": "1b89067c-3f6b-42c8-991f-e3055789b251",
"type": "iterate",
"label": "",
"isOpen": false,
"notes": "",
"isIntermediate": true,
"useCache": true,
"version": "1.1.0",
"label": "",
"notes": "",
"type": "iterate",
"inputs": {
"collection": {
"id": "4c564bf8-5ed6-441e-ad2c-dda265d5785f",
"name": "collection",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": true,
"isCollectionOrScalar": false,
"name": "CollectionField"
}
"label": ""
}
},
"outputs": {
"item": {
"id": "36340f9a-e7a5-4afa-b4b5-313f4e292380",
"name": "item",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "CollectionItemField"
}
},
"index": {
"id": "1beca95a-2159-460f-97ff-c8bab7d89336",
"name": "index",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
}
},
"total": {
"id": "ead597b8-108e-4eda-88a8-5c29fa2f8df9",
"name": "total",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
}
}
}
"isOpen": false,
"isIntermediate": true,
"useCache": true
},
"position": {
"x": 925,
"y": -400
},
"width": 320,
"height": 24
}
},
{
"id": "d6353b7f-b447-4e17-8f2e-80a88c91d426",
"type": "invocation",
"data": {
"id": "d6353b7f-b447-4e17-8f2e-80a88c91d426",
"type": "main_model_loader",
"label": "",
"isOpen": true,
"notes": "",
"isIntermediate": true,
"useCache": true,
"version": "1.0.0",
"version": "1.0.2",
"nodePack": "invokeai",
"label": "",
"notes": "",
"type": "main_model_loader",
"inputs": {
"model": {
"id": "3f264259-3418-47d5-b90d-b6600e36ae46",
"name": "model",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "MainModelField"
},
"value": {
"model_name": "stable-diffusion-v1-5",
"base_model": "sd-1",
"model_type": "main"
}
"label": ""
}
},
"outputs": {
"unet": {
"id": "8e182ea2-9d0a-4c02-9407-27819288d4b5",
"name": "unet",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "UNetField"
}
},
"clip": {
"id": "d67d9d30-058c-46d5-bded-3d09d6d1aa39",
"name": "clip",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ClipField"
}
},
"vae": {
"id": "89641601-0429-4448-98d5-190822d920d8",
"name": "vae",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "VaeField"
}
}
}
"isOpen": true,
"isIntermediate": true,
"useCache": true
},
"position": {
"x": 0,
"y": -375
},
"width": 320,
"height": 193
}
},
{
"id": "fc9d0e35-a6de-4a19-84e1-c72497c823f6",
"type": "invocation",
"data": {
"id": "fc9d0e35-a6de-4a19-84e1-c72497c823f6",
"type": "compel",
"label": "",
"isOpen": false,
"notes": "",
"isIntermediate": true,
"useCache": true,
"version": "1.0.0",
"version": "1.1.1",
"nodePack": "invokeai",
"label": "",
"notes": "",
"type": "compel",
"inputs": {
"prompt": {
"id": "dcdf3f6d-9b96-4bcd-9b8d-f992fefe4f62",
"name": "prompt",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "StringField"
},
"value": ""
},
"clip": {
"id": "3f1981c9-d8a9-42eb-a739-4f120eb80745",
"name": "clip",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ClipField"
}
"label": ""
}
},
"outputs": {
"conditioning": {
"id": "46205e6c-c5e2-44cb-9c82-1cd20b95674a",
"name": "conditioning",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ConditioningField"
}
}
}
"isOpen": false,
"isIntermediate": true,
"useCache": true
},
"position": {
"x": 925,
"y": -275
},
"width": 320,
"height": 24
}
},
{
"id": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77",
"type": "invocation",
"data": {
"id": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77",
"type": "noise",
"label": "",
"isOpen": false,
"notes": "",
"isIntermediate": true,
"useCache": true,
"version": "1.0.1",
"version": "1.0.2",
"nodePack": "invokeai",
"label": "",
"notes": "",
"type": "noise",
"inputs": {
"seed": {
"id": "b722d84a-eeee-484f-bef2-0250c027cb67",
"name": "seed",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
},
"value": 0
},
"width": {
"id": "d5f8ce11-0502-4bfc-9a30-5757dddf1f94",
"name": "width",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
},
"value": 512
},
"height": {
"id": "f187d5ff-38a5-4c3f-b780-fc5801ef34af",
"name": "height",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
},
"value": 512
},
"use_cpu": {
"id": "12f112b8-8b76-4816-b79e-662edc9f9aa5",
"name": "use_cpu",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "BooleanField"
},
"value": true
}
},
"outputs": {
"noise": {
"id": "08576ad1-96d9-42d2-96ef-6f5c1961933f",
"name": "noise",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "LatentsField"
}
},
"width": {
"id": "f3e1f94a-258d-41ff-9789-bd999bd9f40d",
"name": "width",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
}
},
"height": {
"id": "6cefc357-4339-415e-a951-49b9c2be32f4",
"name": "height",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
}
}
}
"isOpen": true,
"isIntermediate": true,
"useCache": true
},
"position": {
"x": 925,
"y": 25
},
"width": 320,
"height": 24
}
},
{
"id": "dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5",
"type": "invocation",
"data": {
"id": "dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5",
"type": "rand_int",
"label": "",
"isOpen": false,
"notes": "",
"isIntermediate": true,
"useCache": false,
"version": "1.0.0",
"version": "1.0.1",
"nodePack": "invokeai",
"label": "",
"notes": "",
"type": "rand_int",
"inputs": {
"low": {
"id": "b9fc6cf1-469c-4037-9bf0-04836965826f",
"name": "low",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
},
"value": 0
},
"high": {
"id": "06eac725-0f60-4ba2-b8cd-7ad9f757488c",
"name": "high",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
},
"value": 2147483647
}
},
"outputs": {
"value": {
"id": "df08c84e-7346-4e92-9042-9e5cb773aaff",
"name": "value",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
}
}
}
"isOpen": false,
"isIntermediate": true,
"useCache": false
},
"position": {
"x": 925,
"y": -50
},
"width": 320,
"height": 24
}
},
{
"id": "491ec988-3c77-4c37-af8a-39a0c4e7a2a1",
"type": "invocation",
"data": {
"id": "491ec988-3c77-4c37-af8a-39a0c4e7a2a1",
"type": "l2i",
"label": "",
"isOpen": true,
"notes": "",
"isIntermediate": true,
"useCache": true,
"version": "1.2.0",
"version": "1.2.2",
"nodePack": "invokeai",
"label": "",
"notes": "",
"type": "l2i",
"inputs": {
"board": {
"name": "board",
"label": ""
},
"metadata": {
"id": "022e4b33-562b-438d-b7df-41c3fd931f40",
"name": "metadata",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "MetadataField"
}
"label": ""
},
"latents": {
"id": "67cb6c77-a394-4a66-a6a9-a0a7dcca69ec",
"name": "latents",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "LatentsField"
}
"label": ""
},
"vae": {
"id": "7b3fd9ad-a4ef-4e04-89fa-3832a9902dbd",
"name": "vae",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "VaeField"
}
"label": ""
},
"tiled": {
"id": "5ac5680d-3add-4115-8ec0-9ef5bb87493b",
"name": "tiled",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "BooleanField"
},
"value": false
},
"fp32": {
"id": "db8297f5-55f8-452f-98cf-6572c2582152",
"name": "fp32",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "BooleanField"
},
"value": false
}
},
"outputs": {
"image": {
"id": "d8778d0c-592a-4960-9280-4e77e00a7f33",
"name": "image",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ImageField"
}
},
"width": {
"id": "c8b0a75a-f5de-4ff2-9227-f25bb2b97bec",
"name": "width",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
}
},
"height": {
"id": "83c05fbf-76b9-49ab-93c4-fa4b10e793e4",
"name": "height",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
}
}
}
"isOpen": true,
"isIntermediate": true,
"useCache": true
},
"position": {
"x": 2037.861329274915,
"y": -329.8393457509562
},
"width": 320,
"height": 224
}
},
{
"id": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e",
"type": "invocation",
"data": {
"id": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e",
"type": "denoise_latents",
"label": "",
"isOpen": true,
"notes": "",
"isIntermediate": true,
"useCache": true,
"version": "1.5.1",
"version": "1.5.3",
"nodePack": "invokeai",
"label": "",
"notes": "",
"type": "denoise_latents",
"inputs": {
"positive_conditioning": {
"id": "751fb35b-3f23-45ce-af1c-053e74251337",
"name": "positive_conditioning",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ConditioningField"
}
"label": ""
},
"negative_conditioning": {
"id": "b9dc06b6-7481-4db1-a8c2-39d22a5eacff",
"name": "negative_conditioning",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ConditioningField"
}
"label": ""
},
"noise": {
"id": "6e15e439-3390-48a4-8031-01e0e19f0e1d",
"name": "noise",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "LatentsField"
}
"label": ""
},
"steps": {
"id": "bfdfb3df-760b-4d51-b17b-0abb38b976c2",
"name": "steps",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
},
"value": 10
"value": 30
},
"cfg_scale": {
"id": "47770858-322e-41af-8494-d8b63ed735f3",
"name": "cfg_scale",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": true,
"name": "FloatField"
},
"value": 7.5
},
"denoising_start": {
"id": "2ba78720-ee02-4130-a348-7bc3531f790b",
"name": "denoising_start",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "FloatField"
},
"value": 0
},
"denoising_end": {
"id": "a874dffb-d433-4d1a-9f59-af4367bb05e4",
"name": "denoising_end",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "FloatField"
},
"value": 1
},
"scheduler": {
"id": "36e021ad-b762-4fe4-ad4d-17f0291c40b2",
"name": "scheduler",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "SchedulerField"
},
"value": "euler"
},
"unet": {
"id": "98d3282d-f9f6-4b5e-b9e8-58658f1cac78",
"name": "unet",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "UNetField"
}
"label": ""
},
"control": {
"id": "f2ea3216-43d5-42b4-887f-36e8f7166d53",
"name": "control",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": true,
"name": "ControlField"
}
"label": ""
},
"ip_adapter": {
"id": "d0780610-a298-47c8-a54e-70e769e0dfe2",
"name": "ip_adapter",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": true,
"name": "IPAdapterField"
}
"label": ""
},
"t2i_adapter": {
"id": "fdb40970-185e-4ea8-8bb5-88f06f91f46a",
"name": "t2i_adapter",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": true,
"name": "T2IAdapterField"
}
"label": ""
},
"cfg_rescale_multiplier": {
"id": "3af2d8c5-de83-425c-a100-49cb0f1f4385",
"name": "cfg_rescale_multiplier",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "FloatField"
},
"value": 0
},
"latents": {
"id": "e05b538a-1b5a-4aa5-84b1-fd2361289a81",
"name": "latents",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "LatentsField"
}
"label": ""
},
"denoise_mask": {
"id": "463a419e-df30-4382-8ffb-b25b25abe425",
"name": "denoise_mask",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "DenoiseMaskField"
}
"label": ""
}
},
"outputs": {
"latents": {
"id": "559ee688-66cf-4139-8b82-3d3aa69995ce",
"name": "latents",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "LatentsField"
}
},
"width": {
"id": "0b4285c2-e8b9-48e5-98f6-0a49d3f98fd2",
"name": "width",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
}
},
"height": {
"id": "8b0881b9-45e5-47d5-b526-24b6661de0ee",
"name": "height",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
}
}
}
"isOpen": true,
"isIntermediate": true,
"useCache": true
},
"position": {
"x": 1570.9941088179146,
"y": -407.6505491604564
},
"width": 320,
"height": 612
}
}
],
"edges": [
@@ -876,12 +410,6 @@
"source": "1b89067c-3f6b-42c8-991f-e3055789b251",
"target": "fc9d0e35-a6de-4a19-84e1-c72497c823f6"
},
{
"id": "dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5-0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77-collapsed",
"type": "collapsed",
"source": "dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5",
"target": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77"
},
{
"id": "reactflow__edge-1b7e0df8-8589-4915-a4ea-c0088f15d642collection-1b89067c-3f6b-42c8-991f-e3055789b251collection",
"type": "default",
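Reviewer note on the workflow schema change visible above: bumping meta.version from "2.0.0" to "3.0.0" coincides with slimming every node input from an object carrying id/fieldKind/type metadata down to just name, label, and any literal value. The sketch below is an illustration of that transformation only, not InvokeAI's actual migration code; the helper name slim_input_field is hypothetical.

def slim_input_field(old: dict) -> dict:
    # Keep only the fields the 3.0.0 schema retains per the diff above.
    new = {"name": old["name"], "label": old.get("label", "")}
    if "value" in old:
        new["value"] = old["value"]
    return new

# A v2-style field taken from the diff above:
v2_field = {
    "id": "37e37684-4f30-4ec8-beae-b333e550f904",
    "name": "file_path",
    "fieldKind": "input",
    "label": "Prompts File Path",
    "type": {"isCollection": False, "isCollectionOrScalar": False, "name": "StringField"},
    "value": "",
}
print(slim_input_field(v2_field))
# -> {'name': 'file_path', 'label': 'Prompts File Path', 'value': ''}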
File diff suppressed because it is too large
@@ -2,7 +2,7 @@
"name": "Text to Image - SD1.5",
"author": "InvokeAI",
"description": "Sample text to image workflow for Stable Diffusion 1.5/2",
"version": "1.1.0",
"version": "2.0.0",
"contact": "invoke@invoke.ai",
"tags": "text2image, SD1.5, SD2, default",
"notes": "",
@@ -26,11 +26,15 @@
{
"nodeId": "55705012-79b9-4aac-9f26-c0b10309785b",
"fieldName": "height"
},
{
"nodeId": "58c957f5-0d01-41fc-a803-b2bbf0413d4f",
"fieldName": "board"
}
],
"meta": {
"category": "default",
"version": "2.0.0"
"version": "3.0.0"
},
"nodes": [
{
@@ -38,687 +42,291 @@
"type": "invocation",
"data": {
"id": "93dc02a4-d05b-48ed-b99c-c9b616af3402",
"type": "compel",
"label": "Negative Compel Prompt",
"isOpen": true,
"notes": "",
"isIntermediate": true,
"useCache": true,
"version": "1.0.0",
"version": "1.1.1",
"nodePack": "invokeai",
"label": "Negative Compel Prompt",
"notes": "",
"type": "compel",
"inputs": {
"prompt": {
"id": "7739aff6-26cb-4016-8897-5a1fb2305e4e",
"name": "prompt",
"fieldKind": "input",
"label": "Negative Prompt",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "StringField"
},
"value": ""
},
"clip": {
"id": "48d23dce-a6ae-472a-9f8c-22a714ea5ce0",
"name": "clip",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ClipField"
}
"label": ""
}
},
"outputs": {
"conditioning": {
"id": "37cf3a9d-f6b7-4b64-8ff6-2558c5ecc447",
"name": "conditioning",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ConditioningField"
}
}
}
"isOpen": true,
"isIntermediate": true,
"useCache": true
},
"position": {
"x": 1000,
"y": 350
},
"width": 320,
"height": 219
}
},
{
"id": "55705012-79b9-4aac-9f26-c0b10309785b",
"type": "invocation",
"data": {
"id": "55705012-79b9-4aac-9f26-c0b10309785b",
"type": "noise",
"label": "",
"isOpen": true,
"notes": "",
"isIntermediate": true,
"useCache": true,
"version": "1.0.1",
"version": "1.0.2",
"nodePack": "invokeai",
"label": "",
"notes": "",
"type": "noise",
"inputs": {
"seed": {
"id": "6431737c-918a-425d-a3b4-5d57e2f35d4d",
"name": "seed",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
},
"value": 0
},
"width": {
"id": "38fc5b66-fe6e-47c8-bba9-daf58e454ed7",
"name": "width",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
},
"value": 512
},
"height": {
"id": "16298330-e2bf-4872-a514-d6923df53cbb",
"name": "height",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
},
"value": 512
"value": 768
},
"use_cpu": {
"id": "c7c436d3-7a7a-4e76-91e4-c6deb271623c",
"name": "use_cpu",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "BooleanField"
},
"value": true
}
},
"outputs": {
"noise": {
"id": "50f650dc-0184-4e23-a927-0497a96fe954",
"name": "noise",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "LatentsField"
}
},
"width": {
"id": "bb8a452b-133d-42d1-ae4a-3843d7e4109a",
"name": "width",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
}
},
"height": {
"id": "35cfaa12-3b8b-4b7a-a884-327ff3abddd9",
"name": "height",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
}
}
}
"isOpen": true,
"isIntermediate": true,
"useCache": true
},
"position": {
"x": 600,
"y": 325
},
"width": 320,
"height": 388
}
},
{
"id": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
"type": "invocation",
"data": {
"id": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
"type": "main_model_loader",
"label": "",
"isOpen": true,
"notes": "",
"isIntermediate": true,
"useCache": true,
"version": "1.0.0",
"version": "1.0.2",
"nodePack": "invokeai",
"label": "",
"notes": "",
"type": "main_model_loader",
"inputs": {
"model": {
"id": "993eabd2-40fd-44fe-bce7-5d0c7075ddab",
"name": "model",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "MainModelField"
},
"value": {
"model_name": "stable-diffusion-v1-5",
"base_model": "sd-1",
"model_type": "main"
}
"label": ""
}
},
"outputs": {
"unet": {
"id": "5c18c9db-328d-46d0-8cb9-143391c410be",
"name": "unet",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "UNetField"
}
},
"clip": {
"id": "6effcac0-ec2f-4bf5-a49e-a2c29cf921f4",
"name": "clip",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ClipField"
}
},
"vae": {
"id": "57683ba3-f5f5-4f58-b9a2-4b83dacad4a1",
"name": "vae",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "VaeField"
}
}
}
"isOpen": true,
"isIntermediate": true,
"useCache": true
},
"position": {
"x": 600,
"y": 25
},
"width": 320,
"height": 193
}
},
{
"id": "7d8bf987-284f-413a-b2fd-d825445a5d6c",
"type": "invocation",
"data": {
"id": "7d8bf987-284f-413a-b2fd-d825445a5d6c",
"type": "compel",
"label": "Positive Compel Prompt",
"isOpen": true,
"notes": "",
"isIntermediate": true,
"useCache": true,
"version": "1.0.0",
"version": "1.1.1",
"nodePack": "invokeai",
"label": "Positive Compel Prompt",
"notes": "",
"type": "compel",
"inputs": {
"prompt": {
"id": "7739aff6-26cb-4016-8897-5a1fb2305e4e",
"name": "prompt",
"fieldKind": "input",
"label": "Positive Prompt",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "StringField"
},
"value": "Super cute tiger cub, national geographic award-winning photograph"
},
"clip": {
"id": "48d23dce-a6ae-472a-9f8c-22a714ea5ce0",
"name": "clip",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ClipField"
}
"label": ""
}
},
"outputs": {
"conditioning": {
"id": "37cf3a9d-f6b7-4b64-8ff6-2558c5ecc447",
"name": "conditioning",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ConditioningField"
}
}
}
"isOpen": true,
"isIntermediate": true,
"useCache": true
},
"position": {
"x": 1000,
"y": 25
},
"width": 320,
"height": 219
}
},
{
"id": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
"type": "invocation",
"data": {
"id": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
"type": "rand_int",
"label": "Random Seed",
"isOpen": false,
"notes": "",
"isIntermediate": true,
"useCache": false,
"version": "1.0.0",
"version": "1.0.1",
"nodePack": "invokeai",
"label": "Random Seed",
"notes": "",
"type": "rand_int",
"inputs": {
"low": {
"id": "3ec65a37-60ba-4b6c-a0b2-553dd7a84b84",
"name": "low",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
},
"value": 0
},
"high": {
"id": "085f853a-1a5f-494d-8bec-e4ba29a3f2d1",
"name": "high",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
},
"value": 2147483647
}
},
"outputs": {
"value": {
"id": "812ade4d-7699-4261-b9fc-a6c9d2ab55ee",
"name": "value",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
}
}
}
"isOpen": false,
"isIntermediate": true,
"useCache": false
},
"position": {
"x": 600,
"y": 275
},
"width": 320,
"height": 32
}
},
{
"id": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
"type": "invocation",
"data": {
"id": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
"type": "denoise_latents",
"label": "",
"isOpen": true,
"notes": "",
"isIntermediate": true,
"useCache": true,
"version": "1.5.1",
"version": "1.5.3",
"nodePack": "invokeai",
"label": "",
"notes": "",
"type": "denoise_latents",
"inputs": {
"positive_conditioning": {
"id": "90b7f4f8-ada7-4028-8100-d2e54f192052",
"name": "positive_conditioning",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ConditioningField"
}
"label": ""
},
"negative_conditioning": {
"id": "9393779e-796c-4f64-b740-902a1177bf53",
"name": "negative_conditioning",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ConditioningField"
}
"label": ""
},
"noise": {
"id": "8e17f1e5-4f98-40b1-b7f4-86aeeb4554c1",
"name": "noise",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "LatentsField"
}
"label": ""
},
"steps": {
"id": "9b63302d-6bd2-42c9-ac13-9b1afb51af88",
"name": "steps",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
},
"value": 50
"value": 30
},
"cfg_scale": {
"id": "87dd04d3-870e-49e1-98bf-af003a810109",
"name": "cfg_scale",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": true,
"name": "FloatField"
},
"value": 7.5
},
"denoising_start": {
"id": "f369d80f-4931-4740-9bcd-9f0620719fab",
"name": "denoising_start",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "FloatField"
},
"value": 0
},
"denoising_end": {
"id": "747d10e5-6f02-445c-994c-0604d814de8c",
"name": "denoising_end",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "FloatField"
},
"value": 1
},
"scheduler": {
"id": "1de84a4e-3a24-4ec8-862b-16ce49633b9b",
"name": "scheduler",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "SchedulerField"
},
"value": "unipc"
"value": "dpmpp_sde_k"
},
"unet": {
"id": "ffa6fef4-3ce2-4bdb-9296-9a834849489b",
"name": "unet",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "UNetField"
}
"label": ""
},
"control": {
"id": "077b64cb-34be-4fcc-83f2-e399807a02bd",
"name": "control",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": true,
"name": "ControlField"
}
"label": ""
},
"ip_adapter": {
"id": "1d6948f7-3a65-4a65-a20c-768b287251aa",
"name": "ip_adapter",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": true,
"name": "IPAdapterField"
}
"label": ""
},
"t2i_adapter": {
"id": "75e67b09-952f-4083-aaf4-6b804d690412",
"name": "t2i_adapter",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": true,
"name": "T2IAdapterField"
}
"label": ""
},
"cfg_rescale_multiplier": {
"id": "9101f0a6-5fe0-4826-b7b3-47e5d506826c",
"name": "cfg_rescale_multiplier",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "FloatField"
},
"value": 0
},
"latents": {
"id": "334d4ba3-5a99-4195-82c5-86fb3f4f7d43",
"name": "latents",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "LatentsField"
}
"label": ""
},
"denoise_mask": {
"id": "0d3dbdbf-b014-4e95-8b18-ff2ff9cb0bfa",
"name": "denoise_mask",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "DenoiseMaskField"
}
"label": ""
}
},
"outputs": {
"latents": {
"id": "70fa5bbc-0c38-41bb-861a-74d6d78d2f38",
"name": "latents",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "LatentsField"
}
},
"width": {
"id": "98ee0e6c-82aa-4e8f-8be5-dc5f00ee47f0",
"name": "width",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
}
},
"height": {
"id": "e8cb184a-5e1a-47c8-9695-4b8979564f5d",
"name": "height",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
}
}
}
"isOpen": true,
"isIntermediate": true,
"useCache": true
},
"position": {
"x": 1400,
"y": 25
},
"width": 320,
"height": 612
}
},
{
"id": "58c957f5-0d01-41fc-a803-b2bbf0413d4f",
"type": "invocation",
"data": {
"id": "58c957f5-0d01-41fc-a803-b2bbf0413d4f",
"type": "l2i",
"label": "",
"isOpen": true,
"notes": "",
"isIntermediate": false,
"useCache": true,
"version": "1.2.0",
"version": "1.2.2",
"nodePack": "invokeai",
"label": "",
"notes": "",
"type": "l2i",
"inputs": {
"board": {
"name": "board",
"label": ""
},
"metadata": {
"id": "ab375f12-0042-4410-9182-29e30db82c85",
"name": "metadata",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "MetadataField"
}
"label": ""
},
"latents": {
"id": "3a7e7efd-bff5-47d7-9d48-615127afee78",
"name": "latents",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "LatentsField"
}
"label": ""
},
"vae": {
"id": "a1f5f7a1-0795-4d58-b036-7820c0b0ef2b",
"name": "vae",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "VaeField"
}
"label": ""
},
"tiled": {
"id": "da52059a-0cee-4668-942f-519aa794d739",
"name": "tiled",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "BooleanField"
},
"value": false
},
"fp32": {
"id": "c4841df3-b24e-4140-be3b-ccd454c2522c",
"name": "fp32",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "BooleanField"
},
"value": true
}
},
"outputs": {
"image": {
"id": "72d667d0-cf85-459d-abf2-28bd8b823fe7",
"name": "image",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ImageField"
}
},
"width": {
"id": "c8c907d8-1066-49d1-b9a6-83bdcd53addc",
"name": "width",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
}
},
"height": {
"id": "230f359c-b4ea-436c-b372-332d7dcdca85",
"name": "height",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
}
}
}
"isOpen": true,
"isIntermediate": false,
"useCache": true
},
"position": {
"x": 1800,
"y": 25
},
"width": 320,
"height": 224
}
}
],
"edges": [
@@ -0,0 +1,704 @@
{
"name": "Text to Image - SDXL",
"author": "InvokeAI",
"description": "Sample text to image workflow for SDXL",
"version": "2.0.0",
"contact": "invoke@invoke.ai",
"tags": "text2image, SDXL, default",
"notes": "",
"exposedFields": [
{
"nodeId": "ade2c0d3-0384-4157-b39b-29ce429cfa15",
"fieldName": "value"
},
{
"nodeId": "719dabe8-8297-4749-aea1-37be301cd425",
"fieldName": "value"
},
{
"nodeId": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
"fieldName": "model"
},
{
"nodeId": "0093692f-9cf4-454d-a5b8-62f0e3eb3bb8",
"fieldName": "vae_model"
},
{
"nodeId": "63e91020-83b2-4f35-b174-ad9692aabb48",
"fieldName": "board"
}
],
"meta": {
"category": "default",
"version": "3.0.0"
},
"nodes": [
{
"id": "3774ec24-a69e-4254-864c-097d07a6256f",
"type": "invocation",
"data": {
"id": "3774ec24-a69e-4254-864c-097d07a6256f",
"version": "1.0.1",
"label": "Positive Style Concat",
"notes": "",
"type": "string_join",
"inputs": {
"string_left": {
"name": "string_left",
"label": "",
"value": ""
},
"string_right": {
"name": "string_right",
"label": "Positive Style Concat",
"value": ""
}
},
"isOpen": false,
"isIntermediate": true,
"useCache": true
},
"position": {
"x": 750,
"y": -225
}
},
{
"id": "719dabe8-8297-4749-aea1-37be301cd425",
"type": "invocation",
"data": {
"id": "719dabe8-8297-4749-aea1-37be301cd425",
"version": "1.0.1",
"label": "Negative Prompt",
"notes": "",
"type": "string",
"inputs": {
"value": {
"name": "value",
"label": "Negative Prompt",
"value": "photograph"
}
},
"isOpen": true,
"isIntermediate": true,
"useCache": true
},
"position": {
"x": 750,
"y": -125
}
},
{
"id": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
"type": "invocation",
"data": {
"id": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
"version": "1.1.1",
"nodePack": "invokeai",
"label": "SDXL Negative Compel Prompt",
"notes": "",
"type": "sdxl_compel_prompt",
"inputs": {
"prompt": {
"name": "prompt",
"label": "Negative Prompt",
"value": ""
},
"style": {
"name": "style",
"label": "Negative Style",
"value": ""
},
"original_width": {
"name": "original_width",
"label": "",
"value": 1024
},
"original_height": {
"name": "original_height",
"label": "",
"value": 1024
},
"crop_top": {
"name": "crop_top",
"label": "",
"value": 0
},
"crop_left": {
"name": "crop_left",
"label": "",
"value": 0
},
"target_width": {
"name": "target_width",
"label": "",
"value": 1024
},
"target_height": {
"name": "target_height",
"label": "",
"value": 1024
},
"clip": {
"name": "clip",
"label": ""
},
"clip2": {
"name": "clip2",
"label": ""
}
},
"isOpen": false,
"isIntermediate": true,
"useCache": true
},
"position": {
"x": 750,
"y": 200
}
},
{
"id": "55705012-79b9-4aac-9f26-c0b10309785b",
"type": "invocation",
"data": {
"id": "55705012-79b9-4aac-9f26-c0b10309785b",
"version": "1.0.2",
"nodePack": "invokeai",
"label": "",
"notes": "",
"type": "noise",
"inputs": {
"seed": {
"name": "seed",
"label": "",
"value": 0
},
"width": {
"name": "width",
"label": "",
"value": 1024
},
"height": {
"name": "height",
"label": "",
"value": 1024
},
"use_cpu": {
"name": "use_cpu",
"label": "",
"value": true
}
},
"isOpen": true,
"isIntermediate": true,
"useCache": true
},
"position": {
"x": 375,
"y": 0
}
},
{
"id": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
"type": "invocation",
"data": {
"id": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
"version": "1.0.1",
"nodePack": "invokeai",
"label": "Random Seed",
"notes": "",
"type": "rand_int",
"inputs": {
"low": {
"name": "low",
"label": "",
"value": 0
},
"high": {
"name": "high",
"label": "",
"value": 2147483647
}
},
"isOpen": false,
"isIntermediate": true,
"useCache": false
},
"position": {
"x": 375,
"y": -50
}
},
{
"id": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
"type": "invocation",
"data": {
"id": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
"version": "1.0.2",
"nodePack": "invokeai",
"label": "",
"notes": "",
"type": "sdxl_model_loader",
"inputs": {
"model": {
"name": "model",
"label": "",
"value": {
"key": "4a63b226-e8ff-4da4-854e-0b9f04b562ba",
"hash": "blake3:d279309ea6e5ee6e8fd52504275865cc280dac71cbf528c5b07c98b888bddaba",
"name": "dreamshaper-xl-v2-turbo",
"base": "sdxl",
"type": "main"
}
}
},
"isOpen": true,
"isIntermediate": true,
"useCache": true
},
"position": {
"x": 375,
"y": -500
}
},
{
"id": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
"type": "invocation",
"data": {
"id": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
"version": "1.1.1",
"nodePack": "invokeai",
"label": "SDXL Positive Compel Prompt",
"notes": "",
"type": "sdxl_compel_prompt",
"inputs": {
"prompt": {
"name": "prompt",
"label": "Positive Prompt",
"value": ""
},
"style": {
"name": "style",
"label": "Positive Style",
"value": ""
},
"original_width": {
"name": "original_width",
"label": "",
"value": 1024
},
"original_height": {
"name": "original_height",
"label": "",
"value": 1024
},
"crop_top": {
"name": "crop_top",
"label": "",
"value": 0
},
"crop_left": {
"name": "crop_left",
"label": "",
"value": 0
},
"target_width": {
"name": "target_width",
"label": "",
"value": 1024
},
"target_height": {
"name": "target_height",
"label": "",
"value": 1024
},
"clip": {
"name": "clip",
"label": ""
},
"clip2": {
"name": "clip2",
"label": ""
}
},
"isOpen": false,
"isIntermediate": true,
"useCache": true
},
"position": {
"x": 750,
"y": -175
}
},
{
"id": "63e91020-83b2-4f35-b174-ad9692aabb48",
"type": "invocation",
"data": {
"id": "63e91020-83b2-4f35-b174-ad9692aabb48",
"version": "1.2.2",
"nodePack": "invokeai",
"label": "",
"notes": "",
"type": "l2i",
"inputs": {
"board": {
"name": "board",
"label": ""
},
"metadata": {
"name": "metadata",
"label": ""
},
"latents": {
"name": "latents",
"label": ""
},
"vae": {
"name": "vae",
"label": ""
},
"tiled": {
"name": "tiled",
"label": "",
"value": false
},
"fp32": {
"name": "fp32",
"label": "",
"value": false
}
},
"isOpen": true,
"isIntermediate": false,
"useCache": false
},
"position": {
"x": 1475,
"y": -500
}
},
{
"id": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb",
"type": "invocation",
"data": {
"id": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb",
"version": "1.5.3",
"nodePack": "invokeai",
"label": "",
"notes": "",
"type": "denoise_latents",
"inputs": {
"positive_conditioning": {
"name": "positive_conditioning",
"label": ""
},
"negative_conditioning": {
"name": "negative_conditioning",
"label": ""
},
"noise": {
"name": "noise",
"label": ""
},
"steps": {
"name": "steps",
"label": "",
"value": 32
},
"cfg_scale": {
"name": "cfg_scale",
"label": "",
"value": 6
},
"denoising_start": {
"name": "denoising_start",
"label": "",
"value": 0
},
"denoising_end": {
"name": "denoising_end",
"label": "",
"value": 1
},
"scheduler": {
"name": "scheduler",
"label": "",
"value": "dpmpp_2m_sde_k"
},
"unet": {
"name": "unet",
"label": ""
},
"control": {
"name": "control",
"label": ""
},
"ip_adapter": {
"name": "ip_adapter",
"label": ""
},
"t2i_adapter": {
"name": "t2i_adapter",
"label": ""
},
"cfg_rescale_multiplier": {
"name": "cfg_rescale_multiplier",
"label": "",
"value": 0
},
"latents": {
"name": "latents",
"label": ""
},
"denoise_mask": {
"name": "denoise_mask",
"label": ""
}
},
"isOpen": true,
"isIntermediate": true,
"useCache": true
},
"position": {
"x": 1125,
"y": -500
}
},
{
"id": "0093692f-9cf4-454d-a5b8-62f0e3eb3bb8",
"type": "invocation",
"data": {
"id": "0093692f-9cf4-454d-a5b8-62f0e3eb3bb8",
"version": "1.0.2",
"label": "",
"notes": "",
"type": "vae_loader",
"inputs": {
"vae_model": {
"name": "vae_model",
"label": "VAE (use the FP16 model)",
"value": {
"key": "f20f9e5c-1bce-4c46-a84d-34ebfa7df069",
"hash": "blake3:9705ab1c31fa96b308734214fb7571a958621c7a9247eed82b7d277145f8d9fa",
"name": "sdxl-vae-fp16-fix",
"base": "sdxl",
"type": "vae"
}
}
},
"isOpen": true,
"isIntermediate": true,
"useCache": true
},
"position": {
"x": 375,
"y": -225
}
},
{
"id": "ade2c0d3-0384-4157-b39b-29ce429cfa15",
"type": "invocation",
"data": {
"id": "ade2c0d3-0384-4157-b39b-29ce429cfa15",
"version": "1.0.1",
"label": "Positive Prompt",
"notes": "",
"type": "string",
"inputs": {
"value": {
"name": "value",
"label": "Positive Prompt",
"value": "Super cute tiger cub, fierce, traditional chinese watercolor"
}
},
"isOpen": true,
"isIntermediate": true,
"useCache": true
},
"position": {
"x": 750,
"y": -500
}
},
{
"id": "ad8fa655-3a76-43d0-9c02-4d7644dea650",
"type": "invocation",
"data": {
"id": "ad8fa655-3a76-43d0-9c02-4d7644dea650",
"version": "1.0.1",
"label": "Negative Style Concat",
"notes": "",
"type": "string_join",
"inputs": {
"string_left": {
"name": "string_left",
"label": "",
"value": ""
},
"string_right": {
"name": "string_right",
"label": "Negative Style Prompt",
"value": ""
}
},
"isOpen": false,
"isIntermediate": true,
"useCache": true
},
"position": {
"x": 750,
"y": 150
}
}
],
"edges": [
{
"id": "3774ec24-a69e-4254-864c-097d07a6256f-faf965a4-7530-427b-b1f3-4ba6505c2a08-collapsed",
"type": "collapsed",
"source": "3774ec24-a69e-4254-864c-097d07a6256f",
"target": "faf965a4-7530-427b-b1f3-4ba6505c2a08"
},
{
"id": "ad8fa655-3a76-43d0-9c02-4d7644dea650-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204-collapsed",
"type": "collapsed",
"source": "ad8fa655-3a76-43d0-9c02-4d7644dea650",
"target": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204"
},
{
"id": "reactflow__edge-ea94bc37-d995-4a83-aa99-4af42479f2f2value-55705012-79b9-4aac-9f26-c0b10309785bseed",
"type": "default",
"source": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
"target": "55705012-79b9-4aac-9f26-c0b10309785b",
"sourceHandle": "value",
"targetHandle": "seed"
},
{
"id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22clip-faf965a4-7530-427b-b1f3-4ba6505c2a08clip",
"type": "default",
"source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
"target": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
"sourceHandle": "clip",
"targetHandle": "clip"
},
{
"id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22clip2-faf965a4-7530-427b-b1f3-4ba6505c2a08clip2",
"type": "default",
"source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
"target": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
"sourceHandle": "clip2",
"targetHandle": "clip2"
},
{
"id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22clip-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204clip",
"type": "default",
"source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
"target": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
"sourceHandle": "clip",
"targetHandle": "clip"
},
{
"id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22clip2-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204clip2",
"type": "default",
"source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
"target": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
"sourceHandle": "clip2",
"targetHandle": "clip2"
},
{
"id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22unet-50a36525-3c0a-4cc5-977c-e4bfc3fd6dfbunet",
"type": "default",
"source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
"target": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb",
"sourceHandle": "unet",
"targetHandle": "unet"
},
{
"id": "reactflow__edge-faf965a4-7530-427b-b1f3-4ba6505c2a08conditioning-50a36525-3c0a-4cc5-977c-e4bfc3fd6dfbpositive_conditioning",
"type": "default",
"source": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
"target": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb",
"sourceHandle": "conditioning",
"targetHandle": "positive_conditioning"
},
{
|
||||
"id": "reactflow__edge-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204conditioning-50a36525-3c0a-4cc5-977c-e4bfc3fd6dfbnegative_conditioning",
|
||||
"type": "default",
|
||||
"source": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
|
||||
"target": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb",
|
||||
"sourceHandle": "conditioning",
|
||||
"targetHandle": "negative_conditioning"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-55705012-79b9-4aac-9f26-c0b10309785bnoise-50a36525-3c0a-4cc5-977c-e4bfc3fd6dfbnoise",
|
||||
"type": "default",
|
||||
"source": "55705012-79b9-4aac-9f26-c0b10309785b",
|
||||
"target": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb",
|
||||
"sourceHandle": "noise",
|
||||
"targetHandle": "noise"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-50a36525-3c0a-4cc5-977c-e4bfc3fd6dfblatents-63e91020-83b2-4f35-b174-ad9692aabb48latents",
|
||||
"type": "default",
|
||||
"source": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb",
|
||||
"target": "63e91020-83b2-4f35-b174-ad9692aabb48",
|
||||
"sourceHandle": "latents",
|
||||
"targetHandle": "latents"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-0093692f-9cf4-454d-a5b8-62f0e3eb3bb8vae-63e91020-83b2-4f35-b174-ad9692aabb48vae",
|
||||
"type": "default",
|
||||
"source": "0093692f-9cf4-454d-a5b8-62f0e3eb3bb8",
|
||||
"target": "63e91020-83b2-4f35-b174-ad9692aabb48",
|
||||
"sourceHandle": "vae",
|
||||
"targetHandle": "vae"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-ade2c0d3-0384-4157-b39b-29ce429cfa15value-faf965a4-7530-427b-b1f3-4ba6505c2a08prompt",
|
||||
"type": "default",
|
||||
"source": "ade2c0d3-0384-4157-b39b-29ce429cfa15",
|
||||
"target": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
|
||||
"sourceHandle": "value",
|
||||
"targetHandle": "prompt"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-719dabe8-8297-4749-aea1-37be301cd425value-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204prompt",
|
||||
"type": "default",
|
||||
"source": "719dabe8-8297-4749-aea1-37be301cd425",
|
||||
"target": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
|
||||
"sourceHandle": "value",
|
||||
"targetHandle": "prompt"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-719dabe8-8297-4749-aea1-37be301cd425value-ad8fa655-3a76-43d0-9c02-4d7644dea650string_left",
|
||||
"type": "default",
|
||||
"source": "719dabe8-8297-4749-aea1-37be301cd425",
|
||||
"target": "ad8fa655-3a76-43d0-9c02-4d7644dea650",
|
||||
"sourceHandle": "value",
|
||||
"targetHandle": "string_left"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-ad8fa655-3a76-43d0-9c02-4d7644dea650value-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204style",
|
||||
"type": "default",
|
||||
"source": "ad8fa655-3a76-43d0-9c02-4d7644dea650",
|
||||
"target": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
|
||||
"sourceHandle": "value",
|
||||
"targetHandle": "style"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-ade2c0d3-0384-4157-b39b-29ce429cfa15value-3774ec24-a69e-4254-864c-097d07a6256fstring_left",
|
||||
"type": "default",
|
||||
"source": "ade2c0d3-0384-4157-b39b-29ce429cfa15",
|
||||
"target": "3774ec24-a69e-4254-864c-097d07a6256f",
|
||||
"sourceHandle": "value",
|
||||
"targetHandle": "string_left"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-3774ec24-a69e-4254-864c-097d07a6256fvalue-faf965a4-7530-427b-b1f3-4ba6505c2a08style",
|
||||
"type": "default",
|
||||
"source": "3774ec24-a69e-4254-864c-097d07a6256f",
|
||||
"target": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
|
||||
"sourceHandle": "value",
|
||||
"targetHandle": "style"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -2,7 +2,7 @@
"name": "Text to Image with LoRA",
"author": "InvokeAI",
"description": "Simple text to image workflow with a LoRA",
"version": "1.0.0",
"version": "2.0.0",
"contact": "invoke@invoke.ai",
"tags": "text to image, lora, default",
"notes": "",
@@ -22,11 +22,23 @@
{
"nodeId": "c3fa6872-2599-4a82-a596-b3446a66cf8b",
"fieldName": "prompt"
},
{
"nodeId": "ea18915f-2c5b-4569-b725-8e9e9122e8d3",
"fieldName": "width"
},
{
"nodeId": "ea18915f-2c5b-4569-b725-8e9e9122e8d3",
"fieldName": "height"
},
{
"nodeId": "a9683c0a-6b1f-4a5e-8187-c57e764b3400",
"fieldName": "board"
}
],
"meta": {
"category": "default",
"version": "2.0.0"
"version": "3.0.0"
},
"nodes": [
{
@@ -34,773 +46,321 @@
"type": "invocation",
"data": {
"id": "85b77bb2-c67a-416a-b3e8-291abe746c44",
"type": "compel",
"version": "1.1.1",
"label": "",
"isOpen": true,
"notes": "",
"isIntermediate": true,
"useCache": true,
"version": "1.0.0",
"type": "compel",
"inputs": {
"prompt": {
"id": "39fe92c4-38eb-4cc7-bf5e-cbcd31847b11",
"name": "prompt",
"fieldKind": "input",
"label": "Negative Prompt",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "StringField"
},
"value": ""
},
"clip": {
"id": "14313164-e5c4-4e40-a599-41b614fe3690",
"name": "clip",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ClipField"
}
"label": ""
}
},
"outputs": {
"conditioning": {
"id": "02140b9d-50f3-470b-a0b7-01fc6ed2dcd6",
"name": "conditioning",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ConditioningField"
}
}
}
"isOpen": true,
"isIntermediate": true,
"useCache": true
},
"position": {
"x": 3425,
"y": -300
},
"width": 320,
"height": 219
}
},
{
"id": "24e9d7ed-4836-4ec4-8f9e-e747721f9818",
"type": "invocation",
"data": {
"id": "24e9d7ed-4836-4ec4-8f9e-e747721f9818",
"type": "main_model_loader",
"version": "1.0.2",
"label": "",
"isOpen": true,
"notes": "",
"isIntermediate": true,
"useCache": true,
"version": "1.0.0",
"type": "main_model_loader",
"inputs": {
"model": {
"id": "e2e1c177-ae39-4244-920e-d621fa156a24",
"name": "model",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "MainModelField"
},
"value": {
"model_name": "Analog-Diffusion",
"base_model": "sd-1",
"model_type": "main"
}
"label": ""
}
},
"outputs": {
"vae": {
"id": "f91410e8-9378-4298-b285-f0f40ffd9825",
"name": "vae",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "VaeField"
}
},
"clip": {
"id": "928d91bf-de0c-44a8-b0c8-4de0e2e5b438",
"name": "clip",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ClipField"
}
},
"unet": {
"id": "eacaf530-4e7e-472e-b904-462192189fc1",
"name": "unet",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "UNetField"
}
}
}
"isOpen": true,
"isIntermediate": true,
"useCache": true
},
"position": {
"x": 2500,
"y": -600
},
"width": 320,
"height": 193
}
},
{
"id": "c41e705b-f2e3-4d1a-83c4-e34bb9344966",
"type": "invocation",
"data": {
"id": "c41e705b-f2e3-4d1a-83c4-e34bb9344966",
"type": "lora_loader",
"version": "1.0.2",
"label": "",
"isOpen": true,
"notes": "",
"isIntermediate": true,
"useCache": true,
"version": "1.0.0",
"type": "lora_loader",
"inputs": {
"lora": {
"id": "36d867e8-92ea-4c3f-9ad5-ba05c64cf326",
"name": "lora",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "LoRAModelField"
},
"value": {
"model_name": "Ink scenery",
"base_model": "sd-1"
}
"label": ""
},
"weight": {
"id": "8be86540-ba81-49b3-b394-2b18fa70b867",
"name": "weight",
"fieldKind": "input",
"label": "LoRA Weight",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "FloatField"
},
"value": 0.75
"value": 1
},
"unet": {
"id": "9c4d5668-e9e1-411b-8f4b-e71115bc4a01",
"name": "unet",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "UNetField"
}
"label": ""
},
"clip": {
"id": "918ec00e-e76f-4ad0-aee1-3927298cf03b",
"name": "clip",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ClipField"
}
"label": ""
}
},
"outputs": {
"unet": {
"id": "c63f7825-1bcf-451d-b7a7-aa79f5c77416",
"name": "unet",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "UNetField"
}
},
"clip": {
"id": "6f79ef2d-00f7-4917-bee3-53e845bf4192",
"name": "clip",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ClipField"
}
}
}
"isOpen": true,
"isIntermediate": true,
"useCache": true
},
"position": {
"x": 2975,
"y": -600
},
"width": 320,
"height": 218
}
},
{
"id": "c3fa6872-2599-4a82-a596-b3446a66cf8b",
"type": "invocation",
"data": {
"id": "c3fa6872-2599-4a82-a596-b3446a66cf8b",
"type": "compel",
"version": "1.1.1",
"label": "",
"isOpen": true,
"notes": "",
"isIntermediate": true,
"useCache": true,
"version": "1.0.0",
"type": "compel",
"inputs": {
"prompt": {
"id": "39fe92c4-38eb-4cc7-bf5e-cbcd31847b11",
"name": "prompt",
"fieldKind": "input",
"label": "Positive Prompt",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "StringField"
},
"value": "cute tiger cub"
"value": "super cute tiger cub"
},
"clip": {
"id": "14313164-e5c4-4e40-a599-41b614fe3690",
"name": "clip",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ClipField"
}
"label": ""
}
},
"outputs": {
"conditioning": {
"id": "02140b9d-50f3-470b-a0b7-01fc6ed2dcd6",
"name": "conditioning",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ConditioningField"
}
}
}
"isOpen": true,
"isIntermediate": true,
"useCache": true
},
"position": {
"x": 3425,
"y": -575
},
"width": 320,
"height": 219
}
},
{
"id": "ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63",
"type": "invocation",
"data": {
"id": "ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63",
"type": "denoise_latents",
"version": "1.5.3",
"label": "",
"isOpen": true,
"notes": "",
"isIntermediate": true,
"useCache": true,
"version": "1.5.1",
"type": "denoise_latents",
"inputs": {
"positive_conditioning": {
"id": "025ff44b-c4c6-4339-91b4-5f461e2cadc5",
"name": "positive_conditioning",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ConditioningField"
}
"label": ""
},
"negative_conditioning": {
"id": "2d92b45a-a7fb-4541-9a47-7c7495f50f54",
"name": "negative_conditioning",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ConditioningField"
}
"label": ""
},
"noise": {
"id": "4d0deeff-24ed-4562-a1ca-7833c0649377",
"name": "noise",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "LatentsField"
}
"label": ""
},
"steps": {
"id": "c9907328-aece-4af9-8a95-211b4f99a325",
"name": "steps",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
},
"value": 10
"value": 30
},
"cfg_scale": {
"id": "7cf0f031-2078-49f4-9273-bb3a64ad7130",
"name": "cfg_scale",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": true,
"name": "FloatField"
},
"value": 7.5
},
"denoising_start": {
"id": "44cec3ba-b404-4b51-ba98-add9d783279e",
"name": "denoising_start",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "FloatField"
},
"value": 0
},
"denoising_end": {
"id": "3e7975f3-e438-4a13-8a14-395eba1fb7cd",
"name": "denoising_end",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "FloatField"
},
"value": 1
},
"scheduler": {
"id": "a6f6509b-7bb4-477d-b5fb-74baefa38111",
"name": "scheduler",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "SchedulerField"
},
"value": "euler"
},
"unet": {
"id": "5a87617a-b09f-417b-9b75-0cea4c255227",
"name": "unet",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "UNetField"
}
"label": ""
},
"control": {
"id": "db87aace-ace8-4f2a-8f2b-1f752389fa9b",
"name": "control",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": true,
"name": "ControlField"
}
"label": ""
},
"ip_adapter": {
"id": "f0c133ed-4d6d-4567-bb9a-b1779810993c",
"name": "ip_adapter",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": true,
"name": "IPAdapterField"
}
"label": ""
},
"t2i_adapter": {
"id": "59ee1233-887f-45e7-aa14-cbad5f6cb77f",
"name": "t2i_adapter",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": true,
"name": "T2IAdapterField"
}
"label": ""
},
"cfg_rescale_multiplier": {
"id": "1a12e781-4b30-4707-b432-18c31866b5c3",
"name": "cfg_rescale_multiplier",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "FloatField"
},
"value": 0
},
"latents": {
"id": "d0e593ae-305c-424b-9acd-3af830085832",
"name": "latents",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "LatentsField"
}
"label": ""
},
"denoise_mask": {
"id": "b81b5a79-fc2b-4011-aae6-64c92bae59a7",
"name": "denoise_mask",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "DenoiseMaskField"
}
"label": ""
}
},
"outputs": {
"latents": {
"id": "9ae4022a-548e-407e-90cf-cc5ca5ff8a21",
"name": "latents",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "LatentsField"
}
},
"width": {
"id": "730ba4bd-2c52-46bb-8c87-9b3aec155576",
"name": "width",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
}
},
"height": {
"id": "52b98f0b-b5ff-41b5-acc7-d0b1d1011a6f",
"name": "height",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
}
}
}
"isOpen": true,
"isIntermediate": true,
"useCache": true
},
"position": {
"x": 3975,
"y": -575
},
"width": 320,
"height": 612
}
},
{
"id": "ea18915f-2c5b-4569-b725-8e9e9122e8d3",
"type": "invocation",
"data": {
"id": "ea18915f-2c5b-4569-b725-8e9e9122e8d3",
"type": "noise",
"version": "1.0.2",
"label": "",
"isOpen": false,
"notes": "",
"isIntermediate": true,
"useCache": true,
"version": "1.0.1",
"type": "noise",
"inputs": {
"seed": {
"id": "446ac80c-ba0a-4fea-a2d7-21128f52e5bf",
"name": "seed",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
},
"value": 0
},
"width": {
"id": "779831b3-20b4-4f5f-9de7-d17de57288d8",
"name": "width",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
},
"value": 512
},
"height": {
"id": "08959766-6d67-4276-b122-e54b911f2316",
"name": "height",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
},
"value": 512
"value": 768
},
"use_cpu": {
"id": "53b36a98-00c4-4dc5-97a4-ef3432c0a805",
"name": "use_cpu",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "BooleanField"
},
"value": true
}
},
"outputs": {
"noise": {
"id": "eed95824-580b-442f-aa35-c073733cecce",
"name": "noise",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "LatentsField"
}
},
"width": {
"id": "7985a261-dfee-47a8-908a-c5a8754f5dc4",
"name": "width",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
}
},
"height": {
"id": "3d00f6c1-84b0-4262-83d9-3bf755babeea",
"name": "height",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
}
}
}
"isOpen": false,
"isIntermediate": true,
"useCache": true
},
"position": {
"x": 3425,
"y": 75
},
"width": 320,
"height": 24
}
},
{
"id": "6fd74a17-6065-47a5-b48b-f4e2b8fa7953",
"type": "invocation",
"data": {
"id": "6fd74a17-6065-47a5-b48b-f4e2b8fa7953",
"type": "rand_int",
"version": "1.0.1",
"label": "",
"isOpen": false,
"notes": "",
"isIntermediate": true,
"useCache": false,
"version": "1.0.0",
"type": "rand_int",
"inputs": {
"low": {
"id": "d25305f3-bfd6-446c-8e2c-0b025ec9e9ad",
"name": "low",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
},
"value": 0
},
"high": {
"id": "10376a3d-b8fe-4a51-b81a-ea46d8c12c78",
"name": "high",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
},
"value": 2147483647
}
},
"outputs": {
"value": {
"id": "c64878fa-53b1-4202-b88a-cfb854216a57",
"name": "value",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
}
}
}
"isOpen": false,
"isIntermediate": true,
"useCache": false
},
"position": {
"x": 3425,
"y": 0
},
"width": 320,
"height": 24
}
},
{
"id": "a9683c0a-6b1f-4a5e-8187-c57e764b3400",
"type": "invocation",
"data": {
"id": "a9683c0a-6b1f-4a5e-8187-c57e764b3400",
"type": "l2i",
"version": "1.2.2",
"label": "",
"isOpen": true,
"notes": "",
"isIntermediate": false,
"useCache": true,
"version": "1.2.0",
"type": "l2i",
"inputs": {
"board": {
"name": "board",
"label": ""
},
"metadata": {
"id": "b1982e8a-14ad-4029-a697-beb30af8340f",
"name": "metadata",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "MetadataField"
}
"label": ""
},
"latents": {
"id": "f7669388-9f91-46cc-94fc-301fa7041c3e",
"name": "latents",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "LatentsField"
}
"label": ""
},
"vae": {
"id": "c6f2d4db-4d0a-4e3d-acb4-b5c5a228a3e2",
"name": "vae",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "VaeField"
}
"label": ""
},
"tiled": {
"id": "19ef7d31-d96f-4e94-b7e5-95914e9076fc",
"name": "tiled",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "BooleanField"
},
"value": false
},
"fp32": {
"id": "a9454533-8ab7-4225-b411-646dc5e76d00",
"name": "fp32",
"fieldKind": "input",
"label": "",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "BooleanField"
},
"value": false
}
},
"outputs": {
"image": {
"id": "4f81274e-e216-47f3-9fb6-f97493a40e6f",
"name": "image",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "ImageField"
}
},
"width": {
"id": "61a9acfb-1547-4f1e-8214-e89bd3855ee5",
"name": "width",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
}
},
"height": {
"id": "b15cc793-4172-4b07-bcf4-5627bbc7d0d7",
"name": "height",
"fieldKind": "output",
"type": {
"isCollection": false,
"isCollectionOrScalar": false,
"name": "IntegerField"
}
}
}
"isOpen": true,
"isIntermediate": false,
"useCache": true
},
"position": {
"x": 4450,
"y": -550
},
"width": 320,
"height": 224
}
}
],
"edges": [
File diff suppressed because it is too large
invokeai/backend/image_util/canny.py — new file (41 lines)
@@ -0,0 +1,41 @@
import cv2
from PIL import Image

from invokeai.backend.image_util.util import (
    cv2_to_pil,
    normalize_image_channel_count,
    pil_to_cv2,
    resize_image_to_resolution,
)


def get_canny_edges(
    image: Image.Image, low_threshold: int, high_threshold: int, detect_resolution: int, image_resolution: int
) -> Image.Image:
    """Returns the edges of an image using the Canny edge detection algorithm.

    Adapted from https://github.com/huggingface/controlnet_aux (Apache-2.0 license).

    Args:
        image: The input image.
        low_threshold: The lower threshold for the hysteresis procedure.
        high_threshold: The upper threshold for the hysteresis procedure.
        detect_resolution: The resolution to fit the image to before edge detection.
        image_resolution: The resolution to fit the edges to before returning.

    Returns:
        The Canny edges of the input image.
    """

    if image.mode != "RGB":
        image = image.convert("RGB")

    np_image = pil_to_cv2(image)
    np_image = normalize_image_channel_count(np_image)
    np_image = resize_image_to_resolution(np_image, detect_resolution)

    edge_map = cv2.Canny(np_image, low_threshold, high_threshold)
    edge_map = normalize_image_channel_count(edge_map)
    edge_map = resize_image_to_resolution(edge_map, image_resolution)

    return cv2_to_pil(edge_map)
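For context, a minimal usage sketch of the new helper. The file paths are placeholders, and 100/200 are conventional Canny hysteresis thresholds, not values taken from this diff:

from PIL import Image

from invokeai.backend.image_util.canny import get_canny_edges

image = Image.open("input.png")  # placeholder path
edges = get_canny_edges(image, low_threshold=100, high_threshold=200, detect_resolution=512, image_resolution=512)
edges.save("canny_edges.png")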
invokeai/backend/image_util/hed.py — new file (142 lines)
@@ -0,0 +1,142 @@
|
||||
"""Adapted from https://github.com/huggingface/controlnet_aux (Apache-2.0 license)."""
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
import torch
|
||||
from einops import rearrange
|
||||
from huggingface_hub import hf_hub_download
|
||||
from PIL import Image
|
||||
|
||||
from invokeai.backend.image_util.util import (
|
||||
non_maximum_suppression,
|
||||
normalize_image_channel_count,
|
||||
np_to_pil,
|
||||
pil_to_np,
|
||||
resize_image_to_resolution,
|
||||
safe_step,
|
||||
)
|
||||
|
||||
|
||||
class DoubleConvBlock(torch.nn.Module):
|
||||
def __init__(self, input_channel, output_channel, layer_number):
|
||||
super().__init__()
|
||||
self.convs = torch.nn.Sequential()
|
||||
self.convs.append(
|
||||
torch.nn.Conv2d(
|
||||
in_channels=input_channel, out_channels=output_channel, kernel_size=(3, 3), stride=(1, 1), padding=1
|
||||
)
|
||||
)
|
||||
for _i in range(1, layer_number):
|
||||
self.convs.append(
|
||||
torch.nn.Conv2d(
|
||||
in_channels=output_channel,
|
||||
out_channels=output_channel,
|
||||
kernel_size=(3, 3),
|
||||
stride=(1, 1),
|
||||
padding=1,
|
||||
)
|
||||
)
|
||||
self.projection = torch.nn.Conv2d(
|
||||
in_channels=output_channel, out_channels=1, kernel_size=(1, 1), stride=(1, 1), padding=0
|
||||
)
|
||||
|
||||
def __call__(self, x, down_sampling=False):
|
||||
h = x
|
||||
if down_sampling:
|
||||
h = torch.nn.functional.max_pool2d(h, kernel_size=(2, 2), stride=(2, 2))
|
||||
for conv in self.convs:
|
||||
h = conv(h)
|
||||
h = torch.nn.functional.relu(h)
|
||||
return h, self.projection(h)
|
||||
|
||||
|
||||
class ControlNetHED_Apache2(torch.nn.Module):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.norm = torch.nn.Parameter(torch.zeros(size=(1, 3, 1, 1)))
|
||||
self.block1 = DoubleConvBlock(input_channel=3, output_channel=64, layer_number=2)
|
||||
self.block2 = DoubleConvBlock(input_channel=64, output_channel=128, layer_number=2)
|
||||
self.block3 = DoubleConvBlock(input_channel=128, output_channel=256, layer_number=3)
|
||||
self.block4 = DoubleConvBlock(input_channel=256, output_channel=512, layer_number=3)
|
||||
self.block5 = DoubleConvBlock(input_channel=512, output_channel=512, layer_number=3)
|
||||
|
||||
def __call__(self, x):
|
||||
h = x - self.norm
|
||||
h, projection1 = self.block1(h)
|
||||
h, projection2 = self.block2(h, down_sampling=True)
|
||||
h, projection3 = self.block3(h, down_sampling=True)
|
||||
h, projection4 = self.block4(h, down_sampling=True)
|
||||
h, projection5 = self.block5(h, down_sampling=True)
|
||||
return projection1, projection2, projection3, projection4, projection5
|
||||
|
||||
|
||||
class HEDProcessor:
|
||||
"""Holistically-Nested Edge Detection.
|
||||
|
||||
On instantiation, loads the HED model from the HuggingFace Hub.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
model_path = hf_hub_download("lllyasviel/Annotators", "ControlNetHED.pth")
|
||||
self.network = ControlNetHED_Apache2()
|
||||
self.network.load_state_dict(torch.load(model_path, map_location="cpu"))
|
||||
self.network.float().eval()
|
||||
|
||||
def to(self, device: torch.device):
|
||||
self.network.to(device)
|
||||
return self
|
||||
|
||||
def run(
|
||||
self,
|
||||
input_image: Image.Image,
|
||||
detect_resolution: int = 512,
|
||||
image_resolution: int = 512,
|
||||
safe: bool = False,
|
||||
scribble: bool = False,
|
||||
) -> Image.Image:
|
||||
"""Processes an image and returns the detected edges.
|
||||
|
||||
Args:
|
||||
input_image: The input image.
|
||||
detect_resolution: The resolution to fit the image to before edge detection.
|
||||
image_resolution: The resolution to fit the edges to before returning.
|
||||
safe: Whether to apply safe step to the detected edges.
|
||||
scribble: Whether to apply non-maximum suppression and Gaussian blur to the detected edges.
|
||||
|
||||
Returns:
|
||||
The detected edges.
|
||||
"""
|
||||
device = next(iter(self.network.parameters())).device
|
||||
np_image = pil_to_np(input_image)
|
||||
np_image = normalize_image_channel_count(np_image)
|
||||
np_image = resize_image_to_resolution(np_image, detect_resolution)
|
||||
|
||||
assert np_image.ndim == 3
|
||||
height, width, _channels = np_image.shape
|
||||
with torch.no_grad():
|
||||
image_hed = torch.from_numpy(np_image.copy()).float().to(device)
|
||||
image_hed = rearrange(image_hed, "h w c -> 1 c h w")
|
||||
edges = self.network(image_hed)
|
||||
edges = [e.detach().cpu().numpy().astype(np.float32)[0, 0] for e in edges]
|
||||
edges = [cv2.resize(e, (width, height), interpolation=cv2.INTER_LINEAR) for e in edges]
|
||||
edges = np.stack(edges, axis=2)
|
||||
edge = 1 / (1 + np.exp(-np.mean(edges, axis=2).astype(np.float64)))
|
||||
if safe:
|
||||
edge = safe_step(edge)
|
||||
edge = (edge * 255.0).clip(0, 255).astype(np.uint8)
|
||||
|
||||
detected_map = edge
|
||||
detected_map = normalize_image_channel_count(detected_map)
|
||||
|
||||
img = resize_image_to_resolution(np_image, image_resolution)
|
||||
height, width, _channels = img.shape
|
||||
|
||||
detected_map = cv2.resize(detected_map, (width, height), interpolation=cv2.INTER_LINEAR)
|
||||
|
||||
if scribble:
|
||||
detected_map = non_maximum_suppression(detected_map, 127, 3.0)
|
||||
detected_map = cv2.GaussianBlur(detected_map, (0, 0), 3.0)
|
||||
detected_map[detected_map > 4] = 255
|
||||
detected_map[detected_map < 255] = 0
|
||||
|
||||
return np_to_pil(detected_map)
|
||||
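A minimal usage sketch for HEDProcessor, as defined above. The input path is a placeholder and the device selection is an illustrative assumption:

import torch
from PIL import Image

from invokeai.backend.image_util.hed import HEDProcessor

processor = HEDProcessor()  # downloads ControlNetHED.pth from the HF Hub on first use
processor.to(torch.device("cuda" if torch.cuda.is_available() else "cpu"))
edges = processor.run(Image.open("photo.png"), detect_resolution=512, image_resolution=512)
edges.save("hed_edges.png")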
invokeai/backend/image_util/lineart.py — new file (158 lines)
@@ -0,0 +1,158 @@
"""Adapted from https://github.com/huggingface/controlnet_aux (Apache-2.0 license)."""

import cv2
import numpy as np
import torch
import torch.nn as nn
from einops import rearrange
from huggingface_hub import hf_hub_download
from PIL import Image

from invokeai.backend.image_util.util import (
    normalize_image_channel_count,
    np_to_pil,
    pil_to_np,
    resize_image_to_resolution,
)


class ResidualBlock(nn.Module):
    def __init__(self, in_features):
        super(ResidualBlock, self).__init__()

        conv_block = [
            nn.ReflectionPad2d(1),
            nn.Conv2d(in_features, in_features, 3),
            nn.InstanceNorm2d(in_features),
            nn.ReLU(inplace=True),
            nn.ReflectionPad2d(1),
            nn.Conv2d(in_features, in_features, 3),
            nn.InstanceNorm2d(in_features),
        ]

        self.conv_block = nn.Sequential(*conv_block)

    def forward(self, x):
        return x + self.conv_block(x)


class Generator(nn.Module):
    def __init__(self, input_nc, output_nc, n_residual_blocks=9, sigmoid=True):
        super(Generator, self).__init__()

        # Initial convolution block
        model0 = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, 64, 7), nn.InstanceNorm2d(64), nn.ReLU(inplace=True)]
        self.model0 = nn.Sequential(*model0)

        # Downsampling
        model1 = []
        in_features = 64
        out_features = in_features * 2
        for _ in range(2):
            model1 += [
                nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
                nn.InstanceNorm2d(out_features),
                nn.ReLU(inplace=True),
            ]
            in_features = out_features
            out_features = in_features * 2
        self.model1 = nn.Sequential(*model1)

        model2 = []
        # Residual blocks
        for _ in range(n_residual_blocks):
            model2 += [ResidualBlock(in_features)]
        self.model2 = nn.Sequential(*model2)

        # Upsampling
        model3 = []
        out_features = in_features // 2
        for _ in range(2):
            model3 += [
                nn.ConvTranspose2d(in_features, out_features, 3, stride=2, padding=1, output_padding=1),
                nn.InstanceNorm2d(out_features),
                nn.ReLU(inplace=True),
            ]
            in_features = out_features
            out_features = in_features // 2
        self.model3 = nn.Sequential(*model3)

        # Output layer
        model4 = [nn.ReflectionPad2d(3), nn.Conv2d(64, output_nc, 7)]
        if sigmoid:
            model4 += [nn.Sigmoid()]

        self.model4 = nn.Sequential(*model4)

    def forward(self, x, cond=None):
        out = self.model0(x)
        out = self.model1(out)
        out = self.model2(out)
        out = self.model3(out)
        out = self.model4(out)

        return out


class LineartProcessor:
    """Processor for lineart detection."""

    def __init__(self):
        model_path = hf_hub_download("lllyasviel/Annotators", "sk_model.pth")
        self.model = Generator(3, 1, 3)
        self.model.load_state_dict(torch.load(model_path, map_location=torch.device("cpu")))
        self.model.eval()

        coarse_model_path = hf_hub_download("lllyasviel/Annotators", "sk_model2.pth")
        self.model_coarse = Generator(3, 1, 3)
        self.model_coarse.load_state_dict(torch.load(coarse_model_path, map_location=torch.device("cpu")))
        self.model_coarse.eval()

    def to(self, device: torch.device):
        self.model.to(device)
        self.model_coarse.to(device)
        return self

    def run(
        self, input_image: Image.Image, coarse: bool = False, detect_resolution: int = 512, image_resolution: int = 512
    ) -> Image.Image:
        """Processes an image to detect lineart.

        Args:
            input_image: The input image.
            coarse: Whether to use the coarse model.
            detect_resolution: The resolution to fit the image to before edge detection.
            image_resolution: The resolution of the output image.

        Returns:
            The detected lineart.
        """
        device = next(iter(self.model.parameters())).device

        np_image = pil_to_np(input_image)
        np_image = normalize_image_channel_count(np_image)
        np_image = resize_image_to_resolution(np_image, detect_resolution)

        model = self.model_coarse if coarse else self.model
        assert np_image.ndim == 3
        image = np_image
        with torch.no_grad():
            image = torch.from_numpy(image).float().to(device)
            image = image / 255.0
            image = rearrange(image, "h w c -> 1 c h w")
            line = model(image)[0][0]

            line = line.cpu().numpy()
            line = (line * 255.0).clip(0, 255).astype(np.uint8)

        detected_map = line

        detected_map = normalize_image_channel_count(detected_map)

        img = resize_image_to_resolution(np_image, image_resolution)
        H, W, C = img.shape

        detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR)
        detected_map = 255 - detected_map

        return np_to_pil(detected_map)
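A minimal usage sketch for LineartProcessor, as defined above; the input path is a placeholder, and coarse=True would select the second ("sk_model2") generator:

from PIL import Image

from invokeai.backend.image_util.lineart import LineartProcessor

processor = LineartProcessor()  # downloads sk_model.pth / sk_model2.pth on first use
lineart = processor.run(Image.open("photo.png"), coarse=False)
lineart.save("lineart.png")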
invokeai/backend/image_util/lineart_anime.py — new file (203 lines)
@@ -0,0 +1,203 @@
"""Adapted from https://github.com/huggingface/controlnet_aux (Apache-2.0 license)."""

import functools
from typing import Optional

import cv2
import numpy as np
import torch
import torch.nn as nn
from einops import rearrange
from huggingface_hub import hf_hub_download
from PIL import Image

from invokeai.backend.image_util.util import (
    normalize_image_channel_count,
    np_to_pil,
    pil_to_np,
    resize_image_to_resolution,
)


class UnetGenerator(nn.Module):
    """Create a Unet-based generator"""

    def __init__(
        self,
        input_nc: int,
        output_nc: int,
        num_downs: int,
        ngf: int = 64,
        norm_layer=nn.BatchNorm2d,
        use_dropout: bool = False,
    ):
        """Construct a Unet generator
        Parameters:
            input_nc (int)  -- the number of channels in input images
            output_nc (int) -- the number of channels in output images
            num_downs (int) -- the number of downsamplings in UNet. For example, if |num_downs| == 7,
                               an image of size 128x128 will become of size 1x1 at the bottleneck
            ngf (int)       -- the number of filters in the last conv layer
            norm_layer      -- normalization layer
        We construct the U-Net from the innermost layer to the outermost layer.
        It is a recursive process.
        """
        super(UnetGenerator, self).__init__()
        # construct unet structure
        unet_block = UnetSkipConnectionBlock(
            ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True
        )  # add the innermost layer
        for _ in range(num_downs - 5):  # add intermediate layers with ngf * 8 filters
            unet_block = UnetSkipConnectionBlock(
                ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout
            )
        # gradually reduce the number of filters from ngf * 8 to ngf
        unet_block = UnetSkipConnectionBlock(
            ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer
        )
        unet_block = UnetSkipConnectionBlock(
            ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer
        )
        unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
        self.model = UnetSkipConnectionBlock(
            output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer
        )  # add the outermost layer

    def forward(self, input):
        """Standard forward"""
        return self.model(input)


class UnetSkipConnectionBlock(nn.Module):
    """Defines the Unet submodule with skip connection.
    X -------------------identity----------------------
    |-- downsampling -- |submodule| -- upsampling --|
    """

    def __init__(
        self,
        outer_nc: int,
        inner_nc: int,
        input_nc: Optional[int] = None,
        submodule=None,
        outermost: bool = False,
        innermost: bool = False,
        norm_layer=nn.BatchNorm2d,
        use_dropout: bool = False,
    ):
        """Construct a Unet submodule with skip connections.
        Parameters:
            outer_nc (int) -- the number of filters in the outer conv layer
            inner_nc (int) -- the number of filters in the inner conv layer
            input_nc (int) -- the number of channels in input images/features
            submodule (UnetSkipConnectionBlock) -- previously defined submodules
            outermost (bool)    -- if this module is the outermost module
            innermost (bool)    -- if this module is the innermost module
            norm_layer          -- normalization layer
            use_dropout (bool)  -- if use dropout layers.
        """
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        if input_nc is None:
            input_nc = outer_nc
        downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc)
        uprelu = nn.ReLU(True)
        upnorm = norm_layer(outer_nc)

        if outermost:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1)
            down = [downconv]
            up = [uprelu, upconv, nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]

            if use_dropout:
                model = down + [submodule] + up + [nn.Dropout(0.5)]
            else:
                model = down + [submodule] + up

        self.model = nn.Sequential(*model)

    def forward(self, x):
        if self.outermost:
            return self.model(x)
        else:  # add skip connections
            return torch.cat([x, self.model(x)], 1)


class LineartAnimeProcessor:
    """Processes an image to detect lineart."""

    def __init__(self):
        model_path = hf_hub_download("lllyasviel/Annotators", "netG.pth")
        norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
        self.model = UnetGenerator(3, 1, 8, 64, norm_layer=norm_layer, use_dropout=False)
        ckpt = torch.load(model_path)
        for key in list(ckpt.keys()):
            if "module." in key:
                ckpt[key.replace("module.", "")] = ckpt[key]
                del ckpt[key]
        self.model.load_state_dict(ckpt)
        self.model.eval()

    def to(self, device: torch.device):
        self.model.to(device)
        return self

    def run(self, input_image: Image.Image, detect_resolution: int = 512, image_resolution: int = 512) -> Image.Image:
        """Processes an image to detect lineart.

        Args:
            input_image: The input image.
            detect_resolution: The resolution to use for detection.
            image_resolution: The resolution to use for the output image.

        Returns:
            The detected lineart.
        """
        device = next(iter(self.model.parameters())).device
        np_image = pil_to_np(input_image)

        np_image = normalize_image_channel_count(np_image)
        np_image = resize_image_to_resolution(np_image, detect_resolution)

        H, W, C = np_image.shape
        Hn = 256 * int(np.ceil(float(H) / 256.0))
        Wn = 256 * int(np.ceil(float(W) / 256.0))
        img = cv2.resize(np_image, (Wn, Hn), interpolation=cv2.INTER_CUBIC)
        with torch.no_grad():
            image_feed = torch.from_numpy(img).float().to(device)
            image_feed = image_feed / 127.5 - 1.0
            image_feed = rearrange(image_feed, "h w c -> 1 c h w")

            line = self.model(image_feed)[0, 0] * 127.5 + 127.5
            line = line.cpu().numpy()

            line = cv2.resize(line, (W, H), interpolation=cv2.INTER_CUBIC)
            line = line.clip(0, 255).astype(np.uint8)

        detected_map = line

        detected_map = normalize_image_channel_count(detected_map)

        img = resize_image_to_resolution(np_image, image_resolution)
        H, W, C = img.shape

        detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR)
        detected_map = 255 - detected_map

        return np_to_pil(detected_map)
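A minimal usage sketch for LineartAnimeProcessor, as defined above (placeholder input path):

from PIL import Image

from invokeai.backend.image_util.lineart_anime import LineartAnimeProcessor

processor = LineartAnimeProcessor()  # downloads netG.pth on first use
lineart = processor.run(Image.open("anime_frame.png"), detect_resolution=512, image_resolution=512)
lineart.save("lineart_anime.png")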
@@ -1,5 +1,7 @@
from math import ceil, floor, sqrt

import cv2
import numpy as np
from PIL import Image


@@ -69,3 +71,134 @@ def make_grid(image_list, rows=None, cols=None):
        i = i + 1

    return grid_img


def pil_to_np(image: Image.Image) -> np.ndarray:
    """Converts a PIL image to a numpy array."""
    return np.array(image, dtype=np.uint8)


def np_to_pil(image: np.ndarray) -> Image.Image:
    """Converts a numpy array to a PIL image."""
    return Image.fromarray(image)


def pil_to_cv2(image: Image.Image) -> np.ndarray:
    """Converts a PIL image to a CV2 image."""
    return cv2.cvtColor(np.array(image, dtype=np.uint8), cv2.COLOR_RGB2BGR)


def cv2_to_pil(image: np.ndarray) -> Image.Image:
    """Converts a CV2 image to a PIL image."""
    return Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))


def normalize_image_channel_count(image: np.ndarray) -> np.ndarray:
    """Normalizes an image to have 3 channels.

    If the image has 1 channel, it will be duplicated 3 times.
    If the image has 4 channels, the alpha channel will be used to blend the image with a white background.

    Adapted from https://github.com/huggingface/controlnet_aux (Apache-2.0 license).

    Args:
        image: The input image.

    Returns:
        The normalized image.
    """
    assert image.dtype == np.uint8
    if image.ndim == 2:
        image = image[:, :, None]
    assert image.ndim == 3
    _height, _width, channels = image.shape
    assert channels == 1 or channels == 3 or channels == 4
    if channels == 3:
        return image
    if channels == 1:
        return np.concatenate([image, image, image], axis=2)
    if channels == 4:
        color = image[:, :, 0:3].astype(np.float32)
        alpha = image[:, :, 3:4].astype(np.float32) / 255.0
        normalized = color * alpha + 255.0 * (1.0 - alpha)
        normalized = normalized.clip(0, 255).astype(np.uint8)
        return normalized

    raise ValueError("Invalid number of channels.")


def resize_image_to_resolution(input_image: np.ndarray, resolution: int) -> np.ndarray:
    """Resizes an image, fitting it to the given resolution.

    Adapted from https://github.com/huggingface/controlnet_aux (Apache-2.0 license).

    Args:
        input_image: The input image.
        resolution: The resolution to fit the image to.

    Returns:
        The resized image.
    """
    h = float(input_image.shape[0])
    w = float(input_image.shape[1])
    scaling_factor = float(resolution) / min(h, w)
    h *= scaling_factor
    w *= scaling_factor
    h = int(np.round(h / 64.0)) * 64
    w = int(np.round(w / 64.0)) * 64
    if scaling_factor > 1:
        return cv2.resize(input_image, (w, h), interpolation=cv2.INTER_LANCZOS4)
    else:
        return cv2.resize(input_image, (w, h), interpolation=cv2.INTER_AREA)


def non_maximum_suppression(image: np.ndarray, threshold: int, sigma: float):
    """
    Apply non-maximum suppression to an image.

    This function is adapted from https://github.com/lllyasviel/ControlNet.

    Args:
        image: The input image.
        threshold: The threshold value for the suppression. Pixels with values greater than this will be set to 255.
        sigma: The standard deviation for the Gaussian blur applied to the image.

    Returns:
        The image after non-maximum suppression.
    """

    image = cv2.GaussianBlur(image.astype(np.float32), (0, 0), sigma)

    filter_1 = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8)
    filter_2 = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8)
    filter_3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8)
    filter_4 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8)

    y = np.zeros_like(image)

    for f in [filter_1, filter_2, filter_3, filter_4]:
        np.putmask(y, cv2.dilate(image, kernel=f) == image, image)

    z = np.zeros_like(y, dtype=np.uint8)
    z[y > threshold] = 255
    return z


def safe_step(x: np.ndarray, step: int = 2) -> np.ndarray:
    """Apply the safe step operation to an array.

    I don't fully understand the purpose of this function, but it appears to be normalizing/quantizing the array.

    Adapted from https://github.com/huggingface/controlnet_aux (Apache-2.0 license).

    Args:
        x: The input array.
        step: The step value.

    Returns:
        The array after the safe step operation.
    """
    y = x.astype(np.float32) * float(step + 1)
    y = y.astype(np.int32).astype(np.float32) / float(step)
    return y
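To make safe_step's quantizing behavior concrete, a small sketch (the sample values are chosen purely for illustration):

import numpy as np

from invokeai.backend.image_util.util import safe_step

# With step=2, inputs are scaled by 3, truncated toward zero, then divided by 2,
# so values in [0, 1) land on the levels 0.0, 0.5, and 1.0.
x = np.array([0.1, 0.4, 0.7, 0.99], dtype=np.float32)
print(safe_step(x))  # -> [0.  0.5 1.  1. ]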
@@ -6,10 +6,14 @@ from pathlib import Path

from typing import Callable, Literal, Optional, Union

from blake3 import blake3
from tqdm import tqdm

from invokeai.app.util.misc import uuid_string

HASHING_ALGORITHMS = Literal[
    "blake3_multi",
    "blake3_single",
    "random",
    "md5",
    "sha1",
    "sha224",
@@ -24,9 +28,6 @@ HASHING_ALGORITHMS = Literal[
    "sha3_512",
    "shake_128",
    "shake_256",
    "blake3",
    "blake3_single",
    "random",
]
MODEL_FILE_EXTENSIONS = (".ckpt", ".safetensors", ".bin", ".pt", ".pth")

@@ -60,10 +61,10 @@ class ModelHash:
    """

    def __init__(
        self, algorithm: HASHING_ALGORITHMS = "blake3", file_filter: Optional[Callable[[str], bool]] = None
        self, algorithm: HASHING_ALGORITHMS = "blake3_single", file_filter: Optional[Callable[[str], bool]] = None
    ) -> None:
        self.algorithm: HASHING_ALGORITHMS = algorithm
        if algorithm == "blake3":
        if algorithm == "blake3_multi":
            self._hash_file = self._blake3
        elif algorithm == "blake3_single":
            self._hash_file = self._blake3_single
@@ -94,7 +95,14 @@ class ModelHash:
        # blake3_single is a single-threaded version of blake3, prefix should still be "blake3:"
        prefix = self._get_prefix(self.algorithm)
        if model_path.is_file():
            return prefix + self._hash_file(model_path)
            hash_ = None
            # To give a similar user experience for single files and directories, we use a progress bar even for single files
            pbar = tqdm([model_path], desc=f"Hashing {model_path.name}", unit="file")
            for component in pbar:
                pbar.set_description(f"Hashing {component.name}")
                hash_ = prefix + self._hash_file(model_path)
            assert hash_ is not None
            return hash_
        elif model_path.is_dir():
            return prefix + self._hash_dir(model_path)
        else:
@@ -112,7 +120,9 @@ class ModelHash:
        model_component_paths = self._get_file_paths(dir, self._file_filter)

        component_hashes: list[str] = []
        for component in sorted(model_component_paths):
        pbar = tqdm(sorted(model_component_paths), desc=f"Hashing {dir.name}", unit="file")
        for component in pbar:
            pbar.set_description(f"Hashing {component.name}")
            component_hashes.append(self._hash_file(component))

        # BLAKE3 is cryptographically secure. We may as well fall back on a secure algorithm
@@ -216,4 +226,4 @@ class ModelHash:
    def _get_prefix(algorithm: HASHING_ALGORITHMS) -> str:
        """Return the prefix for the given algorithm, e.g. "blake3:" or "md5:"."""
        # blake3_single is a single-threaded version of blake3, prefix should still be "blake3:"
        return "blake3:" if algorithm == "blake3_single" else f"{algorithm}:"
        return "blake3:" if algorithm == "blake3_single" or algorithm == "blake3_multi" else f"{algorithm}:"
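Aside: the renamed algorithms ("blake3" becomes "blake3_multi"; "blake3_single" becomes the default) produce identical digests, so both collapse to the same "blake3:" prefix. A standalone sketch of the mapping implemented by _get_prefix above:

def get_prefix(algorithm: str) -> str:
    # both BLAKE3 variants hash identically, so they share one prefix
    return "blake3:" if algorithm in ("blake3_single", "blake3_multi") else f"{algorithm}:"

assert get_prefix("blake3_multi") == "blake3:"
assert get_prefix("blake3_single") == "blake3:"
assert get_prefix("md5") == "md5:"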
@@ -44,7 +44,7 @@ class ControlNetLoader(GenericDiffusersLoader):
            )

        self._logger.info(f"Converting {model_path} to diffusers format")
        with open(config.config_path, "r") as config_stream:
        with open(self._app_config.root_path / config.config_path, "r") as config_stream:
            convert_controlnet_to_diffusers(
                model_path,
                output_path,

@@ -91,7 +91,7 @@ class StableDiffusionDiffusersModel(GenericDiffusersLoader):
            model_path,
            output_path,
            model_type=self.model_base_to_model_type[base],
            original_config_file=config.config_path,
            original_config_file=self._app_config.root_path / config.config_path,
            extract_ema=True,
            from_safetensors=model_path.suffix == ".safetensors",
            precision=self._torch_dtype,

@@ -44,7 +44,7 @@ class VAELoader(GenericDiffusersLoader):
            raise Exception(f"VAE conversion not supported for model type: {config.base}")
        else:
            assert isinstance(config, CheckpointConfigBase)
            config_file = config.config_path
            config_file = self._app_config.root_path / config.config_path

        if model_path.suffix == ".safetensors":
            checkpoint = safetensors_load_file(model_path, device="cpu")
@@ -55,7 +55,7 @@ class VAELoader(GenericDiffusersLoader):
        if "state_dict" in checkpoint:
            checkpoint = checkpoint["state_dict"]

        ckpt_config = OmegaConf.load(self._app_config.root_path / config_file)
        ckpt_config = OmegaConf.load(config_file)
        assert isinstance(ckpt_config, DictConfig)
        self._logger.info(f"Converting {model_path} to diffusers format")
        vae_model = convert_ldm_vae_to_diffusers(
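Aside: all three loader hunks apply the same fix: a config_path stored relative to the InvokeAI root is now resolved against root_path before being opened, instead of against the process working directory. A sketch of the behavior (paths are illustrative):

from pathlib import Path

root_path = Path("/opt/invokeai")  # hypothetical app root
config_path = Path("configs/stable-diffusion/v1-inference.yaml")  # stored relative to root

print(root_path / config_path)  # /opt/invokeai/configs/stable-diffusion/v1-inference.yaml
# pathlib's join leaves an absolute right-hand path untouched (POSIX),
# so user-supplied absolute config paths keep working:
print(root_path / Path("/abs/custom.yaml"))  # /abs/custom.yaml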
@@ -114,7 +114,7 @@ class ModelProbe(object):

    @classmethod
    def probe(
        cls, model_path: Path, fields: Optional[Dict[str, Any]] = None, hash_algo: HASHING_ALGORITHMS = "blake3"
        cls, model_path: Path, fields: Optional[Dict[str, Any]] = None, hash_algo: HASHING_ALGORITHMS = "blake3_single"
    ) -> AnyModelConfig:
        """
        Probe the model at model_path and return its configuration record.
@@ -228,7 +228,7 @@ class ModelProbe(object):
            return ModelType.LoRA
        elif any(key.endswith(v) for v in {"to_k_lora.up.weight", "to_q_lora.down.weight"}):
            return ModelType.LoRA
        elif any(key.startswith(v) for v in {"control_model", "input_blocks"}):
        elif any(key.startswith(v) for v in {"controlnet", "control_model", "input_blocks"}):
            return ModelType.ControlNet
        elif key in {"emb_params", "string_to_param"}:
            return ModelType.TextualInversion
@@ -508,15 +508,22 @@ class ControlNetCheckpointProbe(CheckpointProbeBase):
        checkpoint = self.checkpoint
        for key_name in (
            "control_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight",
            "controlnet_mid_block.bias",
            "input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight",
            "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k.weight",
        ):
            if key_name not in checkpoint:
                continue
            if checkpoint[key_name].shape[-1] == 768:
            width = checkpoint[key_name].shape[-1]
            if width == 768:
                return BaseModelType.StableDiffusion1
            elif checkpoint[key_name].shape[-1] == 1024:
            elif width == 1024:
                return BaseModelType.StableDiffusion2
        raise InvalidModelConfigException("{self.model_path}: Unable to determine base type")
            elif width == 2048:
                return BaseModelType.StableDiffusionXL
            elif width == 1280:
                return BaseModelType.StableDiffusionXL
        raise InvalidModelConfigException(f"{self.model_path}: Unable to determine base type")


class IPAdapterCheckpointProbe(CheckpointProbeBase):
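Aside: the probe identifies the base architecture from the last dimension of a cross-attention projection, with two new SDXL widths added by this hunk (and the exception message gaining its missing f-string prefix). A standalone sketch of the mapping, with illustrative string labels:

def base_from_attn_width(width: int) -> str:
    # last dim of attn2.to_k distinguishes the base model family
    widths = {768: "sd-1", 1024: "sd-2", 1280: "sdxl", 2048: "sdxl"}
    try:
        return widths[width]
    except KeyError:
        raise ValueError(f"Unable to determine base type for width {width}") from None

assert base_from_attn_width(768) == "sd-1"
assert base_from_attn_width(2048) == "sdxl"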
@@ -1,149 +1,156 @@
from dataclasses import dataclass
from typing import Optional

from pydantic import BaseModel

from invokeai.backend.model_manager.config import BaseModelType, ModelType


@dataclass
class StarterModel:
class StarterModelWithoutDependencies(BaseModel):
    description: str
    source: str
    name: str
    base: BaseModelType
    type: ModelType
    # Optional list of model source dependencies that need to be installed before this model can be used
    dependencies: Optional[list[str]] = None
    is_installed: bool = False


class StarterModel(StarterModelWithoutDependencies):
    # Optional list of model source dependencies that need to be installed before this model can be used
    dependencies: Optional[list[StarterModelWithoutDependencies]] = None


sdxl_fp16_vae_fix = StarterModel(
    name="sdxl-vae-fp16-fix",
    base=BaseModelType.StableDiffusionXL,
    source="madebyollin/sdxl-vae-fp16-fix",
    description="SDXL VAE that works with FP16.",
    type=ModelType.VAE,
)

ip_adapter_sd_image_encoder = StarterModel(
    name="IP Adapter SD1.5 Image Encoder",
    base=BaseModelType.StableDiffusion1,
    source="InvokeAI/ip_adapter_sd_image_encoder",
    description="IP Adapter SD Image Encoder",
    type=ModelType.CLIPVision,
)

ip_adapter_sdxl_image_encoder = StarterModel(
    name="IP Adapter SDXL Image Encoder",
    base=BaseModelType.StableDiffusionXL,
    source="InvokeAI/ip_adapter_sdxl_image_encoder",
    description="IP Adapter SDXL Image Encoder",
    type=ModelType.CLIPVision,
)

cyberrealistic_negative = StarterModel(
    name="CyberRealistic Negative v3",
    base=BaseModelType.StableDiffusion1,
    source="https://huggingface.co/cyberdelia/CyberRealistic_Negative/resolve/main/CyberRealistic_Negative_v3.pt",
    description="Negative embedding specifically for use with CyberRealistic.",
    type=ModelType.TextualInversion,
)

# List of starter models, displayed on the frontend.
# The order/sort of this list is not changed by the frontend - set it how you want it here.
STARTER_MODELS: list[StarterModel] = [
    # region: Main
    StarterModel(
        name="SD 1.5 (base)",
        name="CyberRealistic v4.1",
        base=BaseModelType.StableDiffusion1,
        source="runwayml/stable-diffusion-v1-5",
        description="Stable Diffusion version 1.5 diffusers model (4.27 GB)",
        source="https://huggingface.co/cyberdelia/CyberRealistic/resolve/main/CyberRealistic_V4.1_FP16.safetensors",
        description="Photorealistic model. See other variants in HF repo 'cyberdelia/CyberRealistic'.",
        type=ModelType.Main,
        dependencies=[cyberrealistic_negative],
    ),
    StarterModel(
        name="ReV Animated",
        base=BaseModelType.StableDiffusion1,
        source="stablediffusionapi/rev-animated",
        description="Fantasy and anime style images.",
        type=ModelType.Main,
    ),
    StarterModel(
        name="SD 1.5 (inpainting)",
        name="Dreamshaper 8",
        base=BaseModelType.StableDiffusion1,
        source="runwayml/stable-diffusion-inpainting",
        description="RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB)",
        source="Lykon/dreamshaper-8",
        description="Popular versatile model.",
        type=ModelType.Main,
    ),
    StarterModel(
        name="Analog Diffusion",
        name="Dreamshaper 8 (inpainting)",
        base=BaseModelType.StableDiffusion1,
        source="wavymulder/Analog-Diffusion",
        description="An SD-1.5 model trained on diverse analog photographs (2.13 GB)",
        source="Lykon/dreamshaper-8-inpainting",
        description="Inpainting version of Dreamshaper 8.",
        type=ModelType.Main,
    ),
    StarterModel(
        name="Deliberate v5",
        base=BaseModelType.StableDiffusion1,
        source="https://huggingface.co/XpucT/Deliberate/resolve/main/Deliberate_v5.safetensors",
        description="Versatile model that produces detailed images up to 768px (4.27 GB)",
        description="Popular versatile model",
        type=ModelType.Main,
    ),
    StarterModel(
        name="Dungeons and Diffusion",
        name="Deliberate v5 (inpainting)",
        base=BaseModelType.StableDiffusion1,
        source="0xJustin/Dungeons-and-Diffusion",
        description="Dungeons & Dragons characters (2.13 GB)",
        source="https://huggingface.co/XpucT/Deliberate/resolve/main/Deliberate_v5-inpainting.safetensors",
        description="Inpainting version of Deliberate v5.",
        type=ModelType.Main,
    ),
    StarterModel(
        name="dreamlike photoreal v2",
        base=BaseModelType.StableDiffusion1,
        source="dreamlike-art/dreamlike-photoreal-2.0",
        description="A photorealistic model trained on 768 pixel images based on SD 1.5 (2.13 GB)",
        type=ModelType.Main,
    ),
    StarterModel(
        name="Inkpunk Diffusion",
        base=BaseModelType.StableDiffusion1,
        source="Envvi/Inkpunk-Diffusion",
        description='Stylized illustrations inspired by Gorillaz, FLCL and Shinkawa; prompt with "nvinkpunk" (4.27 GB)',
        type=ModelType.Main,
    ),
    StarterModel(
        name="OpenJourney",
        base=BaseModelType.StableDiffusion1,
        source="prompthero/openjourney",
        description='An SD 1.5 model fine tuned on Midjourney; prompt with "mdjrny-v4 style" (2.13 GB)',
        type=ModelType.Main,
    ),
    StarterModel(
        name="seek.art MEGA",
        base=BaseModelType.StableDiffusion1,
        source="coreco/seek.art_MEGA",
        description='A general use SD-1.5 "anything" model that supports multiple styles (2.1 GB)',
        type=ModelType.Main,
    ),
    StarterModel(
        name="TrinArt v2",
        base=BaseModelType.StableDiffusion1,
        source="naclbit/trinart_stable_diffusion_v2",
        description="An SD-1.5 model finetuned with ~40K assorted high resolution manga/anime-style images (2.13 GB)",
        type=ModelType.Main,
    ),
    StarterModel(
        name="SD 2.1 (base)",
        base=BaseModelType.StableDiffusion2,
        source="stabilityai/stable-diffusion-2-1",
        description="Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB)",
        type=ModelType.Main,
    ),
    StarterModel(
        name="SD 2.0 (inpainting)",
        base=BaseModelType.StableDiffusion2,
        source="stabilityai/stable-diffusion-2-inpainting",
        description="Stable Diffusion version 2.0 inpainting model (5.21 GB)",
        type=ModelType.Main,
    ),
    StarterModel(
        name="SDXL (base)",
        name="Juggernaut XL v9",
        base=BaseModelType.StableDiffusionXL,
        source="stabilityai/stable-diffusion-xl-base-1.0",
        description="Stable Diffusion XL base model (12 GB)",
        source="RunDiffusion/Juggernaut-XL-v9",
        description="Photograph-focused model.",
        type=ModelType.Main,
        dependencies=[sdxl_fp16_vae_fix],
    ),
    StarterModel(
        name="Dreamshaper XL v2 Turbo",
        base=BaseModelType.StableDiffusionXL,
        source="Lykon/dreamshaper-xl-v2-turbo",
        description="For turbo, use CFG Scale 2, 4-8 steps, DPM++ SDE Karras. For non-turbo, use CFG Scale 6, 20-40 steps, DPM++ 2M SDE Karras.",
        type=ModelType.Main,
        dependencies=[sdxl_fp16_vae_fix],
    ),
    StarterModel(
        name="SDXL Refiner",
        base=BaseModelType.StableDiffusionXLRefiner,
        source="stabilityai/stable-diffusion-xl-refiner-1.0",
        description="Stable Diffusion XL refiner model (12 GB)",
        description="The OG Stable Diffusion XL refiner model.",
        type=ModelType.Main,
        dependencies=[sdxl_fp16_vae_fix],
    ),
    # endregion
    # region VAE
    StarterModel(
        name="sdxl-vae-fp16-fix",
        base=BaseModelType.StableDiffusionXL,
        source="madebyollin/sdxl-vae-fp16-fix",
        description="Version of the SDXL-1.0 VAE that works in half precision mode",
        type=ModelType.VAE,
    ),
    sdxl_fp16_vae_fix,
    # endregion
    # region LoRA
    StarterModel(
        name="FlatColor",
        base=BaseModelType.StableDiffusion1,
        source="https://civitai.com/models/6433/loraflatcolor",
        description="A LoRA that generates scenery using solid blocks of color",
        name="Alien Style",
        base=BaseModelType.StableDiffusionXL,
        source="https://huggingface.co/RalFinger/alien-style-lora-sdxl/resolve/main/alienzkin-sdxl.safetensors",
        description="Futuristic, intricate alien styles. Trigger with 'alienzkin'.",
        type=ModelType.LoRA,
    ),
    StarterModel(
        name="Ink scenery",
        base=BaseModelType.StableDiffusion1,
        source="https://civitai.com/api/download/models/83390",
        description="Generate india ink-like landscapes",
        name="Noodles Style",
        base=BaseModelType.StableDiffusionXL,
        source="https://huggingface.co/RalFinger/noodles-lora-sdxl/resolve/main/noodlez-sdxl.safetensors",
        description="Never-ending, no-holds-barred, noodle nightmare. Trigger with 'noodlez'.",
        type=ModelType.LoRA,
    ),
    # endregion
    # region TI
    StarterModel(
        name="EasyNegative",
        base=BaseModelType.StableDiffusion1,
        source="https://huggingface.co/embed/EasyNegative/resolve/main/EasyNegative.safetensors",
        description="A textual inversion to use in the negative prompt to reduce bad anatomy",
        type=ModelType.TextualInversion,
    ),
    # endregion
    # region IP Adapter
    StarterModel(
        name="IP Adapter",
@@ -151,7 +158,7 @@ STARTER_MODELS: list[StarterModel] = [
        source="InvokeAI/ip_adapter_sd15",
        description="IP-Adapter for SD 1.5 models",
        type=ModelType.IPAdapter,
        dependencies=["InvokeAI/ip_adapter_sd_image_encoder"],
        dependencies=[ip_adapter_sd_image_encoder],
    ),
    StarterModel(
        name="IP Adapter Plus",
@@ -159,7 +166,7 @@ STARTER_MODELS: list[StarterModel] = [
        source="InvokeAI/ip_adapter_plus_sd15",
        description="Refined IP-Adapter for SD 1.5 models",
        type=ModelType.IPAdapter,
        dependencies=["InvokeAI/ip_adapter_sd_image_encoder"],
        dependencies=[ip_adapter_sd_image_encoder],
    ),
    StarterModel(
        name="IP Adapter Plus Face",
@@ -167,7 +174,7 @@ STARTER_MODELS: list[StarterModel] = [
        source="InvokeAI/ip_adapter_plus_face_sd15",
        description="Refined IP-Adapter for SD 1.5 models, adapted for faces",
        type=ModelType.IPAdapter,
        dependencies=["InvokeAI/ip_adapter_sd_image_encoder"],
        dependencies=[ip_adapter_sd_image_encoder],
    ),
    StarterModel(
        name="IP Adapter SDXL",
@@ -175,7 +182,7 @@ STARTER_MODELS: list[StarterModel] = [
        source="InvokeAI/ip_adapter_sdxl",
        description="IP-Adapter for SDXL models",
        type=ModelType.IPAdapter,
        dependencies=["InvokeAI/ip_adapter_sdxl_image_encoder"],
        dependencies=[ip_adapter_sdxl_image_encoder],
    ),
    # endregion
    # region ControlNet
@@ -378,15 +385,6 @@ STARTER_MODELS: list[StarterModel] = [
        type=ModelType.T2IAdapter,
    ),
    # endregion
    # region TI
    StarterModel(
        name="EasyNegative",
        base=BaseModelType.StableDiffusion1,
        source="https://huggingface.co/embed/EasyNegative/resolve/main/EasyNegative.safetensors",
        description="A textual inversion to use in the negative prompt to reduce bad anatomy",
        type=ModelType.TextualInversion,
    ),
    # endregion
]

assert len(STARTER_MODELS) == len({m.source for m in STARTER_MODELS}), "Duplicate starter models"
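Aside: dependencies are now full StarterModel objects rather than bare source strings, so the UI can resolve everything a model needs into one install list. A minimal sketch of that flattening (types simplified for illustration):

from dataclasses import dataclass
from typing import Optional

@dataclass
class Starter:  # simplified stand-in for StarterModelWithoutDependencies
    name: str
    source: str
    dependencies: Optional[list["Starter"]] = None

vae = Starter("sdxl-vae-fp16-fix", "madebyollin/sdxl-vae-fp16-fix")
main = Starter("Juggernaut XL v9", "RunDiffusion/Juggernaut-XL-v9", dependencies=[vae])

def all_sources(model: Starter) -> list[str]:
    # the model itself first, then each dependency's source
    sources = [model.source]
    if model.dependencies:
        sources.extend(d.source for d in model.dependencies)
    return sources

assert all_sources(main) == ["RunDiffusion/Juggernaut-XL-v9", "madebyollin/sdxl-vae-fp16-fix"]

This mirrors the frontend change in StarterModelsResultItem further down, which now pushes each dependency's source instead of the dependency object itself.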
@@ -24,7 +24,7 @@
    "areYouSure": "Sei sicuro?",
    "dontAskMeAgain": "Non chiedermelo più",
    "batch": "Gestione Lotto",
    "modelManager": "Gestore Modelli",
    "modelManager": "Gestione Modelli",
    "communityLabel": "Comunità",
    "nodeEditor": "Editor dei nodi",
    "advanced": "Avanzate",
@@ -36,7 +36,7 @@
    "auto": "Automatico",
    "simple": "Semplice",
    "details": "Dettagli",
    "format": "formato",
    "format": "Formato",
    "unknown": "Sconosciuto",
    "folder": "Cartella",
    "error": "Errore",
@@ -336,8 +336,8 @@
  "modelManager": {
    "modelManager": "Gestione Modelli",
    "model": "Modello",
    "allModels": "Tutti i Modelli",
    "modelUpdated": "Modello Aggiornato",
    "allModels": "Tutti i modelli",
    "modelUpdated": "Modello aggiornato",
    "manual": "Manuale",
    "name": "Nome",
    "description": "Descrizione",
@@ -364,7 +364,7 @@
    "convertToDiffusersHelpText6": "Vuoi convertire questo modello?",
    "modelConverted": "Modello convertito",
    "alpha": "Alpha",
    "convertToDiffusersHelpText1": "Questo modello verrà convertito nel formato 🧨 Diffusore.",
    "convertToDiffusersHelpText1": "Questo modello verrà convertito nel formato 🧨 Diffusori.",
    "convertToDiffusersHelpText3": "Il file Checkpoint su disco verrà eliminato se si trova nella cartella principale di InvokeAI. Se si trova invece in una posizione personalizzata, NON verrà eliminato.",
    "v2_base": "v2 (512px)",
    "v2_768": "v2 (768px)",
@@ -381,7 +381,7 @@
    "modelsSynced": "Modelli sincronizzati",
    "modelSyncFailed": "Sincronizzazione modello non riuscita",
    "settings": "Impostazioni",
    "syncModels": "Sincronizza Modelli",
    "syncModels": "Sincronizza modelli",
    "predictionType": "Tipo di previsione",
    "advanced": "Avanzate",
    "modelType": "Tipo di modello",
@@ -421,7 +421,7 @@
    "inplaceInstallDesc": "Installa i modelli senza copiare i file. Quando si utilizza il modello, verrà caricato da questa posizione. Se disabilitato, i file del modello verranno copiati nella directory dei modelli gestiti da Invoke durante l'installazione.",
    "installQueue": "Coda di installazione",
    "install": "Installa",
    "installRepo": "Installa Repo",
    "installRepo": "Installa Repository",
    "huggingFacePlaceholder": "proprietario/nome-modello",
    "huggingFaceHelper": "Se in questo repository vengono trovati più modelli, ti verrà richiesto di selezionarne uno da installare.",
    "installAll": "Installa tutto",
@@ -429,7 +429,20 @@
    "scanPlaceholder": "Percorso di una cartella locale",
    "simpleModelPlaceholder": "URL o percorso di un file locale o di una cartella diffusori",
    "urlOrLocalPath": "URL o percorso locale",
    "urlOrLocalPathHelper": "Gli URL dovrebbero puntare a un singolo file. I percorsi locali possono puntare a un singolo file o cartella per un singolo modello di diffusore."
    "urlOrLocalPathHelper": "Gli URL dovrebbero puntare a un singolo file. I percorsi locali possono puntare a un singolo file o cartella per un singolo modello di diffusore.",
    "hfTokenHelperText": "Per utilizzare i modelli checkpoint è necessario un token HF. Clicca qui per creare o ottenere il tuo token.",
    "hfTokenInvalid": "Token HF non valido o mancante",
    "hfTokenInvalidErrorMessage": "Token HuggingFace non valido o mancante.",
    "hfTokenUnableToVerify": "Impossibile verificare il token HF",
    "hfTokenUnableToVerifyErrorMessage": "Impossibile verificare il token HuggingFace. Ciò è probabilmente dovuto a un errore di rete. Per favore riprova più tardi.",
    "hfTokenSaved": "Token HF salvato",
    "loraModels": "LoRA",
    "starterModels": "Modelli iniziali",
    "textualInversions": "Inversioni Testuali",
    "noModelsInstalled": "Nessun modello installato",
    "hfTokenInvalidErrorMessage2": "Aggiornalo in ",
    "main": "Principali",
    "noModelsInstalledDesc1": "Installa i modelli con"
  },
  "parameters": {
    "images": "Immagini",
@@ -1381,7 +1394,8 @@
    "refinermodel": "Modello Affinatore",
    "posAestheticScore": "Punteggio estetico positivo",
    "posStylePrompt": "Prompt Stile positivo",
    "freePromptStyle": "Prompt di stile manuale"
    "freePromptStyle": "Prompt di stile manuale",
    "refinerSteps": "Passi Affinamento"
  },
  "metadata": {
    "initImage": "Immagine iniziale",
@@ -1447,7 +1461,18 @@
    "uploadWorkflow": "Carica da file",
    "noWorkflows": "Nessun flusso di lavoro",
    "workflowCleared": "Flusso di lavoro cancellato",
    "saveWorkflowToProject": "Salva flusso di lavoro nel progetto"
    "saveWorkflowToProject": "Salva flusso di lavoro nel progetto",
    "noUserWorkflows": "Nessun flusso di lavoro utente",
    "defaultWorkflows": "Flussi di lavoro predefiniti",
    "userWorkflows": "I miei flussi di lavoro",
    "descending": "Discendente",
    "created": "Creato",
    "ascending": "Ascendente",
    "noRecentWorkflows": "Nessun flusso di lavoro recente",
    "name": "Nome",
    "updated": "Aggiornato",
    "projectWorkflows": "Flussi di lavoro del progetto",
    "opened": "Aperto"
  },
  "app": {
    "storeNotInitialized": "Il negozio non è inizializzato"
@@ -433,7 +433,21 @@
    "scanPlaceholder": "Путь к локальной папке",
    "simpleModelPlaceholder": "URL или путь к локальному файлу или папке diffusers",
    "urlOrLocalPath": "URL или локальный путь",
    "urlOrLocalPathHelper": "URL-адреса должны указывать на один файл. Локальные пути могут указывать на один файл или папку для одной модели диффузоров."
    "urlOrLocalPathHelper": "URL-адреса должны указывать на один файл. Локальные пути могут указывать на один файл или папку для одной модели диффузоров.",
    "hfToken": "Токен HuggingFace",
    "hfTokenInvalid": "Недействительный или отсутствующий HF-токен",
    "hfTokenInvalidErrorMessage2": "Обновите его в . ",
    "hfTokenUnableToVerify": "Невозможно проверить HF-токен",
    "hfTokenSaved": "HF-токен сохранен",
    "starterModels": "Стартовые модели",
    "textualInversions": "Текстовые инверсии",
    "hfTokenHelperText": "Для использования моделей контрольных точек требуется токен HF. Нажмите здесь, чтобы создать или получить свой токен.",
    "hfTokenInvalidErrorMessage": "Недействительный или отсутствующий HuggingFace токен.",
    "hfTokenUnableToVerifyErrorMessage": "Невозможно проверить токен HuggingFace. Вероятно, это связано с сетевой ошибкой. Пожалуйста, повторите попытку позже.",
    "loraModels": "LoRAs",
    "main": "Основные",
    "noModelsInstalled": "Нет установленных моделей",
    "noModelsInstalledDesc1": "Установите модели с помощью"
  },
  "parameters": {
    "images": "Изображения",
@@ -1395,7 +1409,8 @@
    "loading": "Загрузка...",
    "steps": "Шаги",
    "posStylePrompt": "Запрос стиля",
    "freePromptStyle": "Ручной запрос стиля"
    "freePromptStyle": "Ручной запрос стиля",
    "refinerSteps": "Шаги доработчика"
  },
  "invocationCache": {
    "useCache": "Использовать кэш",
@@ -1437,7 +1452,18 @@
    "newWorkflowCreated": "Создан новый рабочий процесс",
    "saveWorkflowToProject": "Сохранить рабочий процесс в проект",
    "workflowCleared": "Рабочий процесс очищен",
    "noWorkflows": "Нет рабочих процессов"
    "noWorkflows": "Нет рабочих процессов",
    "opened": "Открыто",
    "updated": "Обновлено",
    "noUserWorkflows": "Нет рабочих процессов пользователя",
    "ascending": "Восходящий",
    "created": "Создано",
    "descending": "Спуск",
    "userWorkflows": "Мои рабочие процессы",
    "projectWorkflows": "Рабочие процессы проекта",
    "defaultWorkflows": "Стандартные рабочие процессы",
    "name": "Имя",
    "noRecentWorkflows": "Нет последних рабочих процессов"
  },
  "hrf": {
    "enableHrf": "Включить исправление высокого разрешения",
@@ -1,14 +1,14 @@
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { setDefaultSettings } from 'features/parameters/store/actions';
import {
  heightChanged,
  heightRecalled,
  setCfgRescaleMultiplier,
  setCfgScale,
  setScheduler,
  setSteps,
  vaePrecisionChanged,
  vaeSelected,
  widthChanged,
  widthRecalled,
} from 'features/parameters/store/generationSlice';
import {
  isParameterCFGRescaleMultiplier,
@@ -100,13 +100,13 @@ export const addSetDefaultSettingsListener = (startAppListening: AppStartListeni

      if (width) {
        if (isParameterWidth(width)) {
          dispatch(widthChanged(width));
          dispatch(widthRecalled(width));
        }
      }

      if (height) {
        if (isParameterHeight(height)) {
          dispatch(heightChanged(height));
          dispatch(heightRecalled(height));
        }
      }
@@ -0,0 +1,17 @@
import type { RgbaColor } from 'react-colorful';

export function rgbaToHex(color: RgbaColor, alpha: boolean = false): string {
  const hex = ((1 << 24) + (color.r << 16) + (color.g << 8) + color.b).toString(16).slice(1);
  const alphaHex = Math.round(color.a * 255)
    .toString(16)
    .padStart(2, '0');
  return alpha ? `#${hex}${alphaHex}` : `#${hex}`;
}

export function hexToRGBA(hex: string, alpha: number) {
  hex = hex.replace(/^#/, '');
  const r = parseInt(hex.substring(0, 2), 16);
  const g = parseInt(hex.substring(2, 4), 16);
  const b = parseInt(hex.substring(4, 6), 16);
  return { r, g, b, a: alpha };
}
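Aside: the (1 << 24) bias in rgbaToHex forces a 7-digit hex string so the leading "1" can be sliced off, guaranteeing zero-padded two-digit channels. The same trick, rendered in Python purely for illustration:

def rgba_to_hex(r: int, g: int, b: int, a: float, alpha: bool = False) -> str:
    # bias by 1 << 24 so formatting always yields 7 hex digits; drop the leading "1"
    hex_rgb = format((1 << 24) + (r << 16) + (g << 8) + b, "x")[1:]
    alpha_hex = format(round(a * 255), "02x")
    return f"#{hex_rgb}{alpha_hex}" if alpha else f"#{hex_rgb}"

assert rgba_to_hex(0, 128, 255, 1.0) == "#0080ff"
assert rgba_to_hex(0, 128, 255, 0.5, alpha=True) == "#0080ff80"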
@@ -1,24 +1,36 @@
import { CustomSelect, FormControl } from '@invoke-ai/ui-library';
import { Combobox, FormControl, Tooltip } from '@invoke-ai/ui-library';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { useModelCustomSelect } from 'common/hooks/useModelCustomSelect';
import { useGroupedModelCombobox } from 'common/hooks/useGroupedModelCombobox';
import { useControlAdapterIsEnabled } from 'features/controlAdapters/hooks/useControlAdapterIsEnabled';
import { useControlAdapterModel } from 'features/controlAdapters/hooks/useControlAdapterModel';
import { useControlAdapterModels } from 'features/controlAdapters/hooks/useControlAdapterModels';
import { useControlAdapterType } from 'features/controlAdapters/hooks/useControlAdapterType';
import { controlAdapterModelChanged } from 'features/controlAdapters/store/controlAdaptersSlice';
import { selectGenerationSlice } from 'features/parameters/store/generationSlice';
import { memo, useCallback, useMemo } from 'react';
import type { ControlNetModelConfig, IPAdapterModelConfig, T2IAdapterModelConfig } from 'services/api/types';
import { useTranslation } from 'react-i18next';
import type {
  AnyModelConfig,
  ControlNetModelConfig,
  IPAdapterModelConfig,
  T2IAdapterModelConfig,
} from 'services/api/types';

type ParamControlAdapterModelProps = {
  id: string;
};

const selectMainModel = createMemoizedSelector(selectGenerationSlice, (generation) => generation.model);

const ParamControlAdapterModel = ({ id }: ParamControlAdapterModelProps) => {
  const isEnabled = useControlAdapterIsEnabled(id);
  const controlAdapterType = useControlAdapterType(id);
  const { modelConfig } = useControlAdapterModel(id);
  const dispatch = useAppDispatch();
  const currentBaseModel = useAppSelector((s) => s.generation.model?.base);
  const mainModel = useAppSelector(selectMainModel);
  const { t } = useTranslation();

  const [modelConfigs, { isLoading }] = useControlAdapterModels(controlAdapterType);

@@ -42,24 +54,35 @@ const ParamControlAdapterModel = ({ id }: ParamControlAdapterModelProps) => {
    [controlAdapterType, modelConfig]
  );

  const { items, selectedItem, onChange, placeholder } = useModelCustomSelect({
  const getIsDisabled = useCallback(
    (model: AnyModelConfig): boolean => {
      const isCompatible = currentBaseModel === model.base;
      const hasMainModel = Boolean(currentBaseModel);
      return !hasMainModel || !isCompatible;
    },
    [currentBaseModel]
  );

  const { options, value, onChange, noOptionsMessage } = useGroupedModelCombobox({
    modelConfigs,
    isLoading,
    selectedModel,
    onChange: _onChange,
    modelFilter: (model) => model.base === currentBaseModel,
    selectedModel,
    getIsDisabled,
    isLoading,
  });

  return (
    <FormControl isDisabled={!items.length || !isEnabled} isInvalid={!selectedItem || !items.length}>
      <CustomSelect
        key={items.length}
        selectedItem={selectedItem}
        placeholder={placeholder}
        items={items}
        onChange={onChange}
      />
    </FormControl>
    <Tooltip label={value?.description}>
      <FormControl isDisabled={!isEnabled} isInvalid={!value || mainModel?.base !== modelConfig?.base}>
        <Combobox
          options={options}
          placeholder={t('controlnet.selectModel')}
          value={value}
          onChange={onChange}
          noOptionsMessage={noOptionsMessage}
        />
      </FormControl>
    </Tooltip>
  );
};
@@ -15,7 +15,7 @@ type CannyProcessorProps = {

const CannyProcessor = (props: CannyProcessorProps) => {
  const { controlNetId, processorNode, isEnabled } = props;
  const { low_threshold, high_threshold, image_resolution } = processorNode;
  const { low_threshold, high_threshold, image_resolution, detect_resolution } = processorNode;
  const processorChanged = useProcessorNodeChanged();
  const { t } = useTranslation();
  const defaults = useGetDefaultForControlnetProcessor(
@@ -43,6 +43,13 @@ const CannyProcessor = (props: CannyProcessorProps) => {
    [controlNetId, processorChanged]
  );

  const handleDetectResolutionChanged = useCallback(
    (v: number) => {
      processorChanged(controlNetId, { detect_resolution: v });
    },
    [controlNetId, processorChanged]
  );

  return (
    <ProcessorWrapper>
      <FormControl isDisabled={!isEnabled}>
@@ -97,6 +104,24 @@ const CannyProcessor = (props: CannyProcessorProps) => {
          max={4096}
        />
      </FormControl>
      <FormControl isDisabled={!isEnabled}>
        <FormLabel>{t('controlnet.detectResolution')}</FormLabel>
        <CompositeSlider
          value={detect_resolution}
          onChange={handleDetectResolutionChanged}
          defaultValue={defaults.detect_resolution}
          min={0}
          max={4096}
          marks
        />
        <CompositeNumberInput
          value={detect_resolution}
          onChange={handleDetectResolutionChanged}
          defaultValue={defaults.detect_resolution}
          min={0}
          max={4096}
        />
      </FormControl>
    </ProcessorWrapper>
  );
};

@@ -15,7 +15,7 @@ type Props = {

const MediapipeFaceProcessor = (props: Props) => {
  const { controlNetId, processorNode, isEnabled } = props;
  const { max_faces, min_confidence, image_resolution } = processorNode;
  const { max_faces, min_confidence, image_resolution, detect_resolution } = processorNode;
  const processorChanged = useProcessorNodeChanged();
  const { t } = useTranslation();

@@ -44,6 +44,13 @@ const MediapipeFaceProcessor = (props: Props) => {
    [controlNetId, processorChanged]
  );

  const handleDetectResolutionChanged = useCallback(
    (v: number) => {
      processorChanged(controlNetId, { detect_resolution: v });
    },
    [controlNetId, processorChanged]
  );

  return (
    <ProcessorWrapper>
      <FormControl isDisabled={!isEnabled}>
@@ -102,6 +109,24 @@ const MediapipeFaceProcessor = (props: Props) => {
          max={4096}
        />
      </FormControl>
      <FormControl isDisabled={!isEnabled}>
        <FormLabel>{t('controlnet.detectResolution')}</FormLabel>
        <CompositeSlider
          value={detect_resolution}
          onChange={handleDetectResolutionChanged}
          defaultValue={defaults.detect_resolution}
          min={0}
          max={4096}
          marks
        />
        <CompositeNumberInput
          value={detect_resolution}
          onChange={handleDetectResolutionChanged}
          defaultValue={defaults.detect_resolution}
          min={0}
          max={4096}
        />
      </FormControl>
    </ProcessorWrapper>
  );
};

@@ -15,7 +15,7 @@ type Props = {

const MidasDepthProcessor = (props: Props) => {
  const { controlNetId, processorNode, isEnabled } = props;
  const { a_mult, bg_th, image_resolution } = processorNode;
  const { a_mult, bg_th, image_resolution, detect_resolution } = processorNode;
  const processorChanged = useProcessorNodeChanged();
  const { t } = useTranslation();

@@ -44,6 +44,13 @@ const MidasDepthProcessor = (props: Props) => {
    [controlNetId, processorChanged]
  );

  const handleDetectResolutionChanged = useCallback(
    (v: number) => {
      processorChanged(controlNetId, { detect_resolution: v });
    },
    [controlNetId, processorChanged]
  );

  return (
    <ProcessorWrapper>
      <FormControl isDisabled={!isEnabled}>
@@ -104,6 +111,24 @@ const MidasDepthProcessor = (props: Props) => {
          max={4096}
        />
      </FormControl>
      <FormControl isDisabled={!isEnabled}>
        <FormLabel>{t('controlnet.detectResolution')}</FormLabel>
        <CompositeSlider
          value={detect_resolution}
          onChange={handleDetectResolutionChanged}
          defaultValue={defaults.detect_resolution}
          min={0}
          max={4096}
          marks
        />
        <CompositeNumberInput
          value={detect_resolution}
          onChange={handleDetectResolutionChanged}
          defaultValue={defaults.detect_resolution}
          min={0}
          max={4096}
        />
      </FormControl>
    </ProcessorWrapper>
  );
};
@@ -48,6 +48,7 @@ export const CONTROLNET_PROCESSORS: ControlNetProcessorsDict = {
      low_threshold: 100,
      high_threshold: 200,
      image_resolution: baseModel === 'sdxl' ? 1024 : 512,
      detect_resolution: baseModel === 'sdxl' ? 1024 : 512,
    }),
  },
  color_map_image_processor: {
@@ -158,6 +159,7 @@ export const CONTROLNET_PROCESSORS: ControlNetProcessorsDict = {
      max_faces: 1,
      min_confidence: 0.5,
      image_resolution: baseModel === 'sdxl' ? 1024 : 512,
      detect_resolution: baseModel === 'sdxl' ? 1024 : 512,
    }),
  },
  midas_depth_image_processor: {
@@ -174,6 +176,7 @@ export const CONTROLNET_PROCESSORS: ControlNetProcessorsDict = {
      a_mult: 2,
      bg_th: 0.1,
      image_resolution: baseModel === 'sdxl' ? 1024 : 512,
      detect_resolution: baseModel === 'sdxl' ? 1024 : 512,
    }),
  },
  mlsd_image_processor: {
@@ -37,10 +37,10 @@ export const {
} = caAdapterSelectors;

const initialControlAdaptersState: ControlAdaptersState = caAdapter.getInitialState<{
  _version: 1;
  _version: 2;
  pendingControlImages: string[];
}>({
  _version: 1,
  _version: 2,
  pendingControlImages: [],
});

@@ -405,6 +405,9 @@ const migrateControlAdaptersState = (state: any): any => {
  if (!('_version' in state)) {
    state._version = 1;
  }
  if (state._version === 1) {
    state = cloneDeep(initialControlAdaptersState);
  }
  return state;
};
@@ -72,7 +72,7 @@ export const isControlAdapterProcessorType = (v: unknown): v is ControlAdapterPr
 */
export type RequiredCannyImageProcessorInvocation = O.Required<
  CannyImageProcessorInvocation,
  'type' | 'low_threshold' | 'high_threshold' | 'image_resolution'
  'type' | 'low_threshold' | 'high_threshold' | 'image_resolution' | 'detect_resolution'
>;

/**
@@ -133,7 +133,7 @@ export type RequiredLineartImageProcessorInvocation = O.Required<
 */
export type RequiredMediapipeFaceProcessorInvocation = O.Required<
  MediapipeFaceProcessorInvocation,
  'type' | 'max_faces' | 'min_confidence' | 'image_resolution'
  'type' | 'max_faces' | 'min_confidence' | 'image_resolution' | 'detect_resolution'
>;

/**
@@ -141,7 +141,7 @@ export type RequiredMediapipeFaceProcessorInvocation = O.Required<
 */
export type RequiredMidasDepthImageProcessorInvocation = O.Required<
  MidasDepthImageProcessorInvocation,
  'type' | 'a_mult' | 'bg_th' | 'image_resolution'
  'type' | 'a_mult' | 'bg_th' | 'image_resolution' | 'detect_resolution'
>;

/**
@@ -18,12 +18,12 @@ export const defaultLoRAConfig: Pick<LoRA, 'weight' | 'isEnabled'> = {
};

type LoraState = {
  _version: 1;
  _version: 2;
  loras: Record<string, LoRA>;
};

const initialLoraState: LoraState = {
  _version: 1,
  _version: 2,
  loras: {},
};

@@ -72,6 +72,10 @@ const migrateLoRAState = (state: any): any => {
  if (!('_version' in state)) {
    state._version = 1;
  }
  if (state._version === 1) {
    // Model type has changed, so we need to reset the state - too risky to migrate
    state = cloneDeep(initialLoraState);
  }
  return state;
};
@@ -18,7 +18,7 @@ export const StarterModelsResultItem = ({ result }: Props) => {
  const allSources = useMemo(() => {
    const _allSources = [result.source];
    if (result.dependencies) {
      _allSources.push(...result.dependencies);
      _allSources.push(...result.dependencies.map((d) => d.source));
    }
    return _allSources;
  }, [result]);
@@ -1,9 +1,12 @@
import { Box } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { hexToRGBA, rgbaToHex } from 'common/util/colorCodeTransformers';
import { colorTokenToCssVar } from 'common/util/colorTokenToCssVar';
import { fieldColorValueChanged } from 'features/nodes/store/nodesSlice';
import type { ColorFieldInputInstance, ColorFieldInputTemplate } from 'features/nodes/types/field';
import { memo, useCallback, useMemo } from 'react';
import type { RgbaColor } from 'react-colorful';
import { RgbaColorPicker } from 'react-colorful';
import { HexColorInput, RgbaColorPicker } from 'react-colorful';

import type { FieldComponentProps } from './types';

@@ -26,8 +29,12 @@ const ColorFieldInputComponent = (props: FieldComponentProps<ColorFieldInputInst
  }, [field.value]);

  const handleValueChanged = useCallback(
    (value: RgbaColor) => {
    (value: RgbaColor | string) => {
      // We need to multiply by 255 to convert from 0-1 to 0-255, which is what the backend needs
      if (typeof value === 'string') {
        value = hexToRGBA(value, 1);
      }

      const { r, g, b, a: _a } = value;
      const a = Math.round(_a * 255);
      dispatch(
@@ -41,7 +48,27 @@ const ColorFieldInputComponent = (props: FieldComponentProps<ColorFieldInputInst
    [dispatch, field.name, nodeId]
  );

  return <RgbaColorPicker className="nodrag" color={color} onChange={handleValueChanged} />;
  return (
    <Box sx={{ display: 'flex', flexDirection: 'column', gap: 2 }}>
      <HexColorInput
        style={{
          background: colorTokenToCssVar('base.700'),
          color: colorTokenToCssVar('base.100'),
          fontSize: 12,
          paddingInlineStart: 10,
          borderRadius: 4,
          paddingBlock: 4,
          outline: 'none',
        }}
        className="nodrag"
        color={rgbaToHex(color, true)}
        onChange={handleValueChanged}
        prefixed
        alpha
      />
      <RgbaColorPicker className="nodrag" color={color} onChange={handleValueChanged} style={{ width: '100%' }} />
    </Box>
  );
};

export default memo(ColorFieldInputComponent);
@@ -1,12 +1,12 @@
import { CustomSelect, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { Combobox, FormControl, FormLabel, Tooltip } from '@invoke-ai/ui-library';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import { useModelCustomSelect } from 'common/hooks/useModelCustomSelect';
import { useGroupedModelCombobox } from 'common/hooks/useGroupedModelCombobox';
import { zModelIdentifierField } from 'features/nodes/types/common';
import { modelSelected } from 'features/parameters/store/actions';
import { selectGenerationSlice } from 'features/parameters/store/generationSlice';
import { memo, useCallback } from 'react';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { useMainModels } from 'services/api/hooks/modelsByType';
import type { MainModelConfig } from 'services/api/types';
@@ -18,7 +18,12 @@ const ParamMainModelSelect = () => {
  const { t } = useTranslation();
  const selectedModel = useAppSelector(selectModel);
  const [modelConfigs, { isLoading }] = useMainModels();

  const tooltipLabel = useMemo(() => {
    if (!modelConfigs.length || !selectedModel) {
      return;
    }
    return modelConfigs.find((m) => m.key === selectedModel?.key)?.description;
  }, [modelConfigs, selectedModel]);
  const _onChange = useCallback(
    (model: MainModelConfig | null) => {
      if (!model) {
@@ -33,26 +38,28 @@ const ParamMainModelSelect = () => {
    [dispatch]
  );

  const { items, selectedItem, onChange, placeholder } = useModelCustomSelect({
  const { options, value, onChange, placeholder, noOptionsMessage } = useGroupedModelCombobox({
    modelConfigs,
    isLoading,
    selectedModel,
    onChange: _onChange,
    isLoading,
  });

  return (
    <FormControl isDisabled={!items.length} isInvalid={!selectedItem || !items.length}>
      <InformationalPopover feature="paramModel">
        <FormLabel>{t('modelManager.model')}</FormLabel>
      </InformationalPopover>
      <CustomSelect
        key={items.length}
        selectedItem={selectedItem}
        placeholder={placeholder}
        items={items}
        onChange={onChange}
      />
    </FormControl>
    <Tooltip label={tooltipLabel}>
      <FormControl isDisabled={!modelConfigs.length} isInvalid={!value || !modelConfigs.length}>
        <InformationalPopover feature="paramModel">
          <FormLabel>{t('modelManager.model')}</FormLabel>
        </InformationalPopover>
        <Combobox
          value={value}
          placeholder={placeholder}
          options={options}
          onChange={onChange}
          noOptionsMessage={noOptionsMessage}
        />
      </FormControl>
    </Tooltip>
  );
};
@@ -24,7 +24,7 @@ import type { ImageDTO } from 'services/api/types';
import type { GenerationState } from './types';

const initialGenerationState: GenerationState = {
  _version: 1,
  _version: 2,
  cfgScale: 7.5,
  cfgRescaleMultiplier: 0,
  height: 512,
@@ -276,6 +276,11 @@ const migrateGenerationState = (state: any): GenerationState => {
    state._version = 1;
    state.aspectRatio = initialAspectRatioState;
  }
  if (state._version === 1) {
    // The signature of the model has changed, so we need to reset it
    state._version = 2;
    state.model = null;
  }
  return state;
};
@@ -19,7 +19,7 @@ import type {
} from 'features/parameters/types/parameterSchemas';

export interface GenerationState {
  _version: 1;
  _version: 2;
  cfgScale: ParameterCFGScale;
  cfgRescaleMultiplier: ParameterCFGRescaleMultiplier;
  height: ParameterHeight;
@@ -9,7 +9,7 @@ import type {
} from 'features/parameters/types/parameterSchemas';

type SDXLState = {
  _version: 1;
  _version: 2;
  positiveStylePrompt: ParameterPositiveStylePromptSDXL;
  negativeStylePrompt: ParameterNegativeStylePromptSDXL;
  shouldConcatSDXLStylePrompt: boolean;
@@ -23,7 +23,7 @@ type SDXLState = {
};

const initialSDXLState: SDXLState = {
  _version: 1,
  _version: 2,
  positiveStylePrompt: '',
  negativeStylePrompt: '',
  shouldConcatSDXLStylePrompt: true,
@@ -93,6 +93,11 @@ const migrateSDXLState = (state: any): any => {
  if (!('_version' in state)) {
    state._version = 1;
  }
  if (state._version === 1) {
    // Model type has changed, so we need to reset the state - too risky to migrate
    state._version = 2;
    state.refinerModel = null;
  }
  return state;
};
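Aside: the same migration idiom repeats across the control-adapters, LoRA, generation, and SDXL slices: bump _version, and either reset the risky field or replace the whole state. A language-agnostic sketch of the pattern, in Python for brevity (names are illustrative):

import copy

INITIAL_STATE = {"_version": 2, "model": None, "loras": {}}

def migrate(state: dict) -> dict:
    if "_version" not in state:
        state["_version"] = 1
    if state["_version"] == 1:
        # model identifier shape changed in v2 - too risky to migrate, so reset
        state = copy.deepcopy(INITIAL_STATE)
    return state

assert migrate({"model": "old-style-id"})["_version"] == 2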
File diff suppressed because one or more lines are too long

@@ -1 +1 @@
__version__ = "4.0.0rc4"
__version__ = "4.0.0rc5"
@@ -112,8 +112,8 @@ dependencies = [
]
"dev" = ["jurigged", "pudb", "snakeviz", "gprof2dot"]
"test" = [
  "ruff",
  "ruff-lsp",
  "ruff>=0.3.3",
  "ruff-lsp>=0.0.53",
  "mypy",
  "pre-commit",
  "pytest>6.0.0",
@@ -20,8 +20,8 @@ parser.add_argument(
parser.add_argument(
    "--hash_algo",
    type=str,
    default="blake3",
    help=f"Hashing algorithm to use (default: blake3), one of: {algos}",
    default="blake3_single",
    help=f"Hashing algorithm to use (default: blake3_single), one of: {algos}",
)
args = parser.parse_args()
@@ -51,6 +51,7 @@ def session() -> Session:
    return sess


@pytest.mark.timeout(timeout=20, method="thread")
def test_basic_queue_download(tmp_path: Path, session: Session) -> None:
    events = set()

@@ -80,6 +81,7 @@ def test_basic_queue_download(tmp_path: Path, session: Session) -> None:
    queue.stop()


@pytest.mark.timeout(timeout=20, method="thread")
def test_errors(tmp_path: Path, session: Session) -> None:
    queue = DownloadQueueService(
        requests_session=session,
@@ -101,6 +103,7 @@ def test_errors(tmp_path: Path, session: Session) -> None:
    queue.stop()


@pytest.mark.timeout(timeout=20, method="thread")
def test_event_bus(tmp_path: Path, session: Session) -> None:
    event_bus = TestEventService()

@@ -136,6 +139,7 @@ def test_event_bus(tmp_path: Path, session: Session) -> None:
    queue.stop()


@pytest.mark.timeout(timeout=20, method="thread")
def test_broken_callbacks(tmp_path: Path, session: Session, capsys) -> None:
    queue = DownloadQueueService(
        requests_session=session,
@@ -5,6 +5,7 @@ Test the model installer

import platform
import uuid
from pathlib import Path
from typing import Any, Dict

import pytest
from pydantic import ValidationError
@@ -19,7 +20,7 @@ from invokeai.app.services.model_install import (
    ModelInstallServiceBase,
    URLModelSource,
)
from invokeai.app.services.model_records import UnknownModelException
from invokeai.app.services.model_records import ModelRecordChanges, UnknownModelException
from invokeai.backend.model_manager.config import BaseModelType, InvalidModelConfigException, ModelFormat, ModelType
from tests.backend.model_manager.model_manager_fixtures import *  # noqa F403

@@ -81,6 +82,18 @@ def test_install(
    assert model_record.source == embedding_file.as_posix()


def test_rename(
    mm2_installer: ModelInstallServiceBase, embedding_file: Path, mm2_app_config: InvokeAIAppConfig
) -> None:
    store = mm2_installer.record_store
    key = mm2_installer.install_path(embedding_file)
    model_record = store.get_model(key)
    assert model_record.path.endswith("sd-1/embedding/test_embedding.safetensors")
    store.update_model(key, ModelRecordChanges(name="new_name.safetensors", base=BaseModelType("sd-2")))
    new_model_record = mm2_installer.sync_model_path(key)
    assert new_model_record.path.endswith("sd-2/embedding/new_name.safetensors")


@pytest.mark.parametrize(
    "fixture_name,size,destination",
    [
@@ -276,48 +289,48 @@ def test_404_download(mm2_installer: ModelInstallServiceBase, mm2_app_config: In


# TODO: Fix bug in model install causing jobs to get installed multiple times then uncomment this test
# @pytest.mark.parametrize(
#     "model_params",
#     [
#         # SDXL, Lora
#         {
#             "repo_id": "InvokeAI-test/textual_inversion_tests::learned_embeds-steps-1000.safetensors",
#             "name": "test_lora",
#             "type": "embedding",
#         },
#         # SDXL, Lora - incorrect type
#         {
#             "repo_id": "InvokeAI-test/textual_inversion_tests::learned_embeds-steps-1000.safetensors",
#             "name": "test_lora",
#             "type": "lora",
#         },
#     ],
# )
# @pytest.mark.timeout(timeout=40, method="thread")
# def test_heuristic_import_with_type(mm2_installer: ModelInstallServiceBase, model_params: Dict[str, str]):
#     """Test whether or not type is respected on configs when passed to heuristic import."""
#     assert "name" in model_params and "type" in model_params
#     config1: Dict[str, Any] = {
#         "name": f"{model_params['name']}_1",
#         "type": model_params["type"],
#         "hash": "placeholder1",
#     }
#     config2: Dict[str, Any] = {
#         "name": f"{model_params['name']}_2",
#         "type": ModelType(model_params["type"]),
#         "hash": "placeholder2",
#     }
#     assert "repo_id" in model_params
#     install_job1 = mm2_installer.heuristic_import(source=model_params["repo_id"], config=config1)
#     mm2_installer.wait_for_job(install_job1, timeout=20)
#     if model_params["type"] != "embedding":
#         assert install_job1.errored
#         assert install_job1.error_type == "InvalidModelConfigException"
#         return
#     assert install_job1.complete
#     assert install_job1.config_out if model_params["type"] == "embedding" else not install_job1.config_out
@pytest.mark.parametrize(
    "model_params",
    [
        # SDXL, Lora
        {
            "repo_id": "InvokeAI-test/textual_inversion_tests::learned_embeds-steps-1000.safetensors",
            "name": "test_lora",
            "type": "embedding",
        },
        # SDXL, Lora - incorrect type
        {
            "repo_id": "InvokeAI-test/textual_inversion_tests::learned_embeds-steps-1000.safetensors",
            "name": "test_lora",
            "type": "lora",
        },
    ],
)
@pytest.mark.timeout(timeout=40, method="thread")
def test_heuristic_import_with_type(mm2_installer: ModelInstallServiceBase, model_params: Dict[str, str]):
    """Test whether or not type is respected on configs when passed to heuristic import."""
    assert "name" in model_params and "type" in model_params
    config1: Dict[str, Any] = {
        "name": f"{model_params['name']}_1",
        "type": model_params["type"],
        "hash": "placeholder1",
    }
    config2: Dict[str, Any] = {
        "name": f"{model_params['name']}_2",
        "type": ModelType(model_params["type"]),
        "hash": "placeholder2",
    }
    assert "repo_id" in model_params
    install_job1 = mm2_installer.heuristic_import(source=model_params["repo_id"], config=config1)
    mm2_installer.wait_for_job(install_job1, timeout=20)
    if model_params["type"] != "embedding":
        assert install_job1.errored
        assert install_job1.error_type == "InvalidModelConfigException"
        return
    assert install_job1.complete
    assert install_job1.config_out if model_params["type"] == "embedding" else not install_job1.config_out

# install_job2 = mm2_installer.heuristic_import(source=model_params["repo_id"], config=config2)
# mm2_installer.wait_for_job(install_job2, timeout=20)
# assert install_job2.complete
# assert install_job2.config_out if model_params["type"] == "embedding" else not install_job2.config_out
    install_job2 = mm2_installer.heuristic_import(source=model_params["repo_id"], config=config2)
    mm2_installer.wait_for_job(install_job2, timeout=20)
    assert install_job2.complete
    assert install_job2.config_out if model_params["type"] == "embedding" else not install_job2.config_out
@@ -2,13 +2,11 @@

import os
import shutil
import time
from pathlib import Path
from typing import Any, Dict, List

import pytest
from pydantic import BaseModel
from pytest import FixtureRequest
from requests.sessions import Session
from requests_testadapter import TestAdapter, TestSession

@@ -99,15 +97,11 @@ def mm2_app_config(mm2_root_dir: Path) -> InvokeAIAppConfig:


@pytest.fixture
def mm2_download_queue(mm2_session: Session, request: FixtureRequest) -> DownloadQueueServiceBase:
def mm2_download_queue(mm2_session: Session) -> DownloadQueueServiceBase:
    download_queue = DownloadQueueService(requests_session=mm2_session)
    download_queue.start()

    def stop_queue() -> None:
        download_queue.stop()

    request.addfinalizer(stop_queue)
    return download_queue
    yield download_queue
    download_queue.stop()


@pytest.fixture
@@ -130,7 +124,6 @@ def mm2_installer(
    mm2_app_config: InvokeAIAppConfig,
    mm2_download_queue: DownloadQueueServiceBase,
    mm2_session: Session,
    request: FixtureRequest,
) -> ModelInstallServiceBase:
    logger = InvokeAILogger.get_logger()
    db = create_mock_sqlite_database(mm2_app_config, logger)
@@ -145,13 +138,8 @@ def mm2_installer(
        session=mm2_session,
    )
    installer.start()

    def stop_installer() -> None:
        installer.stop()
        time.sleep(0.1)  # avoid error message from the logger when it is closed before thread prints final message

    request.addfinalizer(stop_installer)
    return installer
    yield installer
    installer.stop()


@pytest.fixture
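Aside: the fixture change swaps request.addfinalizer for pytest's yield-style teardown, which keeps setup and cleanup in one linear block. A minimal standalone illustration (class and test names are hypothetical):

import pytest

class Service:
    def __init__(self) -> None:
        self.running = False

    def start(self) -> None:
        self.running = True

    def stop(self) -> None:
        self.running = False

@pytest.fixture
def service():
    svc = Service()
    svc.start()
    yield svc    # the test body runs here
    svc.stop()   # teardown runs after the test, even if it fails

def test_service_runs(service: Service) -> None:
    assert service.running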
@@ -6,7 +6,13 @@ import pytest
 from omegaconf import OmegaConf
 from pydantic import ValidationError
 
-from invokeai.app.services.config.config_default import InvokeAIAppConfig, get_config, load_and_migrate_config
+from invokeai.app.services.config.config_default import (
+    DefaultInvokeAIAppConfig,
+    InvokeAIAppConfig,
+    get_config,
+    load_and_migrate_config,
+)
+from invokeai.frontend.cli.arg_parser import InvokeAIArgs
 
 v4_config = """
 schema_version: 4.0.0
@@ -59,14 +65,14 @@ def patch_rootdir(tmp_path: Path, monkeypatch: Any) -> None:
     monkeypatch.setenv("INVOKEAI_ROOT", str(tmp_path))
 
 
-def test_path_resolution_root_not_set():
+def test_path_resolution_root_not_set(patch_rootdir: None):
     """Test path resolutions when the root is not explicitly set."""
     config = InvokeAIAppConfig()
     expected_root = InvokeAIAppConfig.find_root()
     assert config.root_path == expected_root
 
 
-def test_read_config_from_file(tmp_path: Path):
+def test_read_config_from_file(tmp_path: Path, patch_rootdir: None):
     """Test reading configuration from a file."""
     temp_config_file = tmp_path / "temp_invokeai.yaml"
     temp_config_file.write_text(v4_config)
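This and the following hunks thread the `patch_rootdir` fixture through every config test by naming it as a parameter, so `INVOKEAI_ROOT` points at a throwaway directory before any `InvokeAIAppConfig` resolves paths. A minimal sketch of the mechanism (the `patch_env` fixture name is hypothetical; the pattern matches `patch_rootdir` above):

```python
import os

import pytest


@pytest.fixture
def patch_env(tmp_path, monkeypatch: pytest.MonkeyPatch) -> None:
    # monkeypatch.setenv is undone automatically when the test finishes
    monkeypatch.setenv("INVOKEAI_ROOT", str(tmp_path))


def test_root_is_patched(patch_env: None) -> None:
    # merely listing `patch_env` as a parameter makes pytest run the fixture
    # first, even though the test never uses its (None) return value
    assert "INVOKEAI_ROOT" in os.environ
```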
@@ -76,7 +82,7 @@ def test_read_config_from_file(tmp_path: Path):
     assert config.port == 8080
 
 
-def test_migrate_v3_config_from_file(tmp_path: Path):
+def test_migrate_v3_config_from_file(tmp_path: Path, patch_rootdir: None):
     """Test reading configuration from a file."""
     temp_config_file = tmp_path / "temp_invokeai.yaml"
     temp_config_file.write_text(v3_config)
@@ -92,7 +98,7 @@ def test_migrate_v3_config_from_file(tmp_path: Path):
     assert not hasattr(config, "esrgan")
 
 
-def test_migrate_v3_backup(tmp_path: Path):
+def test_migrate_v3_backup(tmp_path: Path, patch_rootdir: None):
     """Test the backup of the config file."""
     temp_config_file = tmp_path / "temp_invokeai.yaml"
     temp_config_file.write_text(v3_config)
@@ -102,7 +108,7 @@ def test_migrate_v3_backup(tmp_path: Path):
     assert temp_config_file.with_suffix(".yaml.bak").read_text() == v3_config
 
 
-def test_failed_migrate_backup(tmp_path: Path):
+def test_failed_migrate_backup(tmp_path: Path, patch_rootdir: None):
     """Test the failed migration of the config file."""
     temp_config_file = tmp_path / "temp_invokeai.yaml"
     temp_config_file.write_text(v3_config_with_bad_values)
@@ -115,7 +121,7 @@ def test_failed_migrate_backup(tmp_path: Path):
     assert temp_config_file.read_text() == v3_config_with_bad_values
 
 
-def test_bails_on_invalid_config(tmp_path: Path):
+def test_bails_on_invalid_config(tmp_path: Path, patch_rootdir: None):
     """Test reading configuration from a file."""
     temp_config_file = tmp_path / "temp_invokeai.yaml"
     temp_config_file.write_text(invalid_config)
@@ -124,7 +130,7 @@ def test_bails_on_invalid_config(tmp_path: Path):
         load_and_migrate_config(temp_config_file)
 
 
-def test_bails_on_config_with_unsupported_version(tmp_path: Path):
+def test_bails_on_config_with_unsupported_version(tmp_path: Path, patch_rootdir: None):
     """Test reading configuration from a file."""
     temp_config_file = tmp_path / "temp_invokeai.yaml"
     temp_config_file.write_text(invalid_v5_config)
@@ -133,7 +139,7 @@ def test_bails_on_config_with_unsupported_version(tmp_path: Path):
         load_and_migrate_config(temp_config_file)
 
 
-def test_write_config_to_file():
+def test_write_config_to_file(patch_rootdir: None):
     """Test writing configuration to a file, checking for correct output."""
     with TemporaryDirectory() as tmpdir:
         temp_config_path = Path(tmpdir) / "invokeai.yaml"
@@ -148,7 +154,7 @@ def test_write_config_to_file():
     assert "port: 8080" in content
 
 
-def test_update_config_with_dict():
+def test_update_config_with_dict(patch_rootdir: None):
     """Test updating the config with a dictionary."""
     config = InvokeAIAppConfig()
     update_dict = {"host": "10.10.10.10", "port": 6060}
@@ -157,7 +163,7 @@ def test_update_config_with_dict():
     assert config.port == 6060
 
 
-def test_update_config_with_object():
+def test_update_config_with_object(patch_rootdir: None):
     """Test updating the config with another config object."""
     config = InvokeAIAppConfig()
     new_config = InvokeAIAppConfig(host="10.10.10.10", port=6060)
@@ -166,7 +172,7 @@ def test_update_config_with_object():
     assert config.port == 6060
 
 
-def test_set_and_resolve_paths():
+def test_set_and_resolve_paths(patch_rootdir: None):
     """Test setting root and resolving paths based on it."""
     with TemporaryDirectory() as tmpdir:
         config = InvokeAIAppConfig()
@@ -175,11 +181,62 @@
     assert config.db_path == Path(tmpdir).resolve() / "databases" / "invokeai.db"
 
 
-def test_singleton_behavior():
+def test_singleton_behavior(patch_rootdir: None):
     """Test that get_config always returns the same instance."""
     get_config.cache_clear()
     config1 = get_config()
     config2 = get_config()
     assert config1 is config2
     get_config.cache_clear()
 
 
+def test_default_config(patch_rootdir: None):
+    """Test that the default config is as expected."""
+    config = DefaultInvokeAIAppConfig()
+    assert config.host == "127.0.0.1"
+
+
+def test_env_vars(patch_rootdir: None, monkeypatch: pytest.MonkeyPatch, tmp_path: Path):
+    """Test that environment variables are merged into the config"""
+    monkeypatch.setenv("INVOKEAI_ROOT", str(tmp_path))
+    monkeypatch.setenv("INVOKEAI_HOST", "1.2.3.4")
+    monkeypatch.setenv("INVOKEAI_PORT", "1234")
+    config = InvokeAIAppConfig()
+    assert config.host == "1.2.3.4"
+    assert config.port == 1234
+    assert config.root_path == tmp_path
+
+
+def test_get_config_writing(patch_rootdir: None, monkeypatch: pytest.MonkeyPatch, tmp_path: Path):
+    """Test that get_config writes the appropriate files to disk"""
+    # Trick the config into thinking it has already parsed args - this triggers the writing of the config file
+    InvokeAIArgs.did_parse = True
+
+    monkeypatch.setenv("INVOKEAI_ROOT", str(tmp_path))
+    monkeypatch.setenv("INVOKEAI_HOST", "1.2.3.4")
+    get_config.cache_clear()
+    config = get_config()
+    get_config.cache_clear()
+    config_file_path = tmp_path / "invokeai.yaml"
+    example_file_path = config_file_path.with_suffix(".example.yaml")
+    assert config.config_file_path == config_file_path
+    assert config_file_path.exists()
+    assert example_file_path.exists()
+
+    # The example file should have the default values
+    example_file_content = example_file_path.read_text()
+    assert "host: 127.0.0.1" in example_file_content
+    assert "port: 9090" in example_file_content
+
+    # It should also have the `remote_api_tokens` key
+    assert "remote_api_tokens" in example_file_content
+
+    # Neither env vars nor default values should be written to the config file
+    config_file_content = config_file_path.read_text()
+    assert "host" not in config_file_content
+
+    # Undo our change to the singleton class
+    InvokeAIArgs.did_parse = False
+
+
 @pytest.mark.xfail(
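`test_singleton_behavior` together with the `cache_clear()` calls implies `get_config` is a memoized accessor. A minimal sketch of that singleton pattern, assuming a `functools.lru_cache`-style decorator; the `AppConfig` class below is a hypothetical stand-in for `InvokeAIAppConfig`, and the real decorator in `config_default.py` may differ:

```python
from functools import lru_cache


class AppConfig:
    """Hypothetical stand-in for InvokeAIAppConfig."""

    host: str = "127.0.0.1"


@lru_cache(maxsize=1)
def get_config() -> AppConfig:
    # the first call constructs the config; later calls return the cached one
    return AppConfig()


assert get_config() is get_config()  # same instance on every call

first = get_config()
get_config.cache_clear()  # what the tests call to reset the singleton
assert get_config() is not first  # after clearing, a new instance is built
```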
@@ -212,7 +269,9 @@ def test_deny_nodes(patch_rootdir):
     """
     )
     # must parse config before importing Graph, so its nodes union uses the config
+    get_config.cache_clear()
     conf = get_config()
+    get_config.cache_clear()
     conf.merge_from_file(conf=allow_deny_nodes_conf, argv=[])
     from invokeai.app.services.shared.graph import Graph
@@ -16,7 +16,7 @@ test_cases: list[tuple[HASHING_ALGORITHMS, str]] = [
         "sha512",
         "sha512:c4a10476b21e00042f638ad5755c561d91f2bb599d3504d25409495e1c7eda94543332a1a90fbb4efdaf9ee462c33e0336b5eae4acfb1fa0b186af452dd67dc6",
     ),
-    ("blake3", "blake3:ce3f0c5f3c05d119f4a5dcaf209b50d3149046a0d3a9adee9fed4c83cad6b4d0"),
+    ("blake3_multi", "blake3:ce3f0c5f3c05d119f4a5dcaf209b50d3149046a0d3a9adee9fed4c83cad6b4d0"),
     ("blake3_single", "blake3:ce3f0c5f3c05d119f4a5dcaf209b50d3149046a0d3a9adee9fed4c83cad6b4d0"),
 ]
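Each expected value pairs an algorithm name with an `algorithm:hexdigest` string, and the renamed `blake3_multi` keeps the plain `blake3:` prefix because multi- and single-threaded hashing yield the same digest. A sketch of producing such prefixed digests with the standard library's `hashlib`, which covers the md5/sha variants used here (blake3 itself needs a third-party package); this is an illustration, not InvokeAI's `ModelHash` implementation:

```python
import hashlib
from pathlib import Path


def hash_file(path: Path, algorithm: str = "sha256") -> str:
    """Return '<algorithm>:<hexdigest>' for a file, read in 1 MiB chunks."""
    h = hashlib.new(algorithm)
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(2**20), b""):
            h.update(chunk)
    return f"{algorithm}:{h.hexdigest()}"


# e.g. hash_file(Path("model.bin"), "sha512") -> "sha512:c4a10476..."
```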
@@ -29,7 +29,7 @@ def test_model_hash_hashes_file(tmp_path: Path, algorithm: HASHING_ALGORITHMS, expected_hash: str):
     assert hash_ == expected_hash
 
 
-@pytest.mark.parametrize("algorithm", ["md5", "sha1", "sha256", "sha512", "blake3", "blake3_single"])
+@pytest.mark.parametrize("algorithm", ["md5", "sha1", "sha256", "sha512", "blake3_multi", "blake3_single"])
 def test_model_hash_hashes_dir(tmp_path: Path, algorithm: HASHING_ALGORITHMS):
     model_hash = ModelHash(algorithm)
     files = [Path(tmp_path, f"{i}.bin") for i in range(5)]
@@ -58,7 +58,7 @@ def test_model_hash_hashes_dir(tmp_path: Path, algorithm: HASHING_ALGORITHMS):
         ("sha1", "sha1:"),
         ("sha256", "sha256:"),
         ("sha512", "sha512:"),
-        ("blake3", "blake3:"),
+        ("blake3_multi", "blake3:"),
         ("blake3_single", "blake3:"),
     ],
 )
@@ -67,7 +67,7 @@ def test_model_hash_gets_prefix(algorithm: HASHING_ALGORITHMS, expected_prefix: str):
 
 
 def test_model_hash_blake3_matches_blake3_single(tmp_path: Path):
-    model_hash = ModelHash("blake3")
+    model_hash = ModelHash("blake3_multi")
     model_hash_simple = ModelHash("blake3_single")
 
     file = tmp_path / "test.bin"
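Why must `blake3_multi` and `blake3_single` agree? Thread count only changes how the work is split across cores; the BLAKE3 digest is defined independently of it. A sketch under the assumption that the third-party `blake3` PyPI package is available and that its hasher accepts a `max_threads` keyword with a `blake3.AUTO` constant:

```python
# Assumes `pip install blake3`; max_threads and AUTO are features of that
# package's hasher, used here purely for illustration.
from blake3 import blake3

data = b"some model weights " * 4096

multi = blake3(data, max_threads=blake3.AUTO).hexdigest()
single = blake3(data).hexdigest()  # default is single-threaded

# Threading changes throughput, never the digest:
assert multi == single
```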