Compare commits


1 Commit

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Lincoln Stein | 76adcc122b | remove deprecated binary installer | 2023-02-27 00:24:52 -05:00 |
123 changed files with 4134 additions and 7034 deletions


@@ -1,6 +0,0 @@
[run]
omit='.env/*'
source='.'
[report]
show_missing = true


@@ -1,8 +1,5 @@
root = true
# All files
[*]
max_line_length = 80
charset = utf-8
end_of_line = lf
indent_size = 2
@@ -13,18 +10,3 @@ trim_trailing_whitespace = true
# Python
[*.py]
indent_size = 4
max_line_length = 120
# css
[*.css]
indent_size = 4
# flake8
[.flake8]
indent_size = 4
# Markdown MkDocs
[docs/**/*.md]
max_line_length = 80
indent_size = 4
indent_style = unset

.flake8

@@ -1,37 +0,0 @@
[flake8]
max-line-length = 120
extend-ignore =
# See https://github.com/PyCQA/pycodestyle/issues/373
E203,
# use Bugbear's B950 instead
E501,
# from black repo https://github.com/psf/black/blob/main/.flake8
E266, W503, B907
extend-select =
# Bugbear line length
B950
extend-exclude =
scripts/orig_scripts/*
ldm/models/*
ldm/modules/*
ldm/data/*
ldm/generate.py
ldm/util.py
ldm/simplet2i.py
per-file-ignores =
# B950 line too long
# W605 invalid escape sequence
# F841 assigned to but never used
# F401 imported but unused
tests/test_prompt_parser.py: B950, W605, F401
tests/test_textual_inversion.py: F841, B950
# B023 Function definition does not bind loop variable
scripts/legacy_api.py: F401, B950, B023, F841
ldm/invoke/__init__.py: F401
# B010 Do not call setattr with a constant attribute value
ldm/invoke/server_legacy.py: B010
# =====================
# flake-quote settings:
# =====================
# Set this to match black style:
inline-quotes = double

.github/CODEOWNERS

@@ -2,60 +2,50 @@
/.github/workflows/ @mauwii @lstein @blessedcoolant
# documentation
/docs/ @lstein @mauwii @blessedcoolant
mkdocs.yml @mauwii @lstein
/docs/ @lstein @mauwii @tildebyte @blessedcoolant
mkdocs.yml @lstein @mauwii @blessedcoolant
# installation and configuration
/pyproject.toml @mauwii @lstein @ebr
/docker/ @mauwii
/pyproject.toml @mauwii @lstein @ebr @blessedcoolant
/docker/ @mauwii @lstein @blessedcoolant
/scripts/ @ebr @lstein @blessedcoolant
/installer/ @ebr @lstein
ldm/invoke/config @lstein @ebr
invokeai/assets @lstein @blessedcoolant
/installer/ @ebr @lstein @tildebyte @blessedcoolant
ldm/invoke/config @lstein @ebr @blessedcoolant
invokeai/assets @lstein @ebr @blessedcoolant
invokeai/configs @lstein @ebr @blessedcoolant
/ldm/invoke/_version.py @lstein @blessedcoolant
# web ui
/invokeai/frontend @blessedcoolant @psychedelicious
/invokeai/backend @blessedcoolant @psychedelicious
/invokeai/frontend @blessedcoolant @psychedelicious @lstein
/invokeai/backend @blessedcoolant @psychedelicious @lstein
# generation and model management
/ldm/*.py @lstein @blessedcoolant
/ldm/generate.py @lstein @keturn
/ldm/generate.py @lstein @keturn @blessedcoolant
/ldm/invoke/args.py @lstein @blessedcoolant
/ldm/invoke/ckpt* @lstein @blessedcoolant
/ldm/invoke/ckpt_generator @lstein @blessedcoolant
/ldm/invoke/CLI.py @lstein @blessedcoolant
/ldm/invoke/config @lstein @ebr @mauwii @blessedcoolant
/ldm/invoke/generator @keturn @damian0815
/ldm/invoke/generator @keturn @damian0815 @blessedcoolant
/ldm/invoke/globals.py @lstein @blessedcoolant
/ldm/invoke/merge_diffusers.py @lstein @blessedcoolant
/ldm/invoke/model_manager.py @lstein @blessedcoolant
/ldm/invoke/txt2mask.py @lstein @blessedcoolant
/ldm/invoke/patchmatch.py @Kyle0654 @lstein
/ldm/invoke/patchmatch.py @Kyle0654 @blessedcoolant @lstein
/ldm/invoke/restoration @lstein @blessedcoolant
# attention, textual inversion, model configuration
/ldm/models @damian0815 @keturn @blessedcoolant
/ldm/modules/textual_inversion_manager.py @lstein @blessedcoolant
/ldm/modules/attention.py @damian0815 @keturn
/ldm/modules/diffusionmodules @damian0815 @keturn
/ldm/modules/distributions @damian0815 @keturn
/ldm/modules/ema.py @damian0815 @keturn
/ldm/modules/embedding_manager.py @lstein
/ldm/modules/encoders @damian0815 @keturn
/ldm/modules/image_degradation @damian0815 @keturn
/ldm/modules/losses @damian0815 @keturn
/ldm/modules/x_transformer.py @damian0815 @keturn
/ldm/models @damian0815 @keturn @lstein @blessedcoolant
/ldm/modules @damian0815 @keturn @lstein @blessedcoolant
# Nodes
apps/ @Kyle0654 @jpphoto
apps/ @Kyle0654 @lstein @blessedcoolant
# legacy REST API
# these are dead code
#/ldm/invoke/pngwriter.py @CapableWeb
#/ldm/invoke/server_legacy.py @CapableWeb
#/scripts/legacy_api.py @CapableWeb
#/tests/legacy_tests.sh @CapableWeb
# is CapableWeb still engaged?
/ldm/invoke/pngwriter.py @CapableWeb @lstein @blessedcoolant
/ldm/invoke/server_legacy.py @CapableWeb @lstein @blessedcoolant
/scripts/legacy_api.py @CapableWeb @lstein @blessedcoolant
/tests/legacy_tests.sh @CapableWeb @lstein @blessedcoolant


@@ -9,10 +9,6 @@ jobs:
mkdocs-material:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
env:
REPO_URL: '${{ github.server_url }}/${{ github.repository }}'
REPO_NAME: '${{ github.repository }}'
SITE_URL: 'https://${{ github.repository_owner }}.github.io/InvokeAI'
steps:
- name: checkout sources
uses: actions/checkout@v3
@@ -23,15 +19,11 @@ jobs:
uses: actions/setup-python@v4
with:
python-version: '3.10'
cache: pip
cache-dependency-path: pyproject.toml
- name: install requirements
env:
PIP_USE_PEP517: 1
run: |
python -m \
pip install ".[docs]"
pip install -r docs/requirements-mkdocs.txt
- name: confirm buildability
run: |

.gitignore

@@ -68,7 +68,6 @@ htmlcov/
.cache
nosetests.xml
coverage.xml
cov.xml
*.cover
*.py,cover
.hypothesis/


@@ -1,41 +0,0 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
- repo: https://github.com/psf/black
rev: 23.1.0
hooks:
- id: black
- repo: https://github.com/pycqa/isort
rev: 5.12.0
hooks:
- id: isort
- repo: https://github.com/PyCQA/flake8
rev: 6.0.0
hooks:
- id: flake8
additional_dependencies:
- flake8-black
- flake8-bugbear
- flake8-comprehensions
- flake8-simplify
- repo: https://github.com/pre-commit/mirrors-prettier
rev: 'v3.0.0-alpha.4'
hooks:
- id: prettier
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
hooks:
- id: check-added-large-files
- id: check-executables-have-shebangs
- id: check-shebang-scripts-are-executable
- id: check-merge-conflict
- id: check-symlinks
- id: check-toml
- id: end-of-file-fixer
- id: no-commit-to-branch
args: ['--branch', 'main']
- id: trailing-whitespace


@@ -1,14 +0,0 @@
invokeai/frontend/.husky
invokeai/frontend/patches
# Ignore artifacts:
build
coverage
static
invokeai/frontend/dist
# Ignore all HTML files:
*.html
# Ignore deprecated docs
docs/installation/deprecated_documentation


@@ -1,9 +1,9 @@
embeddedLanguageFormatting: auto
endOfLine: lf
singleQuote: true
semi: true
trailingComma: es5
tabWidth: 2
useTabs: false
singleQuote: true
quoteProps: as-needed
embeddedLanguageFormatting: auto
overrides:
- files: '*.md'
options:
@@ -11,9 +11,3 @@ overrides:
printWidth: 80
parser: markdown
cursorOffset: -1
- files: docs/**/*.md
options:
tabWidth: 4
- files: 'invokeai/frontend/public/locales/*.json'
options:
tabWidth: 4


@@ -1,5 +0,0 @@
[pytest]
DJANGO_SETTINGS_MODULE = webtas.settings
; python_files = tests.py test_*.py *_tests.py
addopts = --cov=. --cov-config=.coveragerc --cov-report xml:cov.xml


@@ -145,7 +145,7 @@ not supported.
_For Linux with an AMD GPU:_
```sh
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.2
```
_For Macintoshes, either Intel or M1/M2:_


@@ -1,5 +0,0 @@
{
"MD046": false,
"MD007": false,
"MD030": false
}


@@ -1,93 +0,0 @@
# Invoke.AI Architecture
```mermaid
flowchart TB
subgraph apps[Applications]
webui[WebUI]
cli[CLI]
subgraph webapi[Web API]
api[HTTP API]
sio[Socket.IO]
end
end
subgraph invoke[Invoke]
direction LR
invoker
services
sessions
invocations
end
subgraph core[AI Core]
Generate
end
webui --> webapi
webapi --> invoke
cli --> invoke
invoker --> services & sessions
invocations --> services
sessions --> invocations
services --> core
%% Styles
classDef sg fill:#5028C8,font-weight:bold,stroke-width:2,color:#fff,stroke:#14141A
classDef default stroke-width:2px,stroke:#F6B314,color:#fff,fill:#14141A
class apps,webapi,invoke,core sg
```
## Applications
Applications are built on top of the invoke framework. They should construct an `invoker` and then interact through it. They should avoid interacting directly with core code in order to support a variety of configurations.
### Web UI
The Web UI is built on top of an HTTP API built with [FastAPI](https://fastapi.tiangolo.com/) and [Socket.IO](https://socket.io/). The frontend code is found in `/frontend` and the backend code is found in `/ldm/invoke/app/api_app.py` and `/ldm/invoke/app/api/`. The code is further organized as such:
| Component | Description |
| --- | --- |
| api_app.py | Sets up the API app, annotates the OpenAPI spec with additional data, and runs the API |
| dependencies | Creates all invoker services and the invoker, and provides them to the API |
| events | An eventing system that could in the future be adapted to support horizontal scale-out |
| sockets | The Socket.IO interface - handles listening to and emitting session events (events are defined in the events service module) |
| routers | API definitions for different areas of API functionality |
### CLI
The CLI is built automatically from invocation metadata, and also supports invocation piping and auto-linking. Code is available in `/ldm/invoke/app/cli_app.py`.
## Invoke
The Invoke framework provides the interface to the underlying AI systems and is built with flexibility and extensibility in mind. There are four major concepts: invoker, sessions, invocations, and services.
### Invoker
The invoker (`/ldm/invoke/app/services/invoker.py`) is the primary interface through which applications interact with the framework. Its primary purpose is to create, manage, and invoke sessions. It also maintains two sets of services:
- **invocation services**, which are used by invocations to interact with core functionality.
- **invoker services**, which are used by the invoker to manage sessions and manage the invocation queue.
### Sessions
Invocations and links between them form a graph, which is maintained in a session. Sessions can be queued for invocation, which will execute their graph (either the next ready invocation, or all invocations). Sessions also maintain execution history for the graph (including storage of any outputs). An invocation may be added to a session at any time, and there is capability to add an entire graph at once, as well as to automatically link new invocations to previous invocations. Invocations cannot be deleted or modified once added.
The session graph does not support looping. This is left as an application problem to prevent additional complexity in the graph.
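Below is a minimal, self-contained sketch of the session idea described above, written only to illustrate the "next ready invocation" behaviour. The names used here (`ToySession`, `next_ready`, `invoke_all`) are assumptions for this example and are not the InvokeAI API.
```py
from __future__ import annotations

from dataclasses import dataclass, field


@dataclass
class ToySession:
    """A tiny acyclic graph of named steps plus their execution history."""

    deps: dict[str, set[str]] = field(default_factory=dict)     # step -> prerequisites
    results: dict[str, str] = field(default_factory=dict)       # step -> stored output

    def add(self, name: str, depends_on: set[str] | None = None) -> None:
        self.deps[name] = set(depends_on or ())

    def next_ready(self) -> str | None:
        """Return a step whose prerequisites have all produced outputs."""
        for name, prereqs in self.deps.items():
            if name not in self.results and prereqs.issubset(self.results):
                return name
        return None

    def invoke_all(self) -> None:
        """Execute ready steps until the graph is exhausted."""
        while (step := self.next_ready()) is not None:
            self.results[step] = f"output of {step}"  # stand-in for real execution


session = ToySession()
session.add("txt2img")
session.add("upscale", depends_on={"txt2img"})
session.invoke_all()
print(session.results)  # {'txt2img': 'output of txt2img', 'upscale': 'output of upscale'}
```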
### Invocations
Invocations represent individual units of execution, with inputs and outputs. All invocations are located in `/ldm/invoke/app/invocations`, and are all automatically discovered and made available in the applications. These are the primary way to expose new functionality in Invoke.AI, and the [implementation guide](INVOCATIONS.md) explains how to add new invocations.
### Services
Services provide invocations with access to AI Core functionality and other necessary functionality (e.g. image storage). These are available in `/ldm/invoke/app/services`. As a general rule, new services should provide an interface as an abstract base class, and may provide a lightweight local implementation by default in their module. The goal for all services should be to enable the usage of different implementations (e.g. using cloud storage for image storage), but should not load any module dependencies unless that implementation has been used (i.e. don't import anything that won't be used, especially if it's expensive to import).
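As a rough sketch of that pattern (illustrative only: `ImageStorageBase` and `InMemoryImageStorage` are made-up names, not the actual InvokeAI service interfaces), a service module might pair an abstract interface with a lightweight default implementation:
```py
from abc import ABC, abstractmethod


class ImageStorageBase(ABC):
    """Interface that invocations program against; implementations are swappable."""

    @abstractmethod
    def save(self, name: str, data: bytes) -> None:
        ...

    @abstractmethod
    def get(self, name: str) -> bytes:
        ...


class InMemoryImageStorage(ImageStorageBase):
    """Lightweight default implementation that lives next to the interface.
    A cloud-backed variant could subclass the same base and import its SDK
    lazily inside its own methods, so unused dependencies are never loaded."""

    def __init__(self) -> None:
        self._images: dict[str, bytes] = {}

    def save(self, name: str, data: bytes) -> None:
        self._images[name] = data

    def get(self, name: str) -> bytes:
        return self._images[name]
```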
## AI Core
The AI Core is represented by the rest of the code base (i.e. the code outside of `/ldm/invoke/app/`).


@@ -1,105 +0,0 @@
# Invocations
Invocations represent a single operation, its inputs, and its outputs. These operations and their outputs can be chained together to generate and modify images.
## Creating a new invocation
To create a new invocation, either find the appropriate module file in `/ldm/invoke/app/invocations` to add your invocation to, or create a new one in that folder. All invocations in that folder will be discovered and made available to the CLI and API automatically. Invocations make use of [typing](https://docs.python.org/3/library/typing.html) and [pydantic](https://pydantic-docs.helpmanual.io/) for validation and integration into the CLI and API.
An invocation looks like this:
```py
class UpscaleInvocation(BaseInvocation):
"""Upscales an image."""
type: Literal['upscale'] = 'upscale'
# Inputs
image: Union[ImageField,None] = Field(description="The input image")
strength: float = Field(default=0.75, gt=0, le=1, description="The strength")
level: Literal[2,4] = Field(default=2, description = "The upscale level")
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.services.images.get(self.image.image_type, self.image.image_name)
results = context.services.generate.upscale_and_reconstruct(
image_list = [[image, 0]],
upscale = (self.level, self.strength),
strength = 0.0, # GFPGAN strength
save_original = False,
image_callback = None,
)
# Results are image and seed, unwrap for now
# TODO: can this return multiple results?
image_type = ImageType.RESULT
image_name = context.services.images.create_name(context.graph_execution_state_id, self.id)
context.services.images.save(image_type, image_name, results[0][0])
return ImageOutput(
image = ImageField(image_type = image_type, image_name = image_name)
)
```
Each portion is important to implement correctly.
### Class definition and type
```py
class UpscaleInvocation(BaseInvocation):
"""Upscales an image."""
type: Literal['upscale'] = 'upscale'
```
All invocations must derive from `BaseInvocation`. They should have a docstring that declares what they do in a single, short line. They should also have a `type` with a type hint that's `Literal["command_name"]`, where `command_name` is what the user will type on the CLI or use in the API to create this invocation. The `command_name` must be unique. The `type` must be assigned to the value of the literal in the type hint.
### Inputs
```py
# Inputs
image: Union[ImageField,None] = Field(description="The input image")
strength: float = Field(default=0.75, gt=0, le=1, description="The strength")
level: Literal[2,4] = Field(default=2, description="The upscale level")
```
Inputs consist of three parts: a name, a type hint, and a `Field` with default, description, and validation information. For example:
| Part | Value | Description |
| ---- | ----- | ----------- |
| Name | `strength` | This field is referred to as `strength` |
| Type Hint | `float` | This field must be of type `float` |
| Field | `Field(default=0.75, gt=0, le=1, description="The strength")` | The default value is `0.75`, the value must be in the range (0,1], and help text will show "The strength" for this field. |
Notice that `image` has type `Union[ImageField,None]`. The `Union` allows this field to be parsed with `None` as a value, which enables linking to previous invocations. All fields should either provide a default value or allow `None` as a value, so that they can be overwritten with a linked output from another invocation.
The special type `ImageField` is also used here. All images are passed as `ImageField`, which protects them from pydantic validation errors (since images only ever come from links).
Finally, note that for all linking, the `type` of the linked fields must match. If the `name` also matches, then the field can be **automatically linked** to the output of a previous invocation with that name and type.
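As a hedged illustration of that matching rule, the sketch below uses simplified stand-ins (a hypothetical `BlurOutput` and `SharpenInvocation`, plus a toy `ImageField`) rather than the real `BaseInvocation`/`BaseInvocationOutput` classes. Because the upstream output field and the downstream input field share the name `image` and a compatible type, the input is a candidate for automatic linking:
```py
from typing import Literal, Union

from pydantic import BaseModel, Field


class ImageField(BaseModel):
    """Toy stand-in for the real ImageField."""

    image_type: str = "result"
    image_name: str = ""


class BlurOutput(BaseModel):
    """Stand-in for a BaseInvocationOutput subclass produced upstream."""

    type: Literal["blur_output"] = "blur_output"
    image: ImageField = Field(default_factory=ImageField, description="The output image")


class SharpenInvocation(BaseModel):
    """Stand-in for a downstream BaseInvocation subclass."""

    type: Literal["sharpen"] = "sharpen"
    # Same field name ("image") and a compatible type, so this input could be
    # linked automatically to BlurOutput.image from the previous invocation.
    image: Union[ImageField, None] = Field(default=None, description="The input image")
    strength: float = Field(default=0.5, gt=0, le=1, description="The strength")
```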
### Invoke Function
```py
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.services.images.get(self.image.image_type, self.image.image_name)
results = context.services.generate.upscale_and_reconstruct(
image_list = [[image, 0]],
upscale = (self.level, self.strength),
strength = 0.0, # GFPGAN strength
save_original = False,
image_callback = None,
)
# Results are image and seed, unwrap for now
image_type = ImageType.RESULT
image_name = context.services.images.create_name(context.graph_execution_state_id, self.id)
context.services.images.save(image_type, image_name, results[0][0])
return ImageOutput(
image = ImageField(image_type = image_type, image_name = image_name)
)
```
The `invoke` function is the last portion of an invocation. It is provided an `InvocationContext` which contains services to perform work as well as a `session_id` for use as needed. It should return a class with output values that derives from `BaseInvocationOutput`.
Before being called, the invocation will have all of its fields set from defaults, inputs, and finally links (overriding in that order).
Assume that this invocation may be running simultaneously with other invocations, may be running on another machine, or in other interesting scenarios. If you need functionality, please provide it as a service in the `InvocationServices` class, and make sure it can be overridden.
### Outputs
```py
class ImageOutput(BaseInvocationOutput):
"""Base class for invocations that output an image"""
type: Literal['image'] = 'image'
image: ImageField = Field(default=None, description="The output image")
```
Output classes look like an invocation class without the invoke method. Prefer to use an existing output class if available, and prefer to name inputs the same as outputs when possible, to promote automatic invocation linking.


@@ -1,5 +1,5 @@
---
title: Styles and Subjects
title: Concepts Library
---
# :material-library-shelves: The Hugging Face Concepts Library and Importing Textual Inversion files
@@ -25,14 +25,10 @@ library which downloads and merges TI files automatically upon request. You can
also install your own or others' TI files by placing them in a designated
directory.
You may also be interested in using [LoRA Models](LORAS.md) to
generate images with specialized styles and subjects.
### An Example
Here are a few examples to illustrate how Textual Inversion works. All
these images were generated using the command-line client and the
Stable Diffusion 1.5 model:
Here are a few examples to illustrate how it works. All these images were
generated using the command-line client and the Stable Diffusion 1.5 model:
| Japanese gardener | Japanese gardener <ghibli-face> | Japanese gardener <hoi4-leaders> | Japanese gardener <cartoona-animals> |
| :--------------------------------: | :-----------------------------------: | :------------------------------------: | :----------------------------------------: |
@@ -113,50 +109,21 @@ For example, TI files generated by the Hugging Face toolkit share the named
`learned_embedding.bin`. You can use subdirectories to keep them distinct.
At startup time, InvokeAI will scan the `embeddings` directory and load any TI
files it finds there. At startup you will see messages similar to these:
files it finds there. At startup you will see a message similar to this one:
```bash
>> Loading embeddings from /data/lstein/invokeai-2.3/embeddings
| Loading v1 embedding file: style-hamunaptra
| Loading v4 embedding file: embeddings/learned_embeds-steps-500.bin
| Loading v2 embedding file: lfa
| Loading v3 embedding file: easynegative
| Loading v1 embedding file: rem_rezero
| Loading v2 embedding file: midj-strong
| Loading v4 embedding file: anime-background-style-v2/learned_embeds.bin
| Loading v4 embedding file: kamon-style/learned_embeds.bin
** Notice: kamon-style/learned_embeds.bin was trained on a model with an incompatible token dimension: 768 vs 1024.
>> Textual inversion triggers: <anime-background-style-v2>, <easynegative>, <lfa>, <midj-strong>, <milo>, Rem3-2600, Style-Hamunaptra
>> Current embedding manager terms: *, <HOI4-Leader>, <princess-knight>
```
Textual Inversion embeddings trained on version 1.X stable diffusion
models are incompatible with version 2.X models and vice-versa.
Note the `*` trigger term. This is a placeholder term that many early TI
tutorials taught people to use rather than a more descriptive term.
Unfortunately, if you have multiple TI files that all use this term, only the
first one loaded will be triggered by use of the term.
After the embeddings load, InvokeAI will print out a list of all the
recognized trigger terms. To trigger the term, include it in the
prompt exactly as written, including angle brackets if any and
respecting the capitalization.
There are at least four different embedding file formats, and each uses
a different convention for the trigger terms. In some cases, the
trigger term is specified in the file contents and may or may not be
surrounded by angle brackets. In the example above, `Rem3-2600`,
`Style-Hamunaptra`, and `<midj-strong>` were specified this way and
there is no easy way to change the term.
In other cases the trigger term is not contained within the embedding
file. In this case, InvokeAI constructs a trigger term consisting of
the base name of the file (without the file extension) surrounded by
angle brackets. In the example above `<easynegative>` is such a file
(the filename was `easynegative.safetensors`). In such cases, you can
change the trigger term simply by renaming the file.
## Training your own Textual Inversion models
InvokeAI provides a script that lets you train your own Textual
Inversion embeddings using a small number (about a half-dozen) images
of your desired style or subject. Please see [Textual
Inversion](TEXTUAL_INVERSION.md) for details.
To avoid this problem, you can use the `merge_embeddings.py` script to merge two
or more TI files together. If it encounters a collision of terms, the script
will prompt you to select new terms that do not collide. See
[Textual Inversion](TEXTUAL_INVERSION.md) for details.
## Further Reading


@@ -1,100 +0,0 @@
---
title: Low-Rank Adaptation (LoRA) Models
---
# :material-library-shelves: Using Low-Rank Adaptation (LoRA) Models
## Introduction
LoRA is a technique for fine-tuning Stable Diffusion models using much
less time and memory than traditional training techniques. The
resulting model files are much smaller than full model files, and can
be used to generate specialized styles and subjects.
LoRAs are built on top of Stable Diffusion v1.x or 2.x checkpoint or
diffusers models. To load a LoRA, you include its name in the text
prompt using a simple syntax described below. While you will generally
get the best results when you use the same model the LoRA was trained
on, they will work to a greater or lesser extent with other models.
The major caveat is that a LoRA built on top of an SD v1.x model cannot
be used with a v2.x model, and vice-versa. If you try, you will get an
error! You may refer to multiple LoRAs in your prompt.
When you apply a LoRA in a prompt you can specify a weight. The higher
the weight, the more influence it will have on the image. Useful
ranges for weights are usually in the 0.0 to 1.0 range (with ranges
between 0.5 and 1.0 being most typical). However you can specify a
higher weight if you wish. Like models, each LoRA has a slightly
different useful weight range and will interact with other generation
parameters such as the CFG, step count and sampler. The author of the
LoRA will often provide guidance on the best settings, but feel free
to experiment. Be aware that it often helps to reduce the CFG value
when using LoRAs.
## Installing LoRAs
This is very easy! Download a LoRA model file from your favorite site
(e.g. [CIVITAI](https://civitai.com)) and place it in the `loras`
folder in the InvokeAI root directory (usually `~invokeai/loras` on
Linux/Macintosh machines, and `C:\Users\your-name\invokeai/loras` on
Windows systems). If the `loras` folder does not already exist, just
create it. The vast majority of LoRA models use the Kohya file format,
which is a type of `.safetensors` file.
You may change where InvokeAI looks for the `loras` folder by passing the
`--lora_directory` option to the `invoke.sh`/`invoke.bat` launcher, or
by placing the option in `invokeai.init`. For example:
```
invoke.sh --lora_directory=C:\Users\your-name\SDModels\lora
```
## Using a LoRA in your prompt
To activate a LoRA use the syntax `withLora(my-lora-name,weight)`
somewhere in the text of the prompt. The position doesn't matter; use
whatever is most comfortable for you.
For example, if you have a LoRA named `parchment_people.safetensors`
in your `loras` directory, you can load it with a weight of 0.9 with a
prompt like this one:
```
family sitting at dinner table withLora(parchment_people,0.9)
```
Add additional `withLora()` phrases to load more LoRAs.
You may omit the weight entirely to default to a weight of 1.0:
```
family sitting at dinner table withLora(parchment_people)
```
If you watch the console as your prompt executes, you will see
messages relating to the loading and execution of the LoRA. If things
don't work as expected, note down the console messages and report them
on the InvokeAI Issues pages or Discord channel.
That's pretty much all you need to know!
## Training Kohya Models
InvokeAI cannot currently train LoRA models, but it can load and use
existing ones to generate images. While there are several LoRA
model file formats, the predominant one is ["Kohya"
format](https://github.com/kohya-ss/sd-scripts), written by [Kohya
S.](https://github.com/kohya-ss). InvokeAI provides support for this
format. For creating your own Kohya models, we recommend the Windows
GUI written by former InvokeAI-team member
[bmaltais](https://github.com/bmaltais), which can be found at
[kohya_ss](https://github.com/bmaltais/kohya_ss).
We can also recommend the [HuggingFace DreamBooth Training
UI](https://huggingface.co/spaces/lora-library/LoRA-DreamBooth-Training-UI),
a paid service that supports both Textual Inversion and LoRA training.
You may also be interested in [Textual
Inversion](TEXTUAL_INVERSION.md) training, which is supported by
InvokeAI as a text console and command-line tool.


@@ -154,11 +154,8 @@ training sets will converge with 2000-3000 steps.
This adjusts how many training images are processed simultaneously in
each step. Higher values will cause the training process to run more
quickly, but use more memory. The default size is selected based on
whether you have the `xformers` memory-efficient attention library
installed. If `xformers` is available, the batch size will be 8,
otherwise 3. These values were chosen to allow training to run with
GPUs with as little as 12 GB VRAM.
quickly, but use more memory. The default size will run with GPUs with
as little as 12 GB.
### Learning rate
@@ -175,10 +172,8 @@ learning rate to improve performance.
### Use xformers acceleration
This will activate XFormers memory-efficient attention, which will
reduce memory requirements by half or more and allow you to select a
higher batch size. You need to have XFormers installed for this to
have an effect.
This will activate XFormers memory-efficient attention. You need to
have XFormers installed for this to have an effect.
### Learning rate scheduler
@@ -255,49 +250,6 @@ invokeai-ti \
--only_save_embeds
```
## Using Distributed Training
If you have multiple GPUs on one machine, or a cluster of GPU-enabled
machines, you can activate distributed training. See the [HuggingFace
Accelerate pages](https://huggingface.co/docs/accelerate/index) for
full information, but the basic recipe is:
1. Enter the InvokeAI developer's console command line by selecting
option [8] from the `invoke.sh`/`invoke.bat` script.
2. Configure Accelerate using `accelerate config`:
```sh
accelerate config
```
This will guide you through the configuration process, including
specifying how many machines you will run training on and the number
of GPUs per machine.
You only need to do this once.
3. Launch training from the command line using `accelerate launch`. Be sure
that your current working directory is the InvokeAI root directory (usually
named `invokeai` in your home directory):
```sh
accelerate launch .venv/bin/invokeai-ti \
--model=stable-diffusion-1.5 \
--resolution=512 \
--learnable_property=object \
--initializer_token='*' \
--placeholder_token='<shraddha>' \
--train_data_dir=/home/lstein/invokeai/text-inversion-training-data/shraddha \
--output_dir=/home/lstein/invokeai/text-inversion-training/shraddha \
--scale_lr \
--train_batch_size=10 \
--gradient_accumulation_steps=4 \
--max_train_steps=2000 \
--learning_rate=0.0005 \
--lr_scheduler=constant \
--mixed_precision=fp16 \
--only_save_embeds
```
## Using Embeddings
After training completes, the resultant embeddings will be saved into your `$INVOKEAI_ROOT/embeddings/<trigger word>/learned_embeds.bin`.


@@ -2,84 +2,62 @@
title: Overview
---
- The Basics
Here you can find the documentation for InvokeAI's various features.
- The [Web User Interface](WEB.md)
## The Basics
### * The [Web User Interface](WEB.md)
Guide to the Web interface. Also see the [WebUI Hotkeys Reference Guide](WEBUIHOTKEYS.md)
Guide to the Web interface. Also see the
[WebUI Hotkeys Reference Guide](WEBUIHOTKEYS.md)
### * The [Unified Canvas](UNIFIED_CANVAS.md)
Build complex scenes by combining and modifying multiple images in a stepwise
fashion. This feature combines img2img, inpainting and outpainting in
a single convenient digital artist-optimized user interface.
- The [Unified Canvas](UNIFIED_CANVAS.md)
### * The [Command Line Interface (CLI)](CLI.md)
Scriptable access to InvokeAI's features.
Build complex scenes by combining and modifying multiple images in a
stepwise fashion. This feature combines img2img, inpainting and
outpainting in a single convenient digital artist-optimized user
interface.
## Image Generation
### * [Prompt Engineering](PROMPTS.md)
Get the images you want with the InvokeAI prompt engineering language.
- The [Command Line Interface (CLI)](CLI.md)
## * [Post-Processing](POSTPROCESS.md)
Restore mangled faces and make images larger with upscaling. Also see the [Embiggen Upscaling Guide](EMBIGGEN.md).
Scriptable access to InvokeAI's features.
## * The [Concepts Library](CONCEPTS.md)
Add custom subjects and styles using HuggingFace's repository of embeddings.
- [Visual Manual for InvokeAI](https://docs.google.com/presentation/d/e/2PACX-1vSE90aC7bVVg0d9KXVMhy-Wve-wModgPFp7AGVTOCgf4xE03SnV24mjdwldolfCr59D_35oheHe4Cow/pub?start=false&loop=true&delayms=60000) (contributed by Statcomm)
### * [Image-to-Image Guide for the CLI](IMG2IMG.md)
Use a seed image to build new creations in the CLI.
- Image Generation
### * [Inpainting Guide for the CLI](INPAINTING.md)
Selectively erase and replace portions of an existing image in the CLI.
- [Prompt Engineering](PROMPTS.md)
### * [Outpainting Guide for the CLI](OUTPAINTING.md)
Extend the borders of the image with an "outcrop" function within the CLI.
Get the images you want with the InvokeAI prompt engineering language.
### * [Generating Variations](VARIATIONS.md)
Have an image you like and want to generate many more like it? Variations
are the ticket.
- [Post-Processing](POSTPROCESS.md)
## Model Management
Restore mangled faces and make images larger with upscaling. Also see
the [Embiggen Upscaling Guide](EMBIGGEN.md).
## * [Model Installation](../installation/050_INSTALLING_MODELS.md)
Learn how to import third-party models and switch among them. This
guide also covers optimizing models to load quickly.
- The [Concepts Library](CONCEPTS.md)
## * [Merging Models](MODEL_MERGING.md)
Teach an old model new tricks. Merge 2-3 models together to create a
new model that combines characteristics of the originals.
Add custom subjects and styles using HuggingFace's repository of
embeddings.
## * [Textual Inversion](TEXTUAL_INVERSION.md)
Personalize models by adding your own style or subjects.
- [Image-to-Image Guide for the CLI](IMG2IMG.md)
# Other Features
Use a seed image to build new creations in the CLI.
## * [The NSFW Checker](NSFW.md)
Prevent InvokeAI from displaying unwanted racy images.
- [Inpainting Guide for the CLI](INPAINTING.md)
Selectively erase and replace portions of an existing image in the CLI.
- [Outpainting Guide for the CLI](OUTPAINTING.md)
Extend the borders of the image with an "outcrop" function within the
CLI.
- [Generating Variations](VARIATIONS.md)
Have an image you like and want to generate many more like it?
Variations are the ticket.
- Model Management
- [Model Installation](../installation/050_INSTALLING_MODELS.md)
Learn how to import third-party models and switch among them. This guide
also covers optimizing models to load quickly.
- [Merging Models](MODEL_MERGING.md)
Teach an old model new tricks. Merge 2-3 models together to create a new
model that combines characteristics of the originals.
- [Textual Inversion](TEXTUAL_INVERSION.md)
Personalize models by adding your own style or subjects.
- Other Features
- [The NSFW Checker](NSFW.md)
Prevent InvokeAI from displaying unwanted racy images.
- [Miscellaneous](OTHER.md)
Run InvokeAI on Google Colab, generate images with repeating patterns,
batch process a file of prompts, increase the "creativity" of image
generation by adding initial noise, and more!
## * [Miscellaneous](OTHER.md)
Run InvokeAI on Google Colab, generate images with repeating patterns,
batch process a file of prompts, increase the "creativity" of image
generation by adding initial noise, and more!


@@ -1,4 +0,0 @@
# :octicons-file-code-16: IDE-Settings
Here we share settings for the IDEs used by our developers; maybe you will find
something interesting that helps boost your development efficiency 🔥


@@ -1,250 +0,0 @@
---
title: Visual Studio Code
---
# :material-microsoft-visual-studio-code:Visual Studio Code
The Workspace Settings are stored in the project (repository) root and take
priority over your user settings.
This lets you have different settings for different projects, while the user
settings are used as defaults when no workspace settings are provided.
## tasks.json
First we will create a task configuration which will create a virtual
environment and upgrade its core dependencies (pip, setuptools and wheel).
Into this venv we will then install the project defined in pyproject.toml in
editable mode with the dev, docs and test extras.
```json title=".vscode/tasks.json"
{
// See https://go.microsoft.com/fwlink/?LinkId=733558
// for the documentation about the tasks.json format
"version": "2.0.0",
"tasks": [
{
"label": "Create virtual environment",
"detail": "Create .venv and upgrade pip, setuptools and wheel",
"command": "python3",
"args": [
"-m",
"venv",
".venv",
"--prompt",
"InvokeAI",
"--upgrade-deps"
],
"runOptions": {
"instanceLimit": 1,
"reevaluateOnRerun": true
},
"group": {
"kind": "build"
},
"presentation": {
"echo": true,
"reveal": "always",
"focus": false,
"panel": "shared",
"showReuseMessage": true,
"clear": false
}
},
{
"label": "build InvokeAI",
"detail": "Build pyproject.toml with extras dev, docs and test",
"command": "${workspaceFolder}/.venv/bin/python3",
"args": [
"-m",
"pip",
"install",
"--use-pep517",
"--editable",
".[dev,docs,test]"
],
"dependsOn": "Create virtual environment",
"dependsOrder": "sequence",
"group": {
"kind": "build",
"isDefault": true
},
"presentation": {
"echo": true,
"reveal": "always",
"focus": false,
"panel": "shared",
"showReuseMessage": true,
"clear": false
}
}
]
}
```
The fastest way to build InvokeAI now is ++cmd+shift+b++
## launch.json
This file is used to define debugger configurations, so that you can launch and
monitor the application with one click, set breakpoints to inspect specific
states, and so on.
```json title=".vscode/launch.json"
{
"version": "0.2.0",
"configurations": [
{
"name": "invokeai web",
"type": "python",
"request": "launch",
"program": ".venv/bin/invokeai",
"justMyCode": true
},
{
"name": "invokeai cli",
"type": "python",
"request": "launch",
"program": ".venv/bin/invokeai",
"justMyCode": true
},
{
"name": "mkdocs serve",
"type": "python",
"request": "launch",
"program": ".venv/bin/mkdocs",
"args": ["serve"],
"justMyCode": true
}
]
}
```
Then you only need to hit ++f5++ and the fun begins :nerd: (It is assumed that
you have created a virtual environment via the [tasks](#tasksjson) from the
previous step.)
## extensions.json
A list of recommended vscode-extensions to make your life easier:
```json title=".vscode/extensions.json"
{
"recommendations": [
"editorconfig.editorconfig",
"github.vscode-pull-request-github",
"ms-python.black-formatter",
"ms-python.flake8",
"ms-python.isort",
"ms-python.python",
"ms-python.vscode-pylance",
"redhat.vscode-yaml",
"tamasfe.even-better-toml",
"eamodio.gitlens",
"foxundermoon.shell-format",
"timonwong.shellcheck",
"esbenp.prettier-vscode",
"davidanson.vscode-markdownlint",
"yzhang.markdown-all-in-one",
"bierner.github-markdown-preview",
"ms-azuretools.vscode-docker",
"mads-hartmann.bash-ide-vscode"
]
}
```
## settings.json
With the settings below, your files are formatted automatically when you save
them (only your modifications, where supported), which helps you avoid trouble
with the pre-commit hooks. If the hooks fail, they will prevent you from
committing, but most hooks write a fixed version directly, so you just need to
stage and commit the changes:
```json title=".vscode/settings.json"
{
"[json]": {
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.quickSuggestions": {
"comments": false,
"strings": true,
"other": true
},
"editor.suggest.insertMode": "replace",
"gitlens.codeLens.scopes": ["document"]
},
"[jsonc]": {
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.formatOnSave": true,
"editor.formatOnSaveMode": "modificationsIfAvailable"
},
"[python]": {
"editor.defaultFormatter": "ms-python.black-formatter",
"editor.formatOnSave": true,
"editor.formatOnSaveMode": "file"
},
"[toml]": {
"editor.defaultFormatter": "tamasfe.even-better-toml",
"editor.formatOnSave": true,
"editor.formatOnSaveMode": "modificationsIfAvailable"
},
"[yaml]": {
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.formatOnSave": true,
"editor.formatOnSaveMode": "modificationsIfAvailable"
},
"[markdown]": {
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.rulers": [80],
"editor.unicodeHighlight.ambiguousCharacters": false,
"editor.unicodeHighlight.invisibleCharacters": false,
"diffEditor.ignoreTrimWhitespace": false,
"editor.wordWrap": "on",
"editor.quickSuggestions": {
"comments": "off",
"strings": "off",
"other": "off"
},
"editor.formatOnSave": true,
"editor.formatOnSaveMode": "modificationsIfAvailable"
},
"[shellscript]": {
"editor.defaultFormatter": "foxundermoon.shell-format"
},
"[ignore]": {
"editor.defaultFormatter": "foxundermoon.shell-format"
},
"editor.rulers": [88],
"evenBetterToml.formatter.alignEntries": false,
"evenBetterToml.formatter.allowedBlankLines": 1,
"evenBetterToml.formatter.arrayAutoExpand": true,
"evenBetterToml.formatter.arrayTrailingComma": true,
"evenBetterToml.formatter.arrayAutoCollapse": true,
"evenBetterToml.formatter.columnWidth": 88,
"evenBetterToml.formatter.compactArrays": true,
"evenBetterToml.formatter.compactInlineTables": true,
"evenBetterToml.formatter.indentEntries": false,
"evenBetterToml.formatter.inlineTableExpand": true,
"evenBetterToml.formatter.reorderArrays": true,
"evenBetterToml.formatter.reorderKeys": true,
"evenBetterToml.formatter.compactEntries": false,
"evenBetterToml.schema.enabled": true,
"python.analysis.typeCheckingMode": "basic",
"python.formatting.provider": "black",
"python.languageServer": "Pylance",
"python.linting.enabled": true,
"python.linting.flake8Enabled": true,
"python.testing.unittestEnabled": false,
"python.testing.pytestEnabled": true,
"python.testing.pytestArgs": [
"tests",
"--cov=ldm",
"--cov-branch",
"--cov-report=term:skip-covered"
],
"yaml.schemas": {
"https://json.schemastore.org/prettierrc.json": "${workspaceFolder}/.prettierrc.yaml"
}
}
```


@@ -1,135 +0,0 @@
---
title: Pull-Request
---
# :octicons-git-pull-request-16: Pull-Request
## pre-requirements
To follow the steps in this tutorial you will need:
- [GitHub](https://github.com) account
- [git](https://git-scm.com/downloads) source control
- A text / code editor (personally I prefer
[Visual Studio Code](https://code.visualstudio.com/Download))
- Terminal:
- If you are on Linux/macOS you can use bash or zsh
- For Windows users the commands are written for PowerShell
## Fork Repository
The first step if you want to contribute to InvokeAI is to fork the
repository.
Since you are already reading this doc, the easiest way to do so is by clicking
[here](https://github.com/invoke-ai/InvokeAI/fork). You could also open
[InvokeAI](https://github.com/invoke-ai/InvokeAI) and click on the "Fork" button
in the top right.
## Clone your fork
After you have forked the repository, clone it to your dev machine:
=== ":fontawesome-brands-linux:Linux / :simple-apple:macOS"
``` sh
git clone https://github.com/<github username>/InvokeAI \
&& cd InvokeAI
```
=== ":fontawesome-brands-windows:Windows"
``` powershell
git clone https://github.com/<github username>/InvokeAI `
&& cd InvokeAI
```
## Install in Editable Mode
To install InvokeAI in editable mode, we recommend (as always) creating and
activating a venv first. Afterwards you can install the InvokeAI package,
including the dev and docs extras, in editable mode, followed by installation
of the pre-commit hook:
=== ":fontawesome-brands-linux:Linux / :simple-apple:macOS"
``` sh
python -m venv .venv \
--prompt InvokeAI \
--upgrade-deps \
&& source .venv/bin/activate \
&& pip install \
--upgrade \
--use-pep517 \
--editable=".[dev,docs]" \
&& pre-commit install
```
=== ":fontawesome-brands-windows:Windows"
``` powershell
python -m venv .venv `
--prompt InvokeAI `
--upgrade-deps `
&& .venv/scripts/activate.ps1 `
&& pip install `
--upgrade `
--use-pep517 `
--editable=".[dev,docs]" `
&& pre-commit install
```
## Create a branch
Make sure you are on the main branch, then create your feature branch from there:
=== ":fontawesome-brands-linux:Linux / :simple-apple:macOS"
``` sh
git checkout main \
&& git pull \
&& git checkout -B <branch name>
```
=== ":fontawesome-brands-windows:Windows"
``` powershell
git checkout main `
&& git pull `
&& git checkout -B <branch name>
```
## Commit your changes
When you are done adding or updating content, you need to commit those
changes to your repository before you can actually open a PR:
```{ .sh .annotate }
git add <files you have changed> # (1)!
git commit -m "A commit message which describes your change"
git push
```
1. Replace this with a space-separated list of the files you changed, like:
`README.md foo.sh bar.json baz`
## Create a Pull Request
After pushing your changes, you are ready to create a Pull Request. Just head
over to your fork on [GitHub](https://github.com), which should already show you
a message that there have been recent changes on your feature branch and a green
button you can use to create the PR.
The default target for your PR is the main branch of
[invoke-ai/InvokeAI](https://github.com/invoke-ai/InvokeAI).
Another way would be to create it in VS-Code or via the GitHub CLI (or even via
the GitHub CLI in a VS-Code Terminal Window 🤭):
```sh
gh pr create
```
The CLI will inform you if there are still unpushed commits on your branch. It
will also prompt you for things like the title and the body (description) if
you did not already pass them as arguments.


@@ -1,26 +0,0 @@
---
title: Issues
---
# :octicons-issue-opened-16: Issues
## :fontawesome-solid-bug: Report a bug
If you stumble over a bug while using InvokeAI, we would appreciate it a lot if
you
[open an issue](https://github.com/invoke-ai/InvokeAI/issues/new?assignees=&labels=bug&template=BUG_REPORT.yml&title=%5Bbug%5D%3A+)
to inform us about the details so that our developers can look into it.
If you also know how to fix the bug, take a look [here](010_PULL_REQUEST.md) to
find out how to create a Pull Request.
## Request a feature
If you have an idea for a new feature which you would like to see in
InvokeAI, there is a
[feature request](https://github.com/invoke-ai/InvokeAI/issues/new?assignees=&labels=bug&template=BUG_REPORT.yml&title=%5Bbug%5D%3A+)
available in the issues section of the repository.
If you are just curious which features have already been requested, you can find
an overview of open requests
[here](https://github.com/invoke-ai/InvokeAI/labels/enhancement).


@@ -1,32 +0,0 @@
---
title: docs
---
# :simple-readthedocs: MkDocs-Material
If you want to contribute to the docs, there is an easy way to verify the results
of your changes before committing them.
Just follow the steps in the [Pull-Requests](010_PULL_REQUEST.md) docs, where we
already
[create a venv and install the docs extras](010_PULL_REQUEST.md#install-in-editable-mode).
When installed it's as simple as:
```sh
mkdocs serve
```
This will build the docs locally and serve them on localhost. Auto-refresh is
included, so you can just update a doc, save it, and switch to the browser
without needing to restart `mkdocs serve`.
More information about the "mkdocs flavored markdown syntax" can be found
[here](https://squidfunk.github.io/mkdocs-material/reference/).
## :material-microsoft-visual-studio-code:VS-Code
We also provide a
[launch configuration for VS-Code](../IDE-Settings/vs-code.md#launchjson) which
includes a `mkdocs serve` entry point as well. You also don't have to worry about
formatting, since it is automated via prettier, though this is of course not
limited to VS-Code.


@@ -1,76 +0,0 @@
# Transformation to nodes
## Current state
```mermaid
flowchart TD
web[WebUI];
cli[CLI];
web --> |img2img| generate(generate);
web --> |txt2img| generate(generate);
cli --> |txt2img| generate(generate);
cli --> |img2img| generate(generate);
generate --> model_manager;
generate --> generators;
generate --> ti_manager[TI Manager];
generate --> etc;
```
## Transitional Architecture
### first step
```mermaid
flowchart TD
web[WebUI];
cli[CLI];
web --> |img2img| img2img_node(Img2img node);
web --> |txt2img| generate(generate);
img2img_node --> model_manager;
img2img_node --> generators;
cli --> |txt2img| generate;
cli --> |img2img| generate;
generate --> model_manager;
generate --> generators;
generate --> ti_manager[TI Manager];
generate --> etc;
```
### second step
```mermaid
flowchart TD
web[WebUI];
cli[CLI];
web --> |img2img| img2img_node(img2img node);
img2img_node --> model_manager;
img2img_node --> generators;
web --> |txt2img| txt2img_node(txt2img node);
cli --> |txt2img| txt2img_node;
cli --> |img2img| generate(generate);
generate --> model_manager;
generate --> generators;
generate --> ti_manager[TI Manager];
generate --> etc;
txt2img_node --> model_manager;
txt2img_node --> generators;
txt2img_node --> ti_manager[TI Manager];
```
## Final Architecture
```mermaid
flowchart TD
web[WebUI];
cli[CLI];
web --> |img2img|img2img_node(img2img node);
cli --> |img2img|img2img_node;
web --> |txt2img|txt2img_node(txt2img node);
cli --> |txt2img|txt2img_node;
img2img_node --> model_manager;
txt2img_node --> model_manager;
img2img_node --> generators;
txt2img_node --> generators;
img2img_node --> ti_manager[TI Manager];
txt2img_node --> ti_manager[TI Manager];
```


@@ -1,16 +0,0 @@
---
title: Contributing
---
# :fontawesome-solid-code-commit: Contributing
There are different ways you can contribute to
[InvokeAI](https://github.com/invoke-ai/InvokeAI), such as translations or
opening issues for bugs or ideas for improvement.
This section of the docs explains some of the different ways you can
contribute, to make it easier for newcomers as well as advanced users :nerd:
If you want to contribute code, but you do not have an exact idea yet, take a
look at the currently open
[:fontawesome-solid-bug: Bug Reports](https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen+label%3Abug)


@@ -1,12 +0,0 @@
# :material-help:Help
If you are looking for help with the installation of InvokeAI, please take a
look at the [Installation](../installation/index.md) section of the docs.
Here you will find help with topics like
- how to contribute
- configuration recommendations for IDEs
If you have an idea about what's missing and aren't afraid of contributing,
just take a look at [DOCS](./contributing/030_DOCS.md) to find out how to do so.


@@ -2,8 +2,6 @@
title: Home
---
# :octicons-home-16: Home
<!--
The Docs you find here (/docs/*) are built and deployed via mkdocs. If you want to run a local version to verify your changes, it's as simple as::
@@ -31,36 +29,36 @@ title: Home
[![github open prs badge]][github open prs link]
[ci checks on dev badge]:
https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/development?label=CI%20status%20on%20dev&cache=900&icon=github
[ci checks on dev link]:
https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Adevelopment
[ci checks on main badge]:
https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
[ci checks on main link]:
https://github.com/invoke-ai/InvokeAI/actions/workflows/test-invoke-conda.yml
[discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord
[discord link]: https://discord.gg/ZmtBAhwWhy
[github forks badge]:
https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github
[github forks link]:
https://useful-forks.github.io/?repo=lstein%2Fstable-diffusion
[github open issues badge]:
https://flat.badgen.net/github/open-issues/invoke-ai/InvokeAI?icon=github
[github open issues link]:
https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen
[github open prs badge]:
https://flat.badgen.net/github/open-prs/invoke-ai/InvokeAI?icon=github
[github open prs link]:
https://github.com/invoke-ai/InvokeAI/pulls?q=is%3Apr+is%3Aopen
[github stars badge]:
https://flat.badgen.net/github/stars/invoke-ai/InvokeAI?icon=github
[github stars link]: https://github.com/invoke-ai/InvokeAI/stargazers
[latest commit to dev badge]:
https://flat.badgen.net/github/last-commit/invoke-ai/InvokeAI/development?icon=github&color=yellow&label=last%20dev%20commit&cache=900
[latest commit to dev link]:
https://github.com/invoke-ai/InvokeAI/commits/development
[latest release badge]:
https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases
</div>
@@ -89,24 +87,24 @@ Q&A</a>]
You will need one of the following:
- :simple-nvidia: An NVIDIA-based graphics card with 4 GB or more VRAM memory.
- :simple-amd: An AMD-based graphics card with 4 GB or more VRAM memory (Linux
only)
- :fontawesome-brands-apple: An Apple computer with an M1 chip.
We do **not recommend** the following video cards due to issues with their
running in half-precision mode and having insufficient VRAM to render 512x512
images in full-precision mode:
- NVIDIA 10xx series cards such as the 1080ti
- GTX 1650 series cards
- GTX 1660 series cards
### :fontawesome-solid-memory: Memory and Disk
- At least 12 GB Main Memory RAM.
- At least 18 GB of free disk space for the machine learning model, Python,
and all its dependencies.
## :octicons-package-dependencies-24: Installation
@@ -115,407 +113,133 @@ either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
driver).
### [Installation Getting Started Guide](installation)
#### [Automated Installer](installation/010_INSTALL_AUTOMATED.md)
This method is recommended for 1st time users
#### [Manual Installation](installation/020_INSTALL_MANUAL.md)
This method is recommended for experienced users and developers
#### [Docker Installation](installation/040_INSTALL_DOCKER.md)
This method is recommended for those familiar with running Docker containers
### Other Installation Guides
- [PyPatchMatch](installation/060_INSTALL_PATCHMATCH.md)
- [XFormers](installation/070_INSTALL_XFORMERS.md)
- [CUDA and ROCm Drivers](installation/030_INSTALL_CUDA_AND_ROCM.md)
- [Installing New Models](installation/050_INSTALLING_MODELS.md)
## :octicons-gift-24: InvokeAI Features
### The InvokeAI Web Interface
- [WebUI overview](features/WEB.md)
- [WebUI hotkey reference guide](features/WEBUIHOTKEYS.md)
- [WebUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)
- [Visual Manual for InvokeAI v2.3.1](https://docs.google.com/presentation/d/e/2PACX-1vSE90aC7bVVg0d9KXVMhy-Wve-wModgPFp7AGVTOCgf4xE03SnV24mjdwldolfCr59D_35oheHe4Cow/pub?start=false&loop=true&delayms=60000) (contributed by Statcomm)
- [WebUI overview](features/WEB.md)
- [WebUI hotkey reference guide](features/WEBUIHOTKEYS.md)
- [WebUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)
<!-- separator -->
### The InvokeAI Command Line Interface
- [Command Line Interface Reference Guide](features/CLI.md)
<!-- separator -->
### Image Management
- [Image2Image](features/IMG2IMG.md)
- [Inpainting](features/INPAINTING.md)
- [Outpainting](features/OUTPAINTING.md)
- [Adding custom styles and subjects](features/CONCEPTS.md)
- [Using LoRA models](features/LORAS.md)
- [Upscaling and Face Reconstruction](features/POSTPROCESS.md)
- [Embiggen upscaling](features/EMBIGGEN.md)
- [Other Features](features/OTHER.md)
- [Image2Image](features/IMG2IMG.md)
- [Inpainting](features/INPAINTING.md)
- [Outpainting](features/OUTPAINTING.md)
- [Adding custom styles and subjects](features/CONCEPTS.md)
- [Upscaling and Face Reconstruction](features/POSTPROCESS.md)
- [Embiggen upscaling](features/EMBIGGEN.md)
- [Other Features](features/OTHER.md)
<!-- separator -->
### Model Management
- [Installing](installation/050_INSTALLING_MODELS.md)
- [Model Merging](features/MODEL_MERGING.md)
- [Adding custom styles and subjects via embeddings](features/CONCEPTS.md)
- [Textual Inversion](features/TEXTUAL_INVERSION.md)
- [Not Safe for Work (NSFW) Checker](features/NSFW.md)
- [Installing](installation/050_INSTALLING_MODELS.md)
- [Model Merging](features/MODEL_MERGING.md)
- [Style/Subject Concepts and Embeddings](features/CONCEPTS.md)
- [Textual Inversion](features/TEXTUAL_INVERSION.md)
- [Not Safe for Work (NSFW) Checker](features/NSFW.md)
<!-- separator -->
### Prompt Engineering
- [Prompt Syntax](features/PROMPTS.md)
- [Generating Variations](features/VARIATIONS.md)
## :octicons-log-16: Latest Changes
### v2.3.3 <small>(29 March 2023)</small>
#### Bug Fixes
1. When using legacy checkpoints with an external VAE, the VAE file is now scanned for malware prior to loading. Previously only the main model weights file was scanned.
2. Textual inversion will select an appropriate batchsize based on whether `xformers` is active, and will default to `xformers` enabled if the library is detected.
3. The batch script log file names have been fixed to be compatible with Windows.
4. Occasional corruption of the `.next_prefix` file (which stores the next output file name in sequence) on Windows systems is now detected and corrected.
5. An infinite loop when opening the developer's console from within the `invoke.sh` script has been corrected.
#### Enhancements
1. It is now possible to load and run several community-contributed SD-2.0 based models, including the infamous "Illuminati" model.
2. The "NegativePrompts" embedding file, and others like it, can now be loaded by placing it in the InvokeAI `embeddings` directory.
3. If no `--model` is specified at launch time, InvokeAI will remember the last model used and restore it the next time it is launched.
4. On Linux systems, the `invoke.sh` launcher now uses a prettier console-based interface. To take advantage of it, install the `dialog` package using your package manager (e.g. `sudo apt install dialog`).
5. When loading legacy models (safetensors/ckpt) you can specify a custom config file and/or a VAE by placing like-named files in the same directory as the model following this example:
```
my-favorite-model.ckpt
my-favorite-model.yaml
my-favorite-model.vae.pt # or my-favorite-model.vae.safetensors
```
### v2.3.2 <small>(13 March 2023)</small>
#### Bugfixes
Since version 2.3.1 the following bugs have been fixed:
1. Black images appearing for potential NSFW images when generating with legacy checkpoint models and both `--no-nsfw_checker` and `--ckpt_convert` turned on.
2. Black images appearing when generating from models fine-tuned on Stable-Diffusion-2-1-base. When importing V2-derived models, you may be asked to select whether the model was derived from a "base" model (512 pixels) or the 768-pixel SD-2.1 model.
3. The "Use All" button was not restoring the Hi-Res Fix setting on the WebUI
4. When using the model installer console app, models failed to import correctly when importing from directories with spaces in their names. A similar issue with the output directory was also fixed.
5. Crashes that occurred during model merging.
6. Restored previous naming of Stable Diffusion base and 768 models.
7. Upgraded to latest versions of `diffusers`, `transformers`, `safetensors` and `accelerate` libraries upstream. We hope that this will fix the `assertion NDArray > 2**32` issue that MacOS users have had when generating images larger than 768x768 pixels. Please report back.
As part of the upgrade to `diffusers`, the location of the diffusers-based models has changed from `models/diffusers` to `models/hub`. When you launch InvokeAI for the first time, it will prompt you to OK a one-time move. This should be quick and harmless, but if you have modified your `models/diffusers` directory in some way, for example using symlinks, you may wish to cancel the migration and make appropriate adjustments.
#### New "Invokeai-batch" script
2.3.2 introduces a new command-line only script called `invokeai-batch` that can be used to generate hundreds of images from prompts and settings that vary systematically. This can be used to try the same prompt across multiple combinations of models, steps, CFG settings and so forth. It also allows you to template prompts and generate a combinatorial list like:
```
a shack in the mountains, photograph
a shack in the mountains, watercolor
a shack in the mountains, oil painting
a chalet in the mountains, photograph
a chalet in the mountains, watercolor
a chalet in the mountains, oil painting
a shack in the desert, photograph
...
```
If you have a system with multiple GPUs, or a single GPU with lots of
VRAM, you can parallelize generation across the combinatorial set,
reducing wait times and using your system's resources efficiently
(make sure you have good GPU cooling).
To try `invokeai-batch` out, launch the "developer's console" using the `invoke` launcher script, or activate the invokeai virtual environment manually. From the console, give the command `invokeai-batch --help` to learn how the script works and create your first template file for dynamic prompt generation.
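As a minimal sketch, assuming a standard runtime directory whose virtual environment lives in `.venv` (the path below is illustrative), the same thing can be done without the launcher:
```bash
# activate the InvokeAI virtual environment, then view the script's help text
source ~/invokeai/.venv/bin/activate
invokeai-batch --help
```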
### v2.3.1 <small>(26 February 2023)</small>
This is primarily a bugfix release, but it does provide several new features that will improve the user experience.
#### Enhanced support for model management
InvokeAI now makes it convenient to add, remove and modify models. You can individually import models that are stored on your local system, scan an entire folder and its subfolders for models and import them automatically, and even directly import models from the internet by providing their download URLs. You also have the option of designating a local folder to scan for new models each time InvokeAI is restarted.
There are three ways of accessing the model management features:
1. ***From the WebUI***, click on the cube to the right of the model selection menu. This will bring up a form that allows you to import models individually from your local disk or scan a directory for models to import.
![image](https://user-images.githubusercontent.com/111189/220638091-918492cc-0719-4194-b033-3741e8289b30.png)
2. **Using the Model Installer App**
Choose option (5) _download and install models_ from the `invoke` launcher script to start a new console-based application for model management. You can use this to select from a curated set of starter models, or import checkpoint, safetensors, and diffusers models from a local disk or the internet. The example below shows importing two checkpoint URLs from popular SD sites and a HuggingFace diffusers model using its Repository ID. It also shows how to designate a folder to be scanned at startup time for new models to import.
Command-line users can start this app using the command `invokeai-model-install`.
![image](https://user-images.githubusercontent.com/111189/220660363-22ff3a2e-8082-410e-a818-d2b3a0529bac.png)
3. **Using the Command Line Client (CLI)**
The `!install_model` and `!convert_model` commands have been enhanced to allow entering of URLs and local directories to scan and import. The first command installs .ckpt and .safetensors files as-is. The second one converts them into the faster diffusers format before installation.
Internally InvokeAI is able to probe the contents of a .ckpt or .safetensors file to distinguish among v1.x, v2.x and inpainting models. This means that you do **not** need to include "inpaint" in your model names to use an inpainting model. Note that Stable Diffusion v2.x models will be autoconverted into `diffusers` models the first time you use them.
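For illustration, both commands accept a URL or a local path directly at the `invoke>` prompt; the URL and file path below are placeholders:
```bash
invoke> !install_model https://example.org/sd_models/martians.safetensors
invoke> !convert_model C:/Users/fred/Downloads/martians.safetensors
```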
Please see [INSTALLING MODELS](https://invoke-ai.github.io/InvokeAI/installation/050_INSTALLING_MODELS/) for more information on model management.
#### An Improved Installer Experience
The installer now launches a console-based UI for setting and changing commonly-used startup options:
![image](https://user-images.githubusercontent.com/111189/220644777-3d3a90ca-f9e2-4e6d-93da-cbdd66bf12f3.png)
After selecting the desired options, the installer installs several support models needed by InvokeAI's face reconstruction and upscaling features and then launches the interface for selecting and installing models shown earlier. At any time, you can edit the startup options by launching `invoke.sh`/`invoke.bat` and entering option (6) _change InvokeAI startup options_
Command-line users can launch the new configure app using `invokeai-configure`.
This release also comes with a renewed updater. To do an update without going through a whole reinstallation, launch `invoke.sh` or `invoke.bat` and choose option (9) _update InvokeAI_ . This will bring you to a screen that prompts you to update to the latest released version, to the most current development version, or any released or unreleased version you choose by selecting the tag or branch of the desired version.
![image](https://user-images.githubusercontent.com/111189/220650124-30a77137-d9cd-406e-a87d-d8283f99a4b3.png)
Command-line users can run this interface by typing `invokeai-update`
#### Image Symmetry Options
There are now features to generate horizontal and vertical symmetry during generation. The way these work is to wait until a selected step in the generation process and then to turn on a mirror image effect. In addition to generating some cool images, you can also use this to make side-by-side comparisons of how an image will look with more or fewer steps. Access this option from the WebUI by selecting _Symmetry_ from the image generation settings, or within the CLI by using the options `--h_symmetry_time_pct` and `--v_symmetry_time_pct` (these can be abbreviated to `--h_sym` and `--v_sym` like all other options).
![image](https://user-images.githubusercontent.com/111189/220658687-47fd0f2c-7069-4d95-aec9-7196fceb360d.png)
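For instance, a CLI invocation might look like the following; the prompt is arbitrary, and the `0.5` assumes the percentage is expressed as a decimal fraction of the generation steps:
```bash
invoke> "a symmetrical mandala, intricate detail" --h_symmetry_time_pct 0.5
```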
#### A New Unified Canvas Look
This release introduces a beta version of the WebUI Unified Canvas. To try it out, open up the settings dialogue in the WebUI (gear icon) and select _Use Canvas Beta Layout_:
![image](https://user-images.githubusercontent.com/111189/220646958-b7eca95e-dc39-4cd2-b277-63eac98ed446.png)
Refresh the screen and go to the Unified Canvas (left side of screen, third icon from the top). The new layout is designed to provide more space to work in and to keep the image controls close to the image itself:
![image](https://user-images.githubusercontent.com/111189/220647560-4a9265a1-6926-44f9-9d08-e1ef2ce61ff8.png)
#### Model conversion and merging within the WebUI
The WebUI now has an intuitive interface for model merging, as well as for permanent conversion of models from legacy .ckpt/.safetensors formats into diffusers format. These options are also available directly from the `invoke.sh`/`invoke.bat` scripts.
#### An easier way to contribute translations to the WebUI
We have migrated our translation efforts to [Weblate](https://hosted.weblate.org/engage/invokeai/), a FOSS translation product. Maintaining the growing project's translations is now far simpler for the maintainers and community. Please review our brief [translation guide](https://github.com/invoke-ai/InvokeAI/blob/v2.3.1/docs/other/TRANSLATION.md) for more information on how to contribute.
#### Numerous internal bugfixes and performance issues
This release quashes multiple bugs that were reported in 2.3.0. Major internal changes include upgrading to `diffusers 0.13.0`, and using the `compel` library for prompt parsing. See [Detailed Change Log](#full-change-log) for a detailed list of bugs caught and squished.
#### Summary of InvokeAI command line scripts (all accessible via the launcher menu)
| Command | Description |
|--------------------------|---------------------------------------------------------------------|
| `invokeai` | Command line interface |
| `invokeai --web` | Web interface |
| `invokeai-model-install` | Model installer with console forms-based front end |
| `invokeai-ti --gui` | Textual inversion, with a console forms-based front end |
| `invokeai-merge --gui` | Model merging, with a console forms-based front end |
| `invokeai-configure` | Startup configuration; can also be used to reinstall support models |
| `invokeai-update` | InvokeAI software updater |
### v2.3.0 <small>(9 February 2023)</small>
#### Migration to Stable Diffusion `diffusers` models
Previous versions of InvokeAI supported the original model file format introduced with Stable Diffusion 1.4. In the original format, known variously as "checkpoint", or "legacy" format, there is a single large weights file ending with `.ckpt` or `.safetensors`. Though this format has served the community well, it has a number of disadvantages, including file size, slow loading times, and a variety of non-standard variants that require special-case code to handle. In addition, because checkpoint files are actually a bundle of multiple machine learning sub-models, it is hard to swap different sub-models in and out, or to share common sub-models. A new format, introduced by the StabilityAI company in collaboration with HuggingFace, is called `diffusers` and consists of a directory of individual models. The most immediate benefit of `diffusers` is that they load from disk very quickly. A longer term benefit is that in the near future `diffusers` models will be able to share common sub-models, dramatically reducing disk space when you have multiple fine-tune models derived from the same base.
When you perform a new install of version 2.3.0, you will be offered the option to install the `diffusers` versions of a number of popular SD models, including Stable Diffusion versions 1.5 and 2.1 (including the 768x768 pixel version of 2.1). These will act and work just like the checkpoint versions. Do not be concerned if you already have a lot of ".ckpt" or ".safetensors" models on disk! InvokeAI 2.3.0 can still load these and generate images from them without any extra intervention on your part.
To take advantage of the optimized loading times of `diffusers` models, InvokeAI offers options to convert legacy checkpoint models into optimized `diffusers` models. If you use the `invokeai` command line interface, the relevant commands are:
* `!convert_model` -- Take the path to a local checkpoint file or a URL that is pointing to one, convert it into a `diffusers` model, and import it into InvokeAI's models registry file.
* `!optimize_model` -- If you already have a checkpoint model in your InvokeAI models file, this command will accept its short name and convert it into a like-named `diffusers` model, optionally deleting the original checkpoint file.
* `!import_model` -- Take the local path of either a checkpoint file or a `diffusers` model directory and import it into InvokeAI's registry file. You may also provide the ID of any diffusers model that has been published on the [HuggingFace models repository](https://huggingface.co/models?pipeline_tag=text-to-image&sort=downloads) and it will be downloaded and installed automatically.
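By way of illustration, each command is entered at the `invoke>` prompt; the file path and model name below reappear in the installation docs as examples, and the repository ID is simply a well-known published diffusers model used here as a placeholder:
```bash
invoke> !convert_model C:/Users/fred/Downloads/martians.safetensors
invoke> !optimize_model martians-v1.0
invoke> !import_model runwayml/stable-diffusion-v1-5
```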
The WebGUI offers similar functionality for model management.
For advanced users, new command-line options provide additional functionality. Launching `invokeai` with the argument `--autoconvert <path to directory>` takes the path to a directory of checkpoint files, automatically converts them into `diffusers` models and imports them. Each time the script is launched, the directory will be scanned for new checkpoint files to be loaded. Alternatively, the `--ckpt_convert` argument will cause any checkpoint or safetensors model that is already registered with InvokeAI to be converted into a `diffusers` model on the fly, allowing you to take advantage of future diffusers-only features without explicitly converting the model and saving it to disk.
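For example (the directory path is illustrative):
```bash
# scan this folder at every launch and convert any new checkpoints found there
invokeai --autoconvert /home/fred/stable-diffusion-checkpoints

# or convert already-registered checkpoints on the fly at load time
invokeai --ckpt_convert
```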
Please see [INSTALLING MODELS](https://invoke-ai.github.io/InvokeAI/installation/050_INSTALLING_MODELS/) for more information on model management in both the command-line and Web interfaces.
#### Support for the `XFormers` Memory-Efficient Crossattention Package
On CUDA (Nvidia) systems, version 2.3.0 supports the `XFormers` library. Once installed, the `xformers` package dramatically reduces the memory footprint of loaded Stable Diffusion model files and modestly increases image generation speed. `xformers` will be installed and activated automatically if you specify a CUDA system at install time.
The caveat with using `xformers` is that it introduces slightly non-deterministic behavior, and images generated using the same seed and other settings will be subtly different between invocations. Generally the changes are unnoticeable unless you rapidly shift back and forth between images, but to disable `xformers` and restore fully deterministic behavior, you may launch InvokeAI using the `--no-xformers` option. This is most conveniently done by opening the file `invokeai/invokeai.init` with a text editor, and adding the line `--no-xformers` at the bottom.
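One way to make the change, sketched under the assumption that `invokeai.init` sits in the runtime directory and holds one launch option per line:
```bash
# append the option to the init file (path is illustrative)
echo "--no-xformers" >> ~/invokeai/invokeai.init
```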
#### A Negative Prompt Box in the WebUI
There is now a separate text input box for negative prompts in the WebUI. This is convenient for stashing frequently-used negative prompts ("mangled limbs, bad anatomy"). The `[negative prompt]` syntax continues to work in the main prompt box as well.
To see exactly how your prompts are being parsed, launch `invokeai` with the `--log_tokenization` option. The console window will then display the tokenization process for both positive and negative prompts.
#### Model Merging
Version 2.3.0 offers an intuitive user interface for merging up to three Stable Diffusion models. Model merging allows you to mix the behavior of models to achieve very interesting effects. To use it, each of the models must already be imported into InvokeAI and saved in `diffusers` format. Launch the merger using a new menu item in the InvokeAI launcher script (`invoke.sh`, `invoke.bat`) or directly from the command line with `invokeai-merge --gui`. You will be prompted to select the models to merge, the proportions in which to mix them, and the mixing algorithm. The script will create a new merged `diffusers` model and import it into InvokeAI for your use.
See [MODEL MERGING](https://invoke-ai.github.io/InvokeAI/features/MODEL_MERGING/) for more details.
#### Textual Inversion Training
Textual Inversion (TI) is a technique for training a Stable Diffusion model to emit a particular subject or style when triggered by a keyword phrase. You can perform TI training by placing a small number of images of the subject or style in a directory, and choosing a distinctive trigger phrase, such as "pointillist-style". After successful training, the subject or style will be activated by including `<pointillist-style>` in your prompt.
Previous versions of InvokeAI were able to perform TI, but it required using a command-line script with dozens of obscure command-line arguments. Version 2.3.0 features an intuitive TI frontend that will build a TI model on top of any `diffusers` model. To access training, launch it from a new item in the launcher script or from the command line using `invokeai-ti --gui`.
See [TEXTUAL INVERSION](https://invoke-ai.github.io/InvokeAI/features/TEXTUAL_INVERSION/) for further details.
#### A New Installer Experience
The InvokeAI installer has been upgraded in order to provide a smoother and hopefully more glitch-free experience. In addition, InvokeAI is now packaged as a PyPI project, allowing developers and power-users to install InvokeAI with the command `pip install InvokeAI --use-pep517`. Please see [Installation](#installation) for details.
Developers should be aware that the `pip` installation procedure has been simplified and that the `conda` method is no longer supported at all. Accordingly, the `environments_and_requirements` directory has been deleted from the repository.
#### Command-line name changes
All of InvokeAI's functionality, including the WebUI, command-line interface, textual inversion training and model merging, can be accessed from the `invoke.sh` and `invoke.bat` launcher scripts. The menu of options has been expanded to add the new functionality. For the convenience of developers and power users, we have normalized the names of the InvokeAI command-line scripts:
* `invokeai` -- Command-line client
* `invokeai --web` -- Web GUI
* `invokeai-merge --gui` -- Model merging script with graphical front end
* `invokeai-ti --gui` -- Textual inversion script with graphical front end
* `invokeai-configure` -- Configuration tool for initializing the `invokeai` directory and selecting popular starter models.
For backward compatibility, the old command names are also recognized, including `invoke.py` and `configure-invokeai.py`. However, these are deprecated and will eventually be removed.
Developers should be aware that the locations of the scripts' source code have been moved. The new locations are:
* `invokeai` => `ldm/invoke/CLI.py`
* `invokeai-configure` => `ldm/invoke/config/configure_invokeai.py`
* `invokeai-ti`=> `ldm/invoke/training/textual_inversion.py`
* `invokeai-merge` => `ldm/invoke/merge_diffusers`
Developers are strongly encouraged to perform an "editable" install of InvokeAI using `pip install -e . --use-pep517` in the Git repository, and then to call the scripts using their 2.3.0 names, rather than executing the scripts directly. Developers should also be aware that several important data files have been relocated into a new directory named `invokeai`. This includes the WebGUI's `frontend` and `backend` directories, and the `INITIAL_MODELS.yaml` files used by the installer to select starter models. Eventually all InvokeAI modules will be in subdirectories of `invokeai`.
Please see [2.3.0 Release Notes](https://github.com/invoke-ai/InvokeAI/releases/tag/v2.3.0) for further details.
For older changelogs, please visit the
**[CHANGELOG](CHANGELOG/#v223-2-december-2022)**.
## :material-target: Troubleshooting
Please check out our
**[:material-frequently-asked-questions: Troubleshooting Guide](installation/010_INSTALL_AUTOMATED.md#troubleshooting)**
to get solutions for common installation problems and other issues.
## :octicons-repo-push-24: Contributing
@@ -541,8 +265,8 @@ thank them for their time, hard work and effort.
For support, please use this repository's GitHub Issues tracking service. Feel
free to send me an email if you use and like the script.
Original portions of the software are Copyright (c) 2022-23 by
[The InvokeAI Team](https://github.com/invoke-ai).
## :octicons-book-24: Further Reading

View File

@@ -417,7 +417,7 @@ Then type the following commands:
=== "AMD System"
```bash
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/rocm5.2
```
### Corrupted configuration file

View File

@@ -77,7 +77,7 @@ machine. To test, open up a terminal window and issue the following
command:
```
rocminfo
rocm-smi
```
If you get a table labeled "ROCm System Management Interface" the
@@ -95,17 +95,9 @@ recent version of Ubuntu, 22.04. However, this [community-contributed
recipe](https://novaspirit.github.io/amdgpu-rocm-ubu22/) is reported
to work well.
After installation, please run `rocminfo` a second time to confirm
After installation, please run `rocm-smi` a second time to confirm
that the driver is present and the GPU is recognized. You may need to
do a reboot in order to load the driver. In addition, if you see
errors relating to your username not being a member of the `render`
group, you may fix this by adding yourself to this group with the command:
```
sudo usermod -a -G render myUserName
```
(Thanks to @EgoringKosmos for the usermod recipe.)
do a reboot in order to load the driver.
### Linux Install with a ROCm-docker Container
@@ -118,7 +110,7 @@ recipes are available
When installing torch and torchvision manually with `pip`, remember to provide
the argument `--extra-index-url
https://download.pytorch.org/whl/rocm5.4.2` as described in the [Manual
https://download.pytorch.org/whl/rocm5.2` as described in the [Manual
Installation Guide](020_INSTALL_MANUAL.md).
This will be done automatically for you if you use the installer

View File

@@ -11,7 +11,7 @@ The model checkpoint files ('\*.ckpt') are the Stable Diffusion
captioned images gathered from multiple sources.
Originally there was only a single Stable Diffusion weights file,
which many people named `model.ckpt`. Now there are hundreds
which many people named `model.ckpt`. Now there are dozens or more
that have been fine tuned to provide particular styles, genres, or
other features. In addition, there are several new formats that
improve on the original checkpoint format: a `.safetensors` format
@@ -29,10 +29,9 @@ and performance are being made at a rapid pace. Among other features
is the ability to download and install a `diffusers` model just by
providing its HuggingFace repository ID.
While InvokeAI will continue to support legacy `.ckpt` and `.safetensors`
While InvokeAI will continue to support `.ckpt` and `.safetensors`
models for the near future, these are deprecated and support will
be withdrawn in version 3.0, after which all legacy models will be
converted into diffusers at the time they are loaded.
likely be withdrawn at some point in the not-too-distant future.
This manual will guide you through installing and configuring model
weight files and converting legacy `.ckpt` and `.safetensors` files
@@ -90,18 +89,15 @@ aware that CIVITAI hosts many models that generate NSFW content.
!!! note
InvokeAI 2.3.x does not support directly importing and
running Stable Diffusion version 2 checkpoint models. If you
try to import them, they will be automatically
converted into `diffusers` models on the fly. This adds about 20s
to loading time. To avoid this overhead, you are encouraged to
use one of the conversion methods described below to convert them
permanently.
running Stable Diffusion version 2 checkpoint models. You may instead
convert them into `diffusers` models using the conversion methods
described below.
## Installation
There are multiple ways to install and manage models:
1. The `invokeai-model-install` script which will download and install them for you.
1. The `invokeai-configure` script which will download and install them for you.
2. The command-line tool (CLI) has commands that allows you to import, configure and modify
models files.
@@ -109,41 +105,14 @@ There are multiple ways to install and manage models:
3. The web interface (WebUI) has a GUI for importing and managing
models.
### Installation via `invokeai-model-install`
### Installation via `invokeai-configure`
From the `invoke` launcher, choose option (5) "Download and install
models." This will launch the same script that prompted you to select
models at install time. You can use this to add models that you
skipped the first time around. It is all right to specify a model that
was previously downloaded; the script will just confirm that the files
are complete.
This script allows you to load third-party models. Look for a large text
entry box labeled "IMPORT LOCAL AND REMOTE MODELS." In this box, you
can cut and paste one or more of any of the following:
1. A URL that points to a downloadable .ckpt or .safetensors file.
2. A file path pointing to a .ckpt or .safetensors file.
3. A diffusers model repo_id (from HuggingFace) in the format
"owner/repo_name".
4. A directory path pointing to a diffusers model directory.
5. A directory path pointing to a directory containing a bunch of
.ckpt and .safetensors files. All will be imported.
You can enter multiple items into the textbox, each one on a separate
line. You can paste into the textbox using ctrl-shift-V or by dragging
and dropping a file/directory from the desktop into the box.
The script also lets you designate a directory that will be scanned
for new model files each time InvokeAI starts up. These models will be
added automatically.
Lastly, the script gives you a checkbox option to convert legacy models
into diffusers, or to run the legacy model directly. If you choose to
convert, the original .ckpt/.safetensors file will **not** be deleted,
but a new diffusers directory will be created, using twice your disk
space. However, the diffusers version will load faster, and will be
compatible with InvokeAI 3.0.
From the `invoke` launcher, choose option (6) "re-run the configure
script to download new models." This will launch the same script that
prompted you to select models at install time. You can use this to add
models that you skipped the first time around. It is all right to
specify a model that was previously downloaded; the script will just
confirm that the files are complete.
### Installation via the CLI
@@ -175,15 +144,19 @@ invoke> !import_model https://example.org/sd_models/martians.safetensors
For this to work, the URL must not be password-protected. Otherwise
you will receive a 404 error.
When you import a legacy model, the CLI will try to figure out what
type of model it is and select the correct load configuration file.
However, one thing it can't do is to distinguish between Stable
Diffusion 2.x models trained on 512x512 vs 768x768 images. In this
case, the CLI will pop up a menu of choices, asking you to select
which type of model it is. Please consult the model documentation to
identify the correct answer, as loading with the wrong configuration
will lead to black images. You can correct the model type after the
fact using the `!edit_model` command.
When you import a legacy model, the CLI will first ask you what type
of model this is. You can indicate whether it is a model based on
Stable Diffusion 1.x (1.4 or 1.5), one based on Stable Diffusion 2.x,
or a 1.x inpainting model. Be careful to indicate the correct model
type, or it will not load correctly. You can correct the model type
after the fact using the `!edit_model` command.
The system will then ask you a few other questions about the model,
including what size image it was trained on (usually 512x512), what
name and description you wish to use for it, and whether you would
like to install a custom VAE (variable autoencoder) file for the
model. For recent models, the answer to the VAE question is usually
"no," but it won't hurt to answer "yes".
After importing, the model will load. If this is successful, you will
be asked if you want to keep the model loaded in memory to start
@@ -238,6 +211,109 @@ description for the model, whether to make this the default model that
is loaded at InvokeAI startup time, and whether to replace its
VAE. Generally the answer to the latter question is "no".
### Converting legacy models into `diffusers`
The CLI `!convert_model` will convert a `.safetensors` or `.ckpt`
models file into `diffusers` and install it. This will enable the model
to load and run faster without loss of image quality.
The usage is identical to `!import_model`. You may point the command
to either a downloaded model file on disk, or to a (non-password
protected) URL:
```bash
invoke> !convert_model C:/Users/fred/Downloads/martians.safetensors
```
After a successful conversion, the CLI will offer you the option of
deleting the original `.ckpt` or `.safetensors` file.
### Optimizing a previously-installed model
Lastly, if you have previously installed a `.ckpt` or `.safetensors`
file and wish to convert it into a `diffusers` model, you can do this
without re-downloading and converting the original file using the
`!optimize_model` command. Simply pass the short name of an existing
installed model:
```bash
invoke> !optimize_model martians-v1.0
```
The model will be converted into `diffusers` format and replace the
previously installed version. You will again be offered the
opportunity to delete the original `.ckpt` or `.safetensors` file.
### Related CLI Commands
There are a whole series of additional model management commands in
the CLI that you can read about in [Command-Line
Interface](../features/CLI.md). These include:
* `!models` - List all installed models
* `!switch <model name>` - Switch to the indicated model
* `!edit_model <model name>` - Edit the indicated model to change its name, description or other properties
* `!del_model <model name>` - Delete the indicated model
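For instance, a session might list the installed models and then switch to one by its short name; the model name here is borrowed from the example stanzas below:
```bash
invoke> !models
invoke> !switch arabian-nights-1.0
```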
### Manually editing `configs/models.yaml`
If you are comfortable with a text editor then you may simply edit `models.yaml`
directly.
You will need to download the desired `.ckpt/.safetensors` file and
place it somewhere on your machine's filesystem. Alternatively, for a
`diffusers` model, record the repo_id or download the whole model
directory. Then using a **text** editor (e.g. the Windows Notepad
application), open the file `configs/models.yaml`, and add a new
stanza that follows this model:
#### A legacy model
A legacy `.ckpt` or `.safetensors` entry will look like this:
```yaml
arabian-nights-1.0:
description: A great fine-tune in Arabian Nights style
weights: ./path/to/arabian-nights-1.0.ckpt
config: ./configs/stable-diffusion/v1-inference.yaml
format: ckpt
width: 512
height: 512
default: false
```
Note that `format` is `ckpt` for both `.ckpt` and `.safetensors` files.
#### A diffusers model
A stanza for a `diffusers` model will look like this for a HuggingFace
model with a repository ID:
```yaml
arabian-nights-1.1:
description: An even better fine-tune of the Arabian Nights
repo_id: captahab/arabian-nights-1.1
format: diffusers
default: true
```
And for a downloaded directory:
```yaml
arabian-nights-1.1:
description: An even better fine-tune of the Arabian Nights
path: /path/to/captahab-arabian-nights-1.1
format: diffusers
default: true
```
There is additional syntax for indicating an external VAE to use with
this model. See `INITIAL_MODELS.yaml` and `models.yaml` for examples.
After you save the modified `models.yaml` file relaunch
`invokeai`. The new model will now be available for your use.
### Installation via the WebUI
To access the WebUI Model Manager, click on the button that looks like
@@ -317,143 +393,3 @@ And here is what the same argument looks like in `invokeai.init`:
--no-nsfw_checker
--autoconvert /home/fred/stable-diffusion-checkpoints
```
### Specifying a configuration file for legacy checkpoints
Some checkpoint files come with instructions to use a specific .yaml
configuration file. For InvokeAI to load this file correctly, please put
the config file in the same directory as the corresponding `.ckpt` or
`.safetensors` file and make sure the file has the same basename as
the model file. Here is an example:
```bash
wonderful-model-v2.ckpt
wonderful-model-v2.yaml
```
This is not needed for `diffusers` models, which come with their own
pre-packaged configuration.
### Specifying a custom VAE file for legacy checkpoints
To associate a custom VAE with a legacy file, place the VAE file in
the same directory as the corresponding `.ckpt` or
`.safetensors` file and make sure the file has the same basename as
the model file. Use the suffix `.vae.pt` for VAE checkpoint files, and
`.vae.safetensors` for VAE safetensors files. There is no requirement
that both the model and the VAE follow the same format.
Example:
```bash
wonderful-model-v2.pt
wonderful-model-v2.vae.safetensors
```
### Converting legacy models into `diffusers`
The CLI `!convert_model` will convert a `.safetensors` or `.ckpt`
models file into `diffusers` and install it. This will enable the model
to load and run faster without loss of image quality.
The usage is identical to `!import_model`. You may point the command
to either a downloaded model file on disk, or to a (non-password
protected) URL:
```bash
invoke> !convert_model C:/Users/fred/Downloads/martians.safetensors
```
After a successful conversion, the CLI will offer you the option of
deleting the original `.ckpt` or `.safetensors` file.
### Optimizing a previously-installed model
Lastly, if you have previously installed a `.ckpt` or `.safetensors`
file and wish to convert it into a `diffusers` model, you can do this
without re-downloading and converting the original file using the
`!optimize_model` command. Simply pass the short name of an existing
installed model:
```bash
invoke> !optimize_model martians-v1.0
```
The model will be converted into `diffusers` format and replace the
previously installed version. You will again be offered the
opportunity to delete the original `.ckpt` or `.safetensors` file.
Alternatively you can use the WebUI's model manager to handle diffusers
optimization. Select the legacy model you wish to convert, and then
look for a button labeled "Convert to Diffusers" in the upper right of
the window.
### Related CLI Commands
There are a whole series of additional model management commands in
the CLI that you can read about in [Command-Line
Interface](../features/CLI.md). These include:
* `!models` - List all installed models
* `!switch <model name>` - Switch to the indicated model
* `!edit_model <model name>` - Edit the indicated model to change its name, description or other properties
* `!del_model <model name>` - Delete the indicated model
### Manually editing `configs/models.yaml`
If you are comfortable with a text editor then you may simply edit `models.yaml`
directly.
You will need to download the desired `.ckpt/.safetensors` file and
place it somewhere on your machine's filesystem. Alternatively, for a
`diffusers` model, record the repo_id or download the whole model
directory. Then using a **text** editor (e.g. the Windows Notepad
application), open the file `configs/models.yaml`, and add a new
stanza that follows this model:
#### A legacy model
A legacy `.ckpt` or `.safetensors` entry will look like this:
```yaml
arabian-nights-1.0:
description: A great fine-tune in Arabian Nights style
weights: ./path/to/arabian-nights-1.0.ckpt
config: ./configs/stable-diffusion/v1-inference.yaml
format: ckpt
width: 512
height: 512
default: false
```
Note that `format` is `ckpt` for both `.ckpt` and `.safetensors` files.
#### A diffusers model
A stanza for a `diffusers` model will look like this for a HuggingFace
model with a repository ID:
```yaml
arabian-nights-1.1:
description: An even better fine-tune of the Arabian Nights
repo_id: captahab/arabian-nights-1.1
format: diffusers
default: true
```
And for a downloaded directory:
```yaml
arabian-nights-1.1:
description: An even better fine-tune of the Arabian Nights
path: /path/to/captahab-arabian-nights-1.1
format: diffusers
default: true
```
There is additional syntax for indicating an external VAE to use with
this model. See `INITIAL_MODELS.yaml` and `models.yaml` for examples.
After you save the modified `models.yaml` file relaunch
`invokeai`. The new model will now be available for your use.

View File

@@ -23,16 +23,14 @@ We thank them for all of their time and hard work.
* @damian0815 - Attention Systems and Gameplay Engineer
* @mauwii (Matthias Wild) - Continuous integration and product maintenance engineer
* @Netsvetaev (Artur Netsvetaev) - UI/UX Developer
* @tildebyte - General gadfly and resident (self-appointed) know-it-all
* @keturn - Lead for Diffusers port
* @ebr (Eugene Brodsky) - Cloud/DevOps/Software engineer; your friendly neighbourhood cluster-autoscaler
* @jpphoto (Jonathan Pollack) - Inference and rendering engine optimization
* @genomancer (Gregg Helt) - Model training and merging
* @gogurtenjoyer - User support and testing
* @whosawwhatsis - User support and testing
## **Contributions by**
- [tildebyte](https://github.com/tildebyte)
- [Sean McLellan](https://github.com/Oceanswave)
- [Kevin Gibbons](https://github.com/bakkot)
- [Tesseract Cat](https://github.com/TesseractCat)
@@ -80,7 +78,6 @@ We thank them for all of their time and hard work.
- [psychedelicious](https://github.com/psychedelicious)
- [damian0815](https://github.com/damian0815)
- [Eugene Brodsky](https://github.com/ebr)
- [Statcomm](https://github.com/statcomm)
## **Original CompVis Authors**

View File

@@ -0,0 +1,5 @@
mkdocs
mkdocs-material>=8, <9
mkdocs-git-revision-date-localized-plugin
mkdocs-redirects==1.2.0

View File

@@ -144,8 +144,8 @@ class Installer:
from plumbum import FG, local
python = local[get_python_from_venv(venv_dir)]
python[ "-m", "pip", "install", "--upgrade", "pip"] & FG
pip = local[get_pip_from_venv(venv_dir)]
pip[ "install", "--upgrade", "pip"] & FG
return venv_dir
@@ -241,18 +241,14 @@ class InvokeAiInstance:
from plumbum import FG, local
# Note that we're installing pinned versions of torch and
# torchvision here, which *should* correspond to what is
# in pyproject.toml. This is to prevent torch 2.0 from
# being installed and immediately uninstalled and replaced with 1.13
pip = local[self.pip]
(
pip[
"install",
"--require-virtualenv",
"torch~=1.13.1",
"torchvision~=0.14.1",
"torch",
"torchvision",
"--force-reinstall",
"--find-links" if find_links is not None else None,
find_links,
@@ -383,9 +379,6 @@ class InvokeAiInstance:
shutil.copy(src, dest)
os.chmod(dest, 0o0755)
if OS == "Linux":
shutil.copy(Path(__file__).parents[1] / "templates" / "dialogrc", self.runtime / '.dialogrc')
def update(self):
pass
@@ -412,22 +405,6 @@ def get_pip_from_venv(venv_path: Path) -> str:
return str(venv_path.expanduser().resolve() / pip)
def get_python_from_venv(venv_path: Path) -> str:
"""
Given a path to a virtual environment, get the absolute path to the `python` executable
in a cross-platform fashion. Does not validate that the python executable
actually exists in the virtualenv.
:param venv_path: Path to the virtual environment
:type venv_path: Path
:return: Absolute path to the python executable
:rtype: str
"""
python = "Scripts\python.exe" if OS == "Windows" else "bin/python"
return str(venv_path.expanduser().resolve() / python)
def set_sys_path(venv_path: Path) -> None:
"""
Given a path to a virtual environment, set the sys.path, in a cross-platform fashion,

View File

@@ -1,27 +0,0 @@
# Screen
use_shadow = OFF
use_colors = ON
screen_color = (BLACK, BLACK, ON)
# Box
dialog_color = (YELLOW, BLACK , ON)
title_color = (YELLOW, BLACK, ON)
border_color = (YELLOW, BLACK, OFF)
border2_color = (YELLOW, BLACK, OFF)
# Button
button_active_color = (RED, BLACK, OFF)
button_inactive_color = (YELLOW, BLACK, OFF)
button_label_active_color = (YELLOW,BLACK,ON)
button_label_inactive_color = (YELLOW,BLACK,ON)
# Menu box
menubox_color = (BLACK, BLACK, ON)
menubox_border_color = (YELLOW, BLACK, OFF)
menubox_border2_color = (YELLOW, BLACK, OFF)
# Menu window
item_color = (YELLOW, BLACK, OFF)
item_selected_color = (BLACK, YELLOW, OFF)
tag_key_color = (YELLOW, BLACK, OFF)
tag_key_selected_color = (BLACK, YELLOW, OFF)

View File

@@ -1,10 +1,5 @@
#!/bin/bash
# MIT License
# Coauthored by Lincoln Stein, Eugene Brodsky and Joshua Kimsey
# Copyright 2023, The InvokeAI Development Team
####
# This launch script assumes that:
# 1. it is located in the runtime directory,
@@ -16,168 +11,85 @@
set -eu
# Ensure we're in the correct folder in case user's CWD is somewhere else
# ensure we're in the correct folder in case user's CWD is somewhere else
scriptdir=$(dirname "$0")
cd "$scriptdir"
. .venv/bin/activate
export INVOKEAI_ROOT="$scriptdir"
PARAMS=$@
# Check to see if dialog is installed (it seems to be fairly standard, but good to check regardless) and if the user has passed the --no-tui argument to disable the dialog TUI
tui=true
if command -v dialog &>/dev/null; then
# This must use $@ to properly loop through the arguments passed by the user
for arg in "$@"; do
if [ "$arg" == "--no-tui" ]; then
tui=false
# Remove the --no-tui argument to avoid errors later on when passing arguments to InvokeAI
PARAMS=$(echo "$PARAMS" | sed 's/--no-tui//')
break
fi
done
else
tui=false
fi
# Set required env var for torch on mac MPS
# set required env var for torch on mac MPS
if [ "$(uname -s)" == "Darwin" ]; then
export PYTORCH_ENABLE_MPS_FALLBACK=1
fi
# Primary function for the case statement to determine user input
do_choice() {
case $1 in
1)
clear
printf "Generate images with a browser-based interface\n"
invokeai --web $PARAMS
;;
2)
clear
printf "Generate images using a command-line interface\n"
invokeai $PARAMS
;;
3)
clear
printf "Textual inversion training\n"
invokeai-ti --gui $PARAMS
;;
4)
clear
printf "Merge models (diffusers type only)\n"
invokeai-merge --gui $PARAMS
;;
5)
clear
printf "Download and install models\n"
invokeai-model-install --root ${INVOKEAI_ROOT}
;;
6)
clear
printf "Change InvokeAI startup options\n"
invokeai-configure --root ${INVOKEAI_ROOT} --skip-sd-weights --skip-support-models
;;
7)
clear
printf "Re-run the configure script to fix a broken install\n"
invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only
;;
8)
clear
printf "Open the developer console\n"
file_name=$(basename "${BASH_SOURCE[0]}")
bash --init-file "$file_name"
;;
9)
clear
printf "Update InvokeAI\n"
invokeai-update
;;
10)
clear
printf "Command-line help\n"
invokeai --help
;;
"HELP 1")
clear
printf "Command-line help\n"
invokeai --help
;;
*)
clear
printf "Exiting...\n"
exit
;;
esac
clear
}
# Dialog-based TUI for launching Invoke functions
do_dialog() {
options=(
1 "Generate images with a browser-based interface"
2 "Generate images using a command-line interface"
3 "Textual inversion training"
4 "Merge models (diffusers type only)"
5 "Download and install models"
6 "Change InvokeAI startup options"
7 "Re-run the configure script to fix a broken install"
8 "Open the developer console"
9 "Update InvokeAI")
choice=$(dialog --clear \
--backtitle "\Zb\Zu\Z3InvokeAI" \
--colors \
--title "What would you like to run?" \
--ok-label "Run" \
--cancel-label "Exit" \
--help-button \
--help-label "CLI Help" \
--menu "Select an option:" \
0 0 0 \
"${options[@]}" \
2>&1 >/dev/tty) || clear
do_choice "$choice"
clear
}
# Command-line interface for launching Invoke functions
do_line_input() {
clear
printf " ** For a more attractive experience, please install the 'dialog' utility using your package manager. **\n\n"
printf "Do you want to generate images using the\n"
printf "1: Browser-based UI\n"
printf "2: Command-line interface\n"
printf "3: Run textual inversion training\n"
printf "4: Merge models (diffusers type only)\n"
printf "5: Download and install models\n"
printf "6: Change InvokeAI startup options\n"
printf "7: Re-run the configure script to fix a broken install\n"
printf "8: Open the developer console\n"
printf "9: Update InvokeAI\n"
printf "10: Command-line help\n"
printf "Q: Quit\n\n"
read -p "Please enter 1-10, Q: [1] " yn
choice=${yn:='1'}
do_choice $choice
clear
}
# Main IF statement for launching Invoke with either the TUI or CLI, and for checking if the user is in the developer console
while true
do
if [ "$0" != "bash" ]; then
while true; do
if $tui; then
# .dialogrc must be located in the same directory as the invoke.sh script
export DIALOGRC="./.dialogrc"
do_dialog
else
do_line_input
fi
done
echo "Do you want to generate images using the"
echo "1. command-line interface"
echo "2. browser-based UI"
echo "3. run textual inversion training"
echo "4. merge models (diffusers type only)"
echo "5. download and install models"
echo "6. change InvokeAI startup options"
echo "7. re-run the configure script to fix a broken install"
echo "8. open the developer console"
echo "9. update InvokeAI"
echo "10. command-line help"
echo "Q - Quit"
echo ""
read -p "Please enter 1-10, Q: [2] " yn
choice=${yn:='2'}
case $choice in
1)
echo "Starting the InvokeAI command-line..."
invokeai $@
;;
2)
echo "Starting the InvokeAI browser-based UI..."
invokeai --web $@
;;
3)
echo "Starting Textual Inversion:"
invokeai-ti --gui $@
;;
4)
echo "Merging Models:"
invokeai-merge --gui $@
;;
5)
invokeai-model-install --root ${INVOKEAI_ROOT}
;;
6)
invokeai-configure --root ${INVOKEAI_ROOT} --skip-sd-weights --skip-support-models
;;
7)
invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only
;;
8)
echo "Developer Console:"
file_name=$(basename "${BASH_SOURCE[0]}")
bash --init-file "$file_name"
;;
9)
echo "Update:"
invokeai-update
;;
10)
invokeai --help
;;
[qQ])
exit 0
;;
*)
echo "Invalid selection"
exit;;
esac
else # in developer console
python --version
printf "Press ^D to exit\n"
echo "Press ^D to exit"
export PS1="(InvokeAI) \u@\h \w> "
fi
done

View File

@@ -25,23 +25,14 @@ from invokeai.backend.modules.parameters import parameters_to_command
import invokeai.frontend.dist as frontend
from ldm.generate import Generate
from ldm.invoke.args import Args, APP_ID, APP_VERSION, calculate_init_img_hash
from ldm.invoke.concepts_lib import HuggingFaceConceptsLibrary
from ldm.invoke.conditioning import (
get_tokens_for_prompt_object,
get_prompt_structure,
split_weighted_subprompts,
get_tokenizer,
)
from ldm.invoke.conditioning import get_tokens_for_prompt_object, get_prompt_structure, split_weighted_subprompts, \
get_tokenizer
from ldm.invoke.generator.diffusers_pipeline import PipelineIntermediateState
from ldm.invoke.generator.inpaint import infill_methods
from ldm.invoke.globals import (
Globals,
global_converted_ckpts_dir,
global_models_dir,
global_lora_models_dir,
)
from ldm.invoke.globals import Globals, global_converted_ckpts_dir
from ldm.invoke.pngwriter import PngWriter, retrieve_metadata
from compel.prompt_parser import Blend
from ldm.invoke.globals import global_models_dir
from ldm.invoke.merge_diffusers import merge_diffusion_models
# Loading Arguments
@@ -202,7 +193,8 @@ class InvokeAIWebServer:
(width, height) = pil_image.size
thumbnail_path = save_thumbnail(
pil_image, os.path.basename(file_path), self.thumbnail_image_path
pil_image, os.path.basename(
file_path), self.thumbnail_image_path
)
response = {
@@ -232,7 +224,7 @@ class InvokeAIWebServer:
server="flask_socketio",
width=1600,
height=1000,
port=self.port,
port=self.port
).run()
except KeyboardInterrupt:
import sys
@@ -273,14 +265,16 @@ class InvokeAIWebServer:
# location for "finished" images
self.result_path = args.outdir
# temporary path for intermediates
self.intermediate_path = os.path.join(self.result_path, "intermediates/")
self.intermediate_path = os.path.join(
self.result_path, "intermediates/")
# path for user-uploaded init images and masks
self.init_image_path = os.path.join(self.result_path, "init-images/")
self.mask_image_path = os.path.join(self.result_path, "mask-images/")
# path for temp images e.g. gallery generations which are not committed
self.temp_image_path = os.path.join(self.result_path, "temp-images/")
# path for thumbnail images
self.thumbnail_image_path = os.path.join(self.result_path, "thumbnails/")
self.thumbnail_image_path = os.path.join(
self.result_path, "thumbnails/")
# txt log
self.log_path = os.path.join(self.result_path, "invoke_log.txt")
# make all output paths
@@ -305,22 +299,21 @@ class InvokeAIWebServer:
config["infill_methods"] = infill_methods()
socketio.emit("systemConfig", config)
@socketio.on("searchForModels")
@socketio.on('searchForModels')
def handle_search_models(search_folder: str):
try:
if not search_folder:
socketio.emit(
"foundModels",
{"search_folder": None, "found_models": None},
{'search_folder': None, 'found_models': None},
)
else:
(
search_folder,
found_models,
) = self.generate.model_manager.search_models(search_folder)
search_folder, found_models = self.generate.model_manager.search_models(
search_folder)
socketio.emit(
"foundModels",
{"search_folder": search_folder, "found_models": found_models},
{'search_folder': search_folder,
'found_models': found_models},
)
except Exception as e:
self.handle_exceptions(e)
@@ -329,11 +322,11 @@ class InvokeAIWebServer:
@socketio.on("addNewModel")
def handle_add_model(new_model_config: dict):
try:
model_name = new_model_config["name"]
del new_model_config["name"]
model_name = new_model_config['name']
del new_model_config['name']
model_attributes = new_model_config
if len(model_attributes["vae"]) == 0:
del model_attributes["vae"]
if len(model_attributes['vae']) == 0:
del model_attributes['vae']
update = False
current_model_list = self.generate.model_manager.list_models()
if model_name in current_model_list:
@@ -342,20 +335,14 @@ class InvokeAIWebServer:
print(f">> Adding New Model: {model_name}")
self.generate.model_manager.add_model(
model_name=model_name,
model_attributes=model_attributes,
clobber=True,
)
model_name=model_name, model_attributes=model_attributes, clobber=True)
self.generate.model_manager.commit(opt.conf)
new_model_list = self.generate.model_manager.list_models()
socketio.emit(
"newModelAdded",
{
"new_model_name": model_name,
"model_list": new_model_list,
"update": update,
},
{"new_model_name": model_name,
"model_list": new_model_list, 'update': update},
)
print(f">> New Model Added: {model_name}")
except Exception as e:
@@ -370,10 +357,8 @@ class InvokeAIWebServer:
updated_model_list = self.generate.model_manager.list_models()
socketio.emit(
"modelDeleted",
{
"deleted_model_name": model_name,
"model_list": updated_model_list,
},
{"deleted_model_name": model_name,
"model_list": updated_model_list},
)
print(f">> Model Deleted: {model_name}")
except Exception as e:
@@ -398,48 +383,41 @@ class InvokeAIWebServer:
except Exception as e:
self.handle_exceptions(e)
@socketio.on("convertToDiffusers")
@socketio.on('convertToDiffusers')
def convert_to_diffusers(model_to_convert: dict):
try:
if model_info := self.generate.model_manager.model_info(
model_name=model_to_convert["model_name"]
):
if "weights" in model_info:
ckpt_path = Path(model_info["weights"])
original_config_file = Path(model_info["config"])
model_name = model_to_convert["model_name"]
model_description = model_info["description"]
if (model_info := self.generate.model_manager.model_info(model_name=model_to_convert['model_name'])):
if 'weights' in model_info:
ckpt_path = Path(model_info['weights'])
original_config_file = Path(model_info['config'])
model_name = model_to_convert['model_name']
model_description = model_info['description']
else:
self.socketio.emit(
"error", {"message": "Model is not a valid checkpoint file"}
)
"error", {"message": "Model is not a valid checkpoint file"})
else:
self.socketio.emit(
"error", {"message": "Could not retrieve model info."}
)
"error", {"message": "Could not retrieve model info."})
if not ckpt_path.is_absolute():
ckpt_path = Path(Globals.root, ckpt_path)
if original_config_file and not original_config_file.is_absolute():
original_config_file = Path(Globals.root, original_config_file)
original_config_file = Path(
Globals.root, original_config_file)
diffusers_path = Path(
ckpt_path.parent.absolute(), f"{model_name}_diffusers"
ckpt_path.parent.absolute(),
f'{model_name}_diffusers'
)
if model_to_convert["save_location"] == "root":
if model_to_convert['save_location'] == 'root':
diffusers_path = Path(
global_converted_ckpts_dir(), f"{model_name}_diffusers"
)
global_converted_ckpts_dir(), f'{model_name}_diffusers')
if (
model_to_convert["save_location"] == "custom"
and model_to_convert["custom_location"] is not None
):
if model_to_convert['save_location'] == 'custom' and model_to_convert['custom_location'] is not None:
diffusers_path = Path(
model_to_convert["custom_location"], f"{model_name}_diffusers"
)
model_to_convert['custom_location'], f'{model_name}_diffusers')
if diffusers_path.exists():
shutil.rmtree(diffusers_path)
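The conversion handler above derives the output directory for the converted model from the requested save location; a minimal sketch of that branching in isolation, assuming the same Globals and global_converted_ckpts_dir helpers, with a hypothetical model name and checkpoint path:
# Minimal sketch of the save-location branching in the handler above.
# model_name, ckpt_path and custom_location are illustrative placeholders.
from pathlib import Path
from ldm.invoke.globals import Globals, global_converted_ckpts_dir

model_name = "my-checkpoint"
ckpt_path = Path(Globals.root, "models/ldm/my-checkpoint.ckpt")
save_location = "root"          # "root", "custom", or anything else (keep next to the .ckpt)
custom_location = None

diffusers_path = Path(ckpt_path.parent.absolute(), f"{model_name}_diffusers")
if save_location == "root":
    diffusers_path = Path(global_converted_ckpts_dir(), f"{model_name}_diffusers")
elif save_location == "custom" and custom_location is not None:
    diffusers_path = Path(custom_location, f"{model_name}_diffusers")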
@@ -457,99 +435,54 @@ class InvokeAIWebServer:
new_model_list = self.generate.model_manager.list_models()
socketio.emit(
"modelConverted",
{
"new_model_name": model_name,
"model_list": new_model_list,
"update": True,
},
{"new_model_name": model_name,
"model_list": new_model_list, 'update': True},
)
print(f">> Model Converted: {model_name}")
except Exception as e:
self.handle_exceptions(e)
@socketio.on("mergeDiffusersModels")
@socketio.on('mergeDiffusersModels')
def merge_diffusers_models(model_merge_info: dict):
try:
models_to_merge = model_merge_info["models_to_merge"]
models_to_merge = model_merge_info['models_to_merge']
model_ids_or_paths = [
self.generate.model_manager.model_name_or_path(x)
for x in models_to_merge
]
self.generate.model_manager.model_name_or_path(x) for x in models_to_merge]
merged_pipe = merge_diffusion_models(
model_ids_or_paths,
model_merge_info["alpha"],
model_merge_info["interp"],
model_merge_info["force"],
)
model_ids_or_paths, model_merge_info['alpha'], model_merge_info['interp'], model_merge_info['force'])
dump_path = global_models_dir() / "merged_models"
if model_merge_info["model_merge_save_path"] is not None:
dump_path = Path(model_merge_info["model_merge_save_path"])
dump_path = global_models_dir() / 'merged_models'
if model_merge_info['model_merge_save_path'] is not None:
dump_path = Path(model_merge_info['model_merge_save_path'])
os.makedirs(dump_path, exist_ok=True)
dump_path = dump_path / model_merge_info["merged_model_name"]
dump_path = dump_path / model_merge_info['merged_model_name']
merged_pipe.save_pretrained(dump_path, safe_serialization=1)
merged_model_config = dict(
model_name=model_merge_info["merged_model_name"],
model_name=model_merge_info['merged_model_name'],
description=f'Merge of models {", ".join(models_to_merge)}',
commit_to_conf=opt.conf,
commit_to_conf=opt.conf
)
if vae := self.generate.model_manager.config[models_to_merge[0]].get(
"vae", None
):
print(f">> Using configured VAE assigned to {models_to_merge[0]}")
if vae := self.generate.model_manager.config[models_to_merge[0]].get("vae", None):
print(
f">> Using configured VAE assigned to {models_to_merge[0]}")
merged_model_config.update(vae=vae)
self.generate.model_manager.import_diffuser_model(
dump_path, **merged_model_config
)
dump_path, **merged_model_config)
new_model_list = self.generate.model_manager.list_models()
socketio.emit(
"modelsMerged",
{
"merged_models": models_to_merge,
"merged_model_name": model_merge_info["merged_model_name"],
"model_list": new_model_list,
"update": True,
},
{"merged_models": models_to_merge,
"merged_model_name": model_merge_info['merged_model_name'],
"model_list": new_model_list, 'update': True},
)
print(f">> Models Merged: {models_to_merge}")
print(f">> New Model Added: {model_merge_info['merged_model_name']}")
except Exception as e:
self.handle_exceptions(e)
@socketio.on("getLoraModels")
def get_lora_models():
try:
lora_path = global_lora_models_dir()
loras = []
for root, _, files in os.walk(lora_path):
models = [
Path(root, x)
for x in files
if Path(x).suffix in [".ckpt", ".pt", ".safetensors"]
]
loras = loras + models
found_loras = []
for lora in sorted(loras, key=lambda s: s.stem.lower()):
location = str(lora.resolve()).replace("\\", "/")
found_loras.append({"name": lora.stem, "location": location})
socketio.emit("foundLoras", found_loras)
except Exception as e:
self.handle_exceptions(e)
@socketio.on("getTextualInversionTriggers")
def get_ti_triggers():
try:
local_triggers = self.generate.model.textual_inversion_manager.get_all_trigger_strings()
locals = [{'name': x} for x in sorted(local_triggers, key=str.casefold)]
concepts = HuggingFaceConceptsLibrary().list_concepts(minimum_likes=5)
concepts = [{'name': f'<{x}>'} for x in sorted(concepts, key=str.casefold) if f'<{x}>' not in local_triggers]
socketio.emit("foundTextualInversionTriggers", {'local_triggers': locals, 'huggingface_concepts': concepts})
print(
f">> New Model Added: {model_merge_info['merged_model_name']}")
except Exception as e:
self.handle_exceptions(e)
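Stripped of the socket plumbing, the merge handler above boils down to a short sequence of calls; a minimal sketch with illustrative model identifiers and merge settings (the call signatures mirror the handler, but the values are not taken from it):
import os
from ldm.invoke.globals import global_models_dir
from ldm.invoke.merge_diffusers import merge_diffusion_models

# Illustrative diffusers model ids; in the handler these come from
# model_manager.model_name_or_path() for each selected model.
model_ids_or_paths = ["runwayml/stable-diffusion-v1-5", "wavymulder/Analog-Diffusion"]

merged_pipe = merge_diffusion_models(
    model_ids_or_paths,
    0.5,          # alpha: interpolation weight between the models
    "sigmoid",    # interp: interpolation method (assumed to be one of the supported values)
    False,        # force: merge even when the models' types differ
)

dump_path = global_models_dir() / "merged_models"
os.makedirs(dump_path, exist_ok=True)
dump_path = dump_path / "my-merged-model"     # hypothetical merged model name
merged_pipe.save_pretrained(dump_path, safe_serialization=1)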
@@ -567,8 +500,7 @@ class InvokeAIWebServer:
os.remove(thumbnail_path)
except Exception as e:
socketio.emit(
"error", {"message": f"Unable to delete {f}: {str(e)}"}
)
"error", {"message": f"Unable to delete {f}: {str(e)}"})
pass
socketio.emit("tempFolderEmptied")
@@ -579,7 +511,8 @@ class InvokeAIWebServer:
def save_temp_image_to_gallery(url):
try:
image_path = self.get_image_path_from_url(url)
new_path = os.path.join(self.result_path, os.path.basename(image_path))
new_path = os.path.join(
self.result_path, os.path.basename(image_path))
shutil.copy2(image_path, new_path)
if os.path.splitext(new_path)[1] == ".png":
@@ -592,7 +525,8 @@ class InvokeAIWebServer:
(width, height) = pil_image.size
thumbnail_path = save_thumbnail(
pil_image, os.path.basename(new_path), self.thumbnail_image_path
pil_image, os.path.basename(
new_path), self.thumbnail_image_path
)
image_array = [
@@ -651,7 +585,8 @@ class InvokeAIWebServer:
(width, height) = pil_image.size
thumbnail_path = save_thumbnail(
pil_image, os.path.basename(path), self.thumbnail_image_path
pil_image, os.path.basename(
path), self.thumbnail_image_path
)
image_array.append(
@@ -670,8 +605,7 @@ class InvokeAIWebServer:
)
except Exception as e:
socketio.emit(
"error", {"message": f"Unable to load {path}: {str(e)}"}
)
"error", {"message": f"Unable to load {path}: {str(e)}"})
pass
socketio.emit(
@@ -721,7 +655,8 @@ class InvokeAIWebServer:
(width, height) = pil_image.size
thumbnail_path = save_thumbnail(
pil_image, os.path.basename(path), self.thumbnail_image_path
pil_image, os.path.basename(
path), self.thumbnail_image_path
)
image_array.append(
@@ -741,8 +676,7 @@ class InvokeAIWebServer:
except Exception as e:
print(f">> Unable to load {path}")
socketio.emit(
"error", {"message": f"Unable to load {path}: {str(e)}"}
)
"error", {"message": f"Unable to load {path}: {str(e)}"})
pass
socketio.emit(
@@ -776,9 +710,10 @@ class InvokeAIWebServer:
printable_parameters["init_mask"][:64] + "..."
)
print(f"\n>> Image Generation Parameters:\n\n{printable_parameters}\n")
print(f">> ESRGAN Parameters: {esrgan_parameters}")
print(f">> Facetool Parameters: {facetool_parameters}")
print(
f'\n>> Image Generation Parameters:\n\n{printable_parameters}\n')
print(f'>> ESRGAN Parameters: {esrgan_parameters}')
print(f'>> Facetool Parameters: {facetool_parameters}')
self.generate_images(
generation_parameters,
@@ -815,9 +750,11 @@ class InvokeAIWebServer:
if postprocessing_parameters["type"] == "esrgan":
progress.set_current_status("common.statusUpscalingESRGAN")
elif postprocessing_parameters["type"] == "gfpgan":
progress.set_current_status("common.statusRestoringFacesGFPGAN")
progress.set_current_status(
"common.statusRestoringFacesGFPGAN")
elif postprocessing_parameters["type"] == "codeformer":
progress.set_current_status("common.statusRestoringFacesCodeFormer")
progress.set_current_status(
"common.statusRestoringFacesCodeFormer")
socketio.emit("progressUpdate", progress.to_formatted_dict())
eventlet.sleep(0)
@@ -982,7 +919,8 @@ class InvokeAIWebServer:
init_img_url = generation_parameters["init_img"]
original_bounding_box = generation_parameters["bounding_box"].copy()
original_bounding_box = generation_parameters["bounding_box"].copy(
)
initial_image = dataURL_to_image(
generation_parameters["init_img"]
@@ -1059,9 +997,8 @@ class InvokeAIWebServer:
elif generation_parameters["generation_mode"] == "img2img":
init_img_url = generation_parameters["init_img"]
init_img_path = self.get_image_path_from_url(init_img_url)
generation_parameters["init_img"] = Image.open(init_img_path).convert(
"RGB"
)
generation_parameters["init_img"] = Image.open(
init_img_path).convert('RGB')
def image_progress(sample, step):
if self.canceled.is_set():
@@ -1121,7 +1058,8 @@ class InvokeAIWebServer:
)
if generation_parameters["progress_latents"]:
image = self.generate.sample_to_lowres_estimated_image(sample)
image = self.generate.sample_to_lowres_estimated_image(
sample)
(width, height) = image.size
width *= 8
height *= 8
@@ -1140,7 +1078,8 @@ class InvokeAIWebServer:
},
)
self.socketio.emit("progressUpdate", progress.to_formatted_dict())
self.socketio.emit(
"progressUpdate", progress.to_formatted_dict())
eventlet.sleep(0)
def image_done(image, seed, first_seed, attention_maps_image=None):
@@ -1167,7 +1106,8 @@ class InvokeAIWebServer:
progress.set_current_status("common.statusGenerationComplete")
self.socketio.emit("progressUpdate", progress.to_formatted_dict())
self.socketio.emit(
"progressUpdate", progress.to_formatted_dict())
eventlet.sleep(0)
all_parameters = generation_parameters
@@ -1178,7 +1118,8 @@ class InvokeAIWebServer:
and all_parameters["variation_amount"] > 0
):
first_seed = first_seed or seed
this_variation = [[seed, all_parameters["variation_amount"]]]
this_variation = [
[seed, all_parameters["variation_amount"]]]
all_parameters["with_variations"] = (
prior_variations + this_variation
)
@@ -1194,13 +1135,14 @@ class InvokeAIWebServer:
if esrgan_parameters:
progress.set_current_status("common.statusUpscaling")
progress.set_current_status_has_steps(False)
self.socketio.emit("progressUpdate", progress.to_formatted_dict())
self.socketio.emit(
"progressUpdate", progress.to_formatted_dict())
eventlet.sleep(0)
image = self.esrgan.process(
image=image,
upsampler_scale=esrgan_parameters["level"],
denoise_str=esrgan_parameters["denoise_str"],
denoise_str=esrgan_parameters['denoise_str'],
strength=esrgan_parameters["strength"],
seed=seed,
)
@@ -1208,7 +1150,7 @@ class InvokeAIWebServer:
postprocessing = True
all_parameters["upscale"] = [
esrgan_parameters["level"],
esrgan_parameters["denoise_str"],
esrgan_parameters['denoise_str'],
esrgan_parameters["strength"],
]
@@ -1217,14 +1159,15 @@ class InvokeAIWebServer:
if facetool_parameters:
if facetool_parameters["type"] == "gfpgan":
progress.set_current_status("common.statusRestoringFacesGFPGAN")
progress.set_current_status(
"common.statusRestoringFacesGFPGAN")
elif facetool_parameters["type"] == "codeformer":
progress.set_current_status(
"common.statusRestoringFacesCodeFormer"
)
"common.statusRestoringFacesCodeFormer")
progress.set_current_status_has_steps(False)
self.socketio.emit("progressUpdate", progress.to_formatted_dict())
self.socketio.emit(
"progressUpdate", progress.to_formatted_dict())
eventlet.sleep(0)
if facetool_parameters["type"] == "gfpgan":
@@ -1254,7 +1197,8 @@ class InvokeAIWebServer:
all_parameters["facetool_type"] = facetool_parameters["type"]
progress.set_current_status("common.statusSavingImage")
self.socketio.emit("progressUpdate", progress.to_formatted_dict())
self.socketio.emit(
"progressUpdate", progress.to_formatted_dict())
eventlet.sleep(0)
# restore the stashed URLS and discard the paths, we are about to send the result to client
@@ -1271,7 +1215,8 @@ class InvokeAIWebServer:
if generation_parameters["generation_mode"] == "unifiedCanvas":
all_parameters["bounding_box"] = original_bounding_box
metadata = self.parameters_to_generated_image_metadata(all_parameters)
metadata = self.parameters_to_generated_image_metadata(
all_parameters)
command = parameters_to_command(all_parameters)
@@ -1301,27 +1246,22 @@ class InvokeAIWebServer:
if progress.total_iterations > progress.current_iteration:
progress.set_current_step(1)
progress.set_current_status("common.statusIterationComplete")
progress.set_current_status(
"common.statusIterationComplete")
progress.set_current_status_has_steps(False)
else:
progress.mark_complete()
self.socketio.emit("progressUpdate", progress.to_formatted_dict())
self.socketio.emit(
"progressUpdate", progress.to_formatted_dict())
eventlet.sleep(0)
parsed_prompt, _ = get_prompt_structure(generation_parameters["prompt"])
tokens = (
None
if type(parsed_prompt) is Blend
else get_tokens_for_prompt_object(
get_tokenizer(self.generate.model), parsed_prompt
)
)
attention_maps_image_base64_url = (
None
if attention_maps_image is None
parsed_prompt, _ = get_prompt_structure(
generation_parameters["prompt"])
tokens = None if type(parsed_prompt) is Blend else \
get_tokens_for_prompt_object(get_tokenizer(self.generate.model), parsed_prompt)
attention_maps_image_base64_url = None if attention_maps_image is None \
else image_to_dataURL(attention_maps_image)
)
self.socketio.emit(
"generationResult",
@@ -1353,7 +1293,7 @@ class InvokeAIWebServer:
self.generate.prompt2image(
**generation_parameters,
step_callback=diffusers_step_callback_adapter,
image_callback=image_done,
image_callback=image_done
)
except KeyboardInterrupt:
@@ -1476,7 +1416,8 @@ class InvokeAIWebServer:
self, parameters, original_image_path
):
try:
current_metadata = retrieve_metadata(original_image_path)["sd-metadata"]
current_metadata = retrieve_metadata(
original_image_path)["sd-metadata"]
postprocessing_metadata = {}
"""
@@ -1516,7 +1457,8 @@ class InvokeAIWebServer:
postprocessing_metadata
)
else:
current_metadata["image"]["postprocessing"] = [postprocessing_metadata]
current_metadata["image"]["postprocessing"] = [
postprocessing_metadata]
return current_metadata
@@ -1612,7 +1554,8 @@ class InvokeAIWebServer:
)
elif "thumbnails" in url:
return os.path.abspath(
os.path.join(self.thumbnail_image_path, os.path.basename(url))
os.path.join(self.thumbnail_image_path,
os.path.basename(url))
)
else:
return os.path.abspath(
@@ -1658,7 +1601,7 @@ class InvokeAIWebServer:
except Exception as e:
self.handle_exceptions(e)
def handle_exceptions(self, exception, emit_key: str = "error"):
def handle_exceptions(self, exception, emit_key: str = 'error'):
self.socketio.emit(emit_key, {"message": (str(exception))})
print("\n")
traceback.print_exc()

View File

@@ -1,67 +0,0 @@
model:
base_learning_rate: 1.0e-4
target: ldm.models.diffusion.ddpm.LatentDiffusion
params:
linear_start: 0.00085
linear_end: 0.0120
num_timesteps_cond: 1
log_every_t: 200
timesteps: 1000
first_stage_key: "jpg"
cond_stage_key: "txt"
image_size: 64
channels: 4
cond_stage_trainable: false
conditioning_key: crossattn
monitor: val/loss_simple_ema
scale_factor: 0.18215
use_ema: False # we set this to false because this is an inference only config
unet_config:
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
params:
use_checkpoint: True
use_fp16: True
image_size: 32 # unused
in_channels: 4
out_channels: 4
model_channels: 320
attention_resolutions: [ 4, 2, 1 ]
num_res_blocks: 2
channel_mult: [ 1, 2, 4, 4 ]
num_head_channels: 64 # need to fix for flash-attn
use_spatial_transformer: True
use_linear_in_transformer: True
transformer_depth: 1
context_dim: 1024
legacy: False
first_stage_config:
target: ldm.models.autoencoder.AutoencoderKL
params:
embed_dim: 4
monitor: val/rec_loss
ddconfig:
#attn_type: "vanilla-xformers"
double_z: true
z_channels: 4
resolution: 256
in_channels: 3
out_ch: 3
ch: 128
ch_mult:
- 1
- 2
- 4
- 4
num_res_blocks: 2
attn_resolutions: []
dropout: 0.0
lossconfig:
target: torch.nn.Identity
cond_stage_config:
target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
params:
freeze: True
layer: "penultimate"

View File

@@ -1,7 +1,6 @@
module.exports = {
trailingComma: 'es5',
tabWidth: 2,
endOfLine: 'auto',
semi: true,
singleQuote: true,
overrides: [

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -5,8 +5,8 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>InvokeAI - A Stable Diffusion Toolkit</title>
<link rel="shortcut icon" type="icon" href="./assets/favicon-0d253ced.ico" />
<script type="module" crossorigin src="./assets/index-f56b39bc.js"></script>
<link rel="stylesheet" href="./assets/index-2ab0eb58.css">
<script type="module" crossorigin src="./assets/index-c33fa9da.js"></script>
<link rel="stylesheet" href="./assets/index-14cb2922.css">
</head>
<body>

View File

@@ -327,13 +327,6 @@
"addModel": "Add Model",
"updateModel": "Update Model",
"availableModels": "Available Models",
"addLora": "Add Lora",
"clearLoras": "Clear Loras",
"noLoraModels": "No Loras Found",
"addTextualInversionTrigger": "Add Textual Inversion",
"addTIToNegative": "Add To Negative",
"clearTextualInversions": "Clear Textual Inversions",
"noTextualInversionTriggers": "No Textual Inversions Found",
"search": "Search",
"load": "Load",
"active": "active",
@@ -372,8 +365,7 @@
"convertToDiffusersHelpText6": "Do you wish to convert this model?",
"convertToDiffusersSaveLocation": "Save Location",
"v1": "v1",
"v2_base": "v2 (512px)",
"v2_768": "v2 (768px)",
"v2": "v2",
"inpainting": "v1 Inpainting",
"customConfig": "Custom Config",
"pathToCustomConfig": "Path To Custom Config",
@@ -490,7 +482,6 @@
"useCanvasBeta": "Use Canvas Beta Layout",
"enableImageDebugging": "Enable Image Debugging",
"useSlidersForAll": "Use Sliders For All Options",
"showHuggingFaceConcepts": "Show Textual Inversions from HF Concepts Library",
"resetWebUI": "Reset Web UI",
"resetWebUIDesc1": "Resetting the web UI only resets the browser's local cache of your images and remembered settings. It does not delete any images from disk.",
"resetWebUIDesc2": "If images aren't showing up in the gallery or something else isn't working, please try resetting before submitting an issue on GitHub.",

View File

@@ -15,8 +15,8 @@
"postinstall": "patch-package"
},
"dependencies": {
"@chakra-ui/icons": "^2.0.18",
"@chakra-ui/react": "^2.5.5",
"@chakra-ui/icons": "^2.0.17",
"@chakra-ui/react": "^2.5.1",
"@emotion/cache": "^11.10.5",
"@emotion/react": "^11.10.6",
"@emotion/styled": "^11.10.6",
@@ -52,7 +52,6 @@
"redux-persist": "^6.0.0",
"socket.io": "^4.6.0",
"socket.io-client": "^4.6.0",
"typescript": "^5.0.3",
"use-image": "^1.1.0",
"uuid": "^9.0.0",
"yarn": "^1.22.19"
@@ -62,8 +61,8 @@
"@types/react": "^18.0.28",
"@types/react-dom": "^18.0.11",
"@types/react-transition-group": "^4.4.5",
"@typescript-eslint/eslint-plugin": "^5.57.0",
"@typescript-eslint/parser": "^5.57.0",
"@typescript-eslint/eslint-plugin": "^5.52.0",
"@typescript-eslint/parser": "^5.52.0",
"babel-plugin-transform-imports": "^2.0.0",
"eslint": "^8.34.0",
"eslint-config-prettier": "^8.6.0",

View File

@@ -327,13 +327,6 @@
"addModel": "Add Model",
"updateModel": "Update Model",
"availableModels": "Available Models",
"addLora": "Add Lora",
"clearLoras": "Clear Loras",
"noLoraModels": "No Loras Found",
"addTextualInversionTrigger": "Add Textual Inversion",
"addTIToNegative": "Add To Negative",
"clearTextualInversions": "Clear Textual Inversions",
"noTextualInversionTriggers": "No Textual Inversions Found",
"search": "Search",
"load": "Load",
"active": "active",
@@ -372,8 +365,7 @@
"convertToDiffusersHelpText6": "Do you wish to convert this model?",
"convertToDiffusersSaveLocation": "Save Location",
"v1": "v1",
"v2_base": "v2 (512px)",
"v2_768": "v2 (768px)",
"v2": "v2",
"inpainting": "v1 Inpainting",
"customConfig": "Custom Config",
"pathToCustomConfig": "Path To Custom Config",
@@ -490,7 +482,6 @@
"useCanvasBeta": "Use Canvas Beta Layout",
"enableImageDebugging": "Enable Image Debugging",
"useSlidersForAll": "Use Sliders For All Options",
"showHuggingFaceConcepts": "Show Textual Inversions from HF Concepts Library",
"resetWebUI": "Reset Web UI",
"resetWebUIDesc1": "Resetting the web UI only resets the browser's local cache of your images and remembered settings. It does not delete any images from disk.",
"resetWebUIDesc2": "If images aren't showing up in the gallery or something else isn't working, please try resetting before submitting an issue on GitHub.",

View File

@@ -271,23 +271,6 @@ export declare type FoundModelResponse = {
found_models: FoundModel[];
};
export declare type FoundLora = {
name: string;
location: string;
};
export declare type FoundTextualInversionTriggers = {
name: string;
location: string;
};
export declare type FoundLorasRsponse = FoundLora[];
export declare type FoundTextualInversionTriggersResponse = {
local_triggers: FoundTextualInversionTriggers[];
huggingface_concepts: FoundTextualInversionTriggers[];
};
export declare type SystemStatusResponse = SystemStatus;
export declare type SystemConfigResponse = SystemConfig;

View File

@@ -52,12 +52,6 @@ export const requestModelChange = createAction<string>(
'socketio/requestModelChange'
);
export const getLoraModels = createAction<undefined>('socketio/getLoraModels');
export const getTextualInversionTriggers = createAction<undefined>(
'socketio/getTextualInversionTriggers'
);
export const saveStagingAreaImageToGallery = createAction<string>(
'socketio/saveStagingAreaImageToGallery'
);

View File

@@ -196,12 +196,6 @@ const makeSocketIOEmitters = (
dispatch(modelChangeRequested());
socketio.emit('requestModelChange', modelName);
},
emitGetLoraModels: () => {
socketio.emit('getLoraModels');
},
emitGetTextualInversionTriggers: () => {
socketio.emit('getTextualInversionTriggers');
},
emitSaveStagingAreaImageToGallery: (url: string) => {
socketio.emit('requestSaveStagingAreaImageToGallery', url);
},

View File

@@ -11,7 +11,6 @@ import {
errorOccurred,
processingCanceled,
setCurrentStatus,
setFoundLoras,
setFoundModels,
setIsCancelable,
setIsConnected,
@@ -20,8 +19,6 @@ import {
setSearchFolder,
setSystemConfig,
setSystemStatus,
setFoundLocalTextualInversionTriggers,
setFoundHuggingFaceTextualInversionTriggers,
} from 'features/system/store/systemSlice';
import {
@@ -37,10 +34,8 @@ import type { RootState } from 'app/store';
import { addImageToStagingArea } from 'features/canvas/store/canvasSlice';
import {
clearInitialImage,
setHuggingFaceTextualInversionConcepts,
setInfillMethod,
setInitialImage,
setLocalTextualInversionTriggers,
setMaskPath,
} from 'features/parameters/store/generationSlice';
import { tabMap } from 'features/ui/store/tabMap';
@@ -487,37 +482,6 @@ const makeSocketIOListeners = (
})
);
},
onFoundLoras: (data: InvokeAI.FoundLorasRsponse) => {
dispatch(setFoundLoras(data));
},
onFoundTextualInversionTriggers: (
data: InvokeAI.FoundTextualInversionTriggersResponse
) => {
const localTriggers = data.local_triggers;
const huggingFaceConcepts = data.huggingface_concepts;
dispatch(setFoundLocalTextualInversionTriggers(localTriggers));
dispatch(
setFoundHuggingFaceTextualInversionTriggers(huggingFaceConcepts)
);
// Assign Local TI's
const foundLocalTINames: string[] = [];
localTriggers.forEach((textualInversion) => {
foundLocalTINames.push(textualInversion.name);
});
dispatch(setLocalTextualInversionTriggers(foundLocalTINames));
// Assign HuggingFace Concepts
const foundHuggingFaceConceptNames: string[] = [];
huggingFaceConcepts.forEach((concept) => {
foundHuggingFaceConceptNames.push(concept.name);
});
dispatch(
setHuggingFaceTextualInversionConcepts(foundHuggingFaceConceptNames)
);
},
onTempFolderEmptied: () => {
dispatch(
addToast({

View File

@@ -51,8 +51,6 @@ export const socketioMiddleware = () => {
onModelConverted,
onModelsMerged,
onModelChangeFailed,
onFoundLoras,
onFoundTextualInversionTriggers,
onTempFolderEmptied,
} = makeSocketIOListeners(store);
@@ -71,8 +69,6 @@ export const socketioMiddleware = () => {
emitConvertToDiffusers,
emitMergeDiffusersModels,
emitRequestModelChange,
emitGetLoraModels,
emitGetTextualInversionTriggers,
emitSaveStagingAreaImageToGallery,
emitRequestEmptyTempFolder,
} = makeSocketIOEmitters(store, socketio);
@@ -149,17 +145,6 @@ export const socketioMiddleware = () => {
onModelChangeFailed(data);
});
socketio.on('foundLoras', (data: InvokeAI.FoundLorasRsponse) => {
onFoundLoras(data);
});
socketio.on(
'foundTextualInversionTriggers',
(data: InvokeAI.FoundTextualInversionTriggersResponse) => {
onFoundTextualInversionTriggers(data);
}
);
socketio.on('tempFolderEmptied', () => {
onTempFolderEmptied();
});
@@ -241,16 +226,6 @@ export const socketioMiddleware = () => {
break;
}
case 'socketio/getLoraModels': {
emitGetLoraModels();
break;
}
case 'socketio/getTextualInversionTriggers': {
emitGetTextualInversionTriggers();
break;
}
case 'socketio/saveStagingAreaImageToGallery': {
emitSaveStagingAreaImageToGallery(action.payload);
break;

View File

@@ -7,14 +7,13 @@ import {
MenuButtonProps,
MenuListProps,
MenuItemProps,
Text,
} from '@chakra-ui/react';
import { MouseEventHandler, ReactNode } from 'react';
import { MdArrowDropDown, MdArrowDropUp } from 'react-icons/md';
import IAIButton from './IAIButton';
import IAIIconButton from './IAIIconButton';
export interface IAIMenuItem {
interface IAIMenuItem {
item: ReactNode | string;
onClick: MouseEventHandler<HTMLButtonElement> | undefined;
}
@@ -44,7 +43,6 @@ export default function IAISimpleMenu(props: IAIMenuProps) {
const renderMenuItems = () => {
const menuItemsToRender: ReactNode[] = [];
menuItems.forEach((menuItem, index) => {
menuItemsToRender.push(
<MenuItem
@@ -84,17 +82,12 @@ export default function IAISimpleMenu(props: IAIMenuProps) {
fontSize="1.5rem"
{...menuButtonProps}
>
{menuType === 'regular' && (
<Text fontSize="0.9rem">{buttonText}</Text>
)}
{menuType === 'regular' && buttonText}
</MenuButton>
<MenuList
zIndex={15}
padding={0}
borderRadius="0.5rem"
overflow="scroll"
maxWidth={'22.5rem'}
maxHeight={500}
backgroundColor="var(--background-color-secondary)"
color="var(--text-color-secondary)"
borderColor="var(--border-color)"

View File

@@ -21,7 +21,6 @@ import {
setInitialImage,
setSeed,
} from 'features/parameters/store/generationSlice';
import { setAllPostProcessingParameters } from 'features/parameters/store/postprocessingSlice';
import { postprocessingSelector } from 'features/parameters/store/postprocessingSelectors';
import { systemSelector } from 'features/system/store/systemSelectors';
import { SystemState } from 'features/system/store/systemSlice';
@@ -190,12 +189,11 @@ const CurrentImageButtons = () => {
);
const handleClickUseAllParameters = () => {
if (!currentImage?.metadata) return;
dispatch(setAllParameters(currentImage.metadata));
dispatch(setAllPostProcessingParameters(currentImage.metadata));
if (currentImage.metadata.image.type === 'img2img') {
if (!currentImage) return;
currentImage.metadata && dispatch(setAllParameters(currentImage.metadata));
if (currentImage.metadata?.image.type === 'img2img') {
dispatch(setActiveTab('img2img'));
} else if (currentImage.metadata.image.type === 'txt2img') {
} else if (currentImage.metadata?.image.type === 'txt2img') {
dispatch(setActiveTab('txt2img'));
}
};

View File

@@ -10,7 +10,6 @@ import {
setInitialImage,
setSeed,
} from 'features/parameters/store/generationSlice';
import { setAllPostProcessingParameters } from 'features/parameters/store/postprocessingSlice';
import { DragEvent, memo, useState } from 'react';
import { FaCheck, FaTrashAlt } from 'react-icons/fa';
import DeleteImageModal from './DeleteImageModal';
@@ -115,10 +114,7 @@ const HoverableImage = memo((props: HoverableImageProps) => {
};
const handleUseAllParameters = () => {
if (metadata) {
dispatch(setAllParameters(metadata));
dispatch(setAllPostProcessingParameters(metadata));
}
metadata && dispatch(setAllParameters(metadata));
toast({
title: t('toast.parametersSet'),
status: 'success',

View File

@@ -34,6 +34,7 @@ export default function MainWidth() {
withSliderMarks
sliderMarkRightOffset={-8}
inputWidth="6.2rem"
inputReadOnly
sliderNumberInputProps={{ max: 15360 }}
/>
) : (

View File

@@ -1,86 +0,0 @@
import { Box, Flex } from '@chakra-ui/react';
import { getLoraModels } from 'app/socketio/actions';
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import IAIIconButton from 'common/components/IAIIconButton';
import IAISimpleMenu, { IAIMenuItem } from 'common/components/IAISimpleMenu';
import {
setClearLoras,
setLorasInUse,
} from 'features/parameters/store/generationSlice';
import { useEffect } from 'react';
import { useTranslation } from 'react-i18next';
import { MdClear } from 'react-icons/md';
export default function LoraManager() {
const dispatch = useAppDispatch();
const foundLoras = useAppSelector((state) => state.system.foundLoras);
const lorasInUse = useAppSelector((state) => state.generation.lorasInUse);
const { t } = useTranslation();
const handleLora = (lora: string) => {
dispatch(setLorasInUse(lora));
};
useEffect(() => {
dispatch(getLoraModels());
}, [dispatch]);
const renderLoraOption = (lora: string) => {
const thisloraExists = lorasInUse.includes(lora);
const loraExistsStyle = {
fontWeight: 'bold',
color: 'var(--context-menu-active-item)',
};
return <Box style={thisloraExists ? loraExistsStyle : {}}>{lora}</Box>;
};
const numOfActiveLoras = () => {
const foundLoraNames: string[] = [];
foundLoras?.forEach((lora) => {
foundLoraNames.push(lora.name);
});
return foundLoraNames.filter((lora) => lorasInUse.includes(lora)).length;
};
const makeLoraItems = () => {
const lorasFound: IAIMenuItem[] = [];
foundLoras?.forEach((lora) => {
if (lora.name !== ' ') {
const newLoraItem: IAIMenuItem = {
item: renderLoraOption(lora.name),
onClick: () => handleLora(lora.name),
};
lorasFound.push(newLoraItem);
}
});
return lorasFound;
};
return foundLoras && foundLoras?.length > 0 ? (
<Flex columnGap={2}>
<IAISimpleMenu
menuItems={makeLoraItems()}
menuType="regular"
buttonText={`${t('modelManager.addLora')} (${numOfActiveLoras()})`}
menuButtonProps={{ width: '100%', padding: '0 1rem' }}
/>
<IAIIconButton
icon={<MdClear />}
tooltip={t('modelManager.clearLoras')}
aria-label={t('modelManager.clearLoras')}
onClick={() => dispatch(setClearLoras())}
/>
</Flex>
) : (
<Box
background="var(--btn-base-color)"
padding={2}
textAlign="center"
borderRadius={4}
fontWeight="bold"
>
{t('modelManager.noLoraModels')}
</Box>
);
}

View File

@@ -1,12 +0,0 @@
import { Flex } from '@chakra-ui/react';
import LoraManager from './LoraManager/LoraManager';
import TextualInversionManager from './TextualInversionManager/TextualInversionManager';
export default function PromptExtras() {
return (
<Flex flexDir="column" rowGap={2}>
<LoraManager />
<TextualInversionManager />
</Flex>
);
}

View File

@@ -1,163 +0,0 @@
import { Box, Flex } from '@chakra-ui/react';
import { getTextualInversionTriggers } from 'app/socketio/actions';
import { RootState } from 'app/store';
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import IAIIconButton from 'common/components/IAIIconButton';
import IAISimpleMenu, { IAIMenuItem } from 'common/components/IAISimpleMenu';
import {
setAddTIToNegative,
setClearTextualInversions,
setTextualInversionsInUse,
} from 'features/parameters/store/generationSlice';
import { useEffect } from 'react';
import { useTranslation } from 'react-i18next';
import { MdArrowDownward, MdClear } from 'react-icons/md';
export default function TextualInversionManager() {
const dispatch = useAppDispatch();
const textualInversionsInUse = useAppSelector(
(state: RootState) => state.generation.textualInversionsInUse
);
const negativeTextualInversionsInUse = useAppSelector(
(state: RootState) => state.generation.negativeTextualInversionsInUse
);
const foundLocalTextualInversionTriggers = useAppSelector(
(state) => state.system.foundLocalTextualInversionTriggers
);
const foundHuggingFaceTextualInversionTriggers = useAppSelector(
(state) => state.system.foundHuggingFaceTextualInversionTriggers
);
const localTextualInversionTriggers = useAppSelector(
(state) => state.generation.localTextualInversionTriggers
);
const huggingFaceTextualInversionConcepts = useAppSelector(
(state) => state.generation.huggingFaceTextualInversionConcepts
);
const shouldShowHuggingFaceConcepts = useAppSelector(
(state) => state.ui.shouldShowHuggingFaceConcepts
);
const addTIToNegative = useAppSelector(
(state) => state.generation.addTIToNegative
);
const { t } = useTranslation();
useEffect(() => {
dispatch(getTextualInversionTriggers());
}, [dispatch]);
const handleTextualInversion = (textual_inversion: string) => {
dispatch(setTextualInversionsInUse(textual_inversion));
};
const TIPip = ({ color }: { color: string }) => {
return (
<Box width={2} height={2} borderRadius={9999} backgroundColor={color}>
{' '}
</Box>
);
};
const renderTextualInversionOption = (textual_inversion: string) => {
return (
<Flex alignItems="center" columnGap={1}>
{textual_inversion}
{textualInversionsInUse.includes(textual_inversion) && (
<TIPip color="var(--context-menu-active-item)" />
)}
{negativeTextualInversionsInUse.includes(textual_inversion) && (
<TIPip color="var(--status-bad-color)" />
)}
</Flex>
);
};
const numOfActiveTextualInversions = () => {
const allTextualInversions = localTextualInversionTriggers.concat(
huggingFaceTextualInversionConcepts
);
return allTextualInversions.filter(
(ti) =>
textualInversionsInUse.includes(ti) ||
negativeTextualInversionsInUse.includes(ti)
).length;
};
const makeTextualInversionItems = () => {
const textualInversionsFound: IAIMenuItem[] = [];
foundLocalTextualInversionTriggers?.forEach((textualInversion) => {
if (textualInversion.name !== ' ') {
const newTextualInversionItem: IAIMenuItem = {
item: renderTextualInversionOption(textualInversion.name),
onClick: () => handleTextualInversion(textualInversion.name),
};
textualInversionsFound.push(newTextualInversionItem);
}
});
if (shouldShowHuggingFaceConcepts) {
foundHuggingFaceTextualInversionTriggers?.forEach((textualInversion) => {
if (textualInversion.name !== ' ') {
const newTextualInversionItem: IAIMenuItem = {
item: renderTextualInversionOption(textualInversion.name),
onClick: () => handleTextualInversion(textualInversion.name),
};
textualInversionsFound.push(newTextualInversionItem);
}
});
}
return textualInversionsFound;
};
return foundLocalTextualInversionTriggers &&
(foundLocalTextualInversionTriggers?.length > 0 ||
(foundHuggingFaceTextualInversionTriggers &&
foundHuggingFaceTextualInversionTriggers?.length > 0 &&
shouldShowHuggingFaceConcepts)) ? (
<Flex columnGap={2}>
<IAISimpleMenu
menuItems={makeTextualInversionItems()}
menuType="regular"
buttonText={`${t(
'modelManager.addTextualInversionTrigger'
)} (${numOfActiveTextualInversions()})`}
menuButtonProps={{
width: '100%',
padding: '0 1rem',
}}
/>
<IAIIconButton
icon={<MdArrowDownward />}
style={{
backgroundColor: addTIToNegative ? 'var(--btn-delete-image)' : '',
}}
tooltip={t('modelManager.addTIToNegative')}
aria-label={t('modelManager.addTIToNegative')}
onClick={() => dispatch(setAddTIToNegative(!addTIToNegative))}
/>
<IAIIconButton
icon={<MdClear />}
tooltip={t('modelManager.clearTextualInversions')}
aria-label={t('modelManager.clearTextualInversions')}
onClick={() => dispatch(setClearTextualInversions())}
/>
</Flex>
) : (
<Box
background="var(--btn-base-color)"
padding={2}
textAlign="center"
borderRadius={4}
fontWeight="bold"
>
{t('modelManager.noTextualInversionTriggers')}
</Box>
);
}

View File

@@ -1,43 +1,24 @@
import { FormControl, Textarea } from '@chakra-ui/react';
import type { RootState } from 'app/store';
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import {
handlePromptCheckers,
setNegativePrompt,
} from 'features/parameters/store/generationSlice';
import { setNegativePrompt } from 'features/parameters/store/generationSlice';
import { useTranslation } from 'react-i18next';
import { ChangeEvent, useState } from 'react';
const NegativePromptInput = () => {
const negativePrompt = useAppSelector(
(state: RootState) => state.generation.negativePrompt
);
const [promptTimer, setPromptTimer] = useState<number | undefined>(undefined);
const dispatch = useAppDispatch();
const { t } = useTranslation();
const handleNegativeChangePrompt = (e: ChangeEvent<HTMLTextAreaElement>) => {
dispatch(setNegativePrompt(e.target.value));
// Debounce Prompt UI Checking
clearTimeout(promptTimer);
const newPromptTimer = window.setTimeout(() => {
dispatch(
handlePromptCheckers({ prompt: e.target.value, toNegative: true })
);
}, 500);
setPromptTimer(newPromptTimer);
};
return (
<FormControl>
<Textarea
id="negativePrompt"
name="negativePrompt"
value={negativePrompt}
onChange={handleNegativeChangePrompt}
onChange={(e) => dispatch(setNegativePrompt(e.target.value))}
background="var(--prompt-bg-color)"
placeholder={t('parameters.negativePrompts')}
_placeholder={{ fontSize: '0.8rem' }}

View File

@@ -2,13 +2,12 @@ import { FormControl, Textarea } from '@chakra-ui/react';
import { generateImage } from 'app/socketio/actions';
import { RootState } from 'app/store';
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import { ChangeEvent, KeyboardEvent, useRef, useState } from 'react';
import { ChangeEvent, KeyboardEvent, useRef } from 'react';
import { createSelector } from '@reduxjs/toolkit';
import { readinessSelector } from 'app/selectors/readinessSelector';
import {
GenerationState,
handlePromptCheckers,
setPrompt,
} from 'features/parameters/store/generationSlice';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
@@ -41,21 +40,11 @@ const PromptInput = () => {
const { isReady } = useAppSelector(readinessSelector);
const promptRef = useRef<HTMLTextAreaElement>(null);
const [promptTimer, setPromptTimer] = useState<number | undefined>(undefined);
const { t } = useTranslation();
const handleChangePrompt = (e: ChangeEvent<HTMLTextAreaElement>) => {
dispatch(setPrompt(e.target.value));
// Debounce Prompt UI Checking
clearTimeout(promptTimer);
const newPromptTimer = window.setTimeout(() => {
dispatch(
handlePromptCheckers({ prompt: e.target.value, toNegative: false })
);
}, 500);
setPromptTimer(newPromptTimer);
};
useHotkeys(

View File

@@ -3,11 +3,7 @@ import { getPromptAndNegative } from 'common/util/getPromptAndNegative';
import * as InvokeAI from 'app/invokeai';
import promptToString from 'common/util/promptToString';
import { useAppDispatch } from 'app/storeHooks';
import {
handlePromptCheckers,
setNegativePrompt,
setPrompt,
} from '../store/generationSlice';
import { setNegativePrompt, setPrompt } from '../store/generationSlice';
// TECHDEBT: We have two metadata prompt formats and need to handle recalling either of them.
// This hook provides a function to do that.
@@ -24,10 +20,6 @@ const useSetBothPrompts = () => {
dispatch(setPrompt(prompt));
dispatch(setNegativePrompt(negativePrompt));
dispatch(handlePromptCheckers({ prompt: prompt, toNegative: false }));
dispatch(
handlePromptCheckers({ prompt: negativePrompt, toNegative: true })
);
};
};

View File

@@ -1,4 +1,4 @@
import { PayloadAction } from '@reduxjs/toolkit';
import type { PayloadAction } from '@reduxjs/toolkit';
import { createSlice } from '@reduxjs/toolkit';
import * as InvokeAI from 'app/invokeai';
import { getPromptAndNegative } from 'common/util/getPromptAndNegative';
@@ -17,12 +17,6 @@ export interface GenerationState {
perlin: number;
prompt: string;
negativePrompt: string;
lorasInUse: string[];
huggingFaceTextualInversionConcepts: string[];
localTextualInversionTriggers: string[];
textualInversionsInUse: string[];
negativeTextualInversionsInUse: string[];
addTIToNegative: boolean;
sampler: string;
seamBlur: number;
seamless: boolean;
@@ -54,12 +48,6 @@ const initialGenerationState: GenerationState = {
perlin: 0,
prompt: '',
negativePrompt: '',
lorasInUse: [],
huggingFaceTextualInversionConcepts: [],
localTextualInversionTriggers: [],
textualInversionsInUse: [],
negativeTextualInversionsInUse: [],
addTIToNegative: false,
sampler: 'k_lms',
seamBlur: 16,
seamless: false,
@@ -83,99 +71,12 @@ const initialGenerationState: GenerationState = {
const initialState: GenerationState = initialGenerationState;
const loraExists = (state: GenerationState, lora: string) => {
const loraRegex = new RegExp(`withLora\\(${lora},?\\s*([^\\)]+)?\\)`);
if (state.prompt.match(loraRegex)) return true;
return false;
};
const getTIRegex = (textualInversion: string) => {
if (textualInversion.includes('<' || '>')) {
return new RegExp(`${textualInversion}`);
} else {
return new RegExp(`\\b${textualInversion}\\b`);
}
};
const textualInversionExists = (
state: GenerationState,
textualInversion: string
) => {
const textualInversionRegex = getTIRegex(textualInversion);
if (!state.addTIToNegative) {
if (state.prompt.match(textualInversionRegex)) return true;
} else {
if (state.negativePrompt.match(textualInversionRegex)) return true;
}
return false;
};
const handleTypedTICheck = (
state: GenerationState,
newPrompt: string,
toNegative: boolean
) => {
let textualInversionsInUse = !toNegative
? [...state.textualInversionsInUse]
: [...state.negativeTextualInversionsInUse]; // Get Words In Prompt
const textualInversionRegex = /([\w<>!@%&*_-]+)/g; // Scan For Each Word
const textualInversionMatches = [
...newPrompt.matchAll(textualInversionRegex),
]; // Match All Words
if (textualInversionMatches.length > 0) {
textualInversionsInUse = []; // Reset Textual Inversions In Use
textualInversionMatches.forEach((textualInversionMatch) => {
const textualInversionName = textualInversionMatch[0];
if (
(!textualInversionsInUse.includes(textualInversionName) &&
state.localTextualInversionTriggers.includes(textualInversionName)) ||
state.huggingFaceTextualInversionConcepts.includes(textualInversionName)
) {
textualInversionsInUse.push(textualInversionName); // Add Textual Inversions In Prompt
}
});
} else {
textualInversionsInUse = []; // If No Matches, Remove Textual Inversions In Use
}
if (!toNegative) {
state.textualInversionsInUse = textualInversionsInUse;
} else {
state.negativeTextualInversionsInUse = textualInversionsInUse;
}
};
const handleTypedLoraCheck = (state: GenerationState, newPrompt: string) => {
let lorasInUse = [...state.lorasInUse]; // Get Loras In Prompt
const loraRegex = /withLora\(([^\\)]+)\)/g; // Scan For Lora Syntax
const loraMatches = [...newPrompt.matchAll(loraRegex)]; // Match All Lora Syntaxes
if (loraMatches.length > 0) {
lorasInUse = []; // Reset Loras In Use
loraMatches.forEach((loraMatch) => {
const loraName = loraMatch[1].split(',')[0];
if (!lorasInUse.includes(loraName)) lorasInUse.push(loraName); // Add Loras In Prompt
});
} else {
lorasInUse = []; // If No Matches, Remove Loras In Use
}
state.lorasInUse = lorasInUse;
};
export const generationSlice = createSlice({
name: 'generation',
initialState,
reducers: {
setPrompt: (state, action: PayloadAction<string | InvokeAI.Prompt>) => {
const newPrompt = action.payload;
if (typeof newPrompt === 'string') {
state.prompt = newPrompt;
} else {
@@ -193,136 +94,6 @@ export const generationSlice = createSlice({
state.negativePrompt = promptToString(newPrompt);
}
},
handlePromptCheckers: (
state,
action: PayloadAction<{
prompt: string | InvokeAI.Prompt;
toNegative: boolean;
}>
) => {
const newPrompt = action.payload.prompt;
if (typeof newPrompt === 'string') {
if (!action.payload.toNegative) handleTypedLoraCheck(state, newPrompt);
handleTypedTICheck(state, newPrompt, action.payload.toNegative);
}
},
setLorasInUse: (state, action: PayloadAction<string>) => {
const newLora = action.payload;
const loras = [...state.lorasInUse];
if (loraExists(state, newLora)) {
const loraRegex = new RegExp(
`withLora\\(${newLora},?\\s*([^\\)]+)?\\)`,
'g'
);
const newPrompt = state.prompt.replaceAll(loraRegex, '');
state.prompt = newPrompt.trim();
if (loras.includes(newLora)) {
const newLoraIndex = loras.indexOf(newLora);
if (newLoraIndex > -1) loras.splice(newLoraIndex, 1);
}
} else {
state.prompt = `${state.prompt.trim()} withLora(${newLora},0.75)`;
if (!loras.includes(newLora)) loras.push(newLora);
}
state.lorasInUse = loras;
},
setClearLoras: (state) => {
const lorasInUse = [...state.lorasInUse];
lorasInUse.forEach((lora) => {
const loraRegex = new RegExp(
`withLora\\(${lora},?\\s*([^\\)]+)?\\)`,
'g'
);
const newPrompt = state.prompt.replaceAll(loraRegex, '');
state.prompt = newPrompt.trim();
});
state.lorasInUse = [];
},
setTextualInversionsInUse: (state, action: PayloadAction<string>) => {
const newTextualInversion = action.payload;
const textualInversions = [...state.textualInversionsInUse];
const negativeTextualInversions = [
...state.negativeTextualInversionsInUse,
];
if (textualInversionExists(state, newTextualInversion)) {
const textualInversionRegex = getTIRegex(newTextualInversion);
if (!state.addTIToNegative) {
const newPrompt = state.prompt.replace(textualInversionRegex, '');
state.prompt = newPrompt.trim();
const newTIIndex = textualInversions.indexOf(newTextualInversion);
if (newTIIndex > -1) textualInversions.splice(newTIIndex, 1);
} else {
const newPrompt = state.negativePrompt.replace(
textualInversionRegex,
''
);
state.negativePrompt = newPrompt.trim();
const newTIIndex =
negativeTextualInversions.indexOf(newTextualInversion);
if (newTIIndex > -1) negativeTextualInversions.splice(newTIIndex, 1);
}
} else {
if (!state.addTIToNegative) {
state.prompt = `${state.prompt.trim()} ${newTextualInversion}`;
textualInversions.push(newTextualInversion);
} else {
state.negativePrompt = `${state.negativePrompt.trim()} ${newTextualInversion}`;
negativeTextualInversions.push(newTextualInversion);
}
}
state.textualInversionsInUse = textualInversions;
state.negativeTextualInversionsInUse = negativeTextualInversions;
},
setClearTextualInversions: (state) => {
const textualInversions = [...state.textualInversionsInUse];
const negativeTextualInversions = [
...state.negativeTextualInversionsInUse,
];
textualInversions.forEach((ti) => {
const textualInversionRegex = getTIRegex(ti);
const newPrompt = state.prompt.replace(textualInversionRegex, '');
state.prompt = newPrompt.trim();
});
negativeTextualInversions.forEach((ti) => {
const textualInversionRegex = getTIRegex(ti);
const newPrompt = state.negativePrompt.replace(
textualInversionRegex,
''
);
state.negativePrompt = newPrompt.trim();
});
state.textualInversionsInUse = [];
state.negativeTextualInversionsInUse = [];
},
setAddTIToNegative: (state, action: PayloadAction<boolean>) => {
state.addTIToNegative = action.payload;
},
setLocalTextualInversionTriggers: (
state,
action: PayloadAction<string[]>
) => {
state.localTextualInversionTriggers = action.payload;
},
setHuggingFaceTextualInversionConcepts: (
state,
action: PayloadAction<string[]>
) => {
state.huggingFaceTextualInversionConcepts = action.payload;
},
setIterations: (state, action: PayloadAction<number>) => {
state.iterations = action.payload;
},
@@ -530,6 +301,7 @@ export const generationSlice = createSlice({
state.perlin = perlin;
}
if (typeof seamless === 'boolean') state.seamless = seamless;
// if (typeof hires_fix === 'boolean') state.hiresFix = hires_fix; // TODO: Needs to be fixed after reorg
if (width) state.width = width;
if (height) state.height = height;
@@ -603,14 +375,6 @@ export const {
setPerlin,
setPrompt,
setNegativePrompt,
handlePromptCheckers,
setLorasInUse,
setClearLoras,
setHuggingFaceTextualInversionConcepts,
setLocalTextualInversionTriggers,
setTextualInversionsInUse,
setAddTIToNegative,
setClearTextualInversions,
setSampler,
setSeamBlur,
setSeamless,

View File

@@ -1,6 +1,5 @@
import type { PayloadAction } from '@reduxjs/toolkit';
import { createSlice } from '@reduxjs/toolkit';
import * as InvokeAI from 'app/invokeai';
import { FACETOOL_TYPES } from 'app/constants';
export type UpscalingLevel = 2 | 4;
@@ -41,17 +40,6 @@ export const postprocessingSlice = createSlice({
name: 'postprocessing',
initialState,
reducers: {
setAllPostProcessingParameters: (
state,
action: PayloadAction<InvokeAI.Metadata>
) => {
const { type, hires_fix } = action.payload.image;
if (type === 'txt2img') {
state.hiresFix = Boolean(hires_fix);
// Strength of img2img used in hires_fix is not currently exposed in the Metadata for the final image.
}
},
setFacetoolStrength: (state, action: PayloadAction<number>) => {
state.facetoolStrength = action.payload;
},
@@ -95,7 +83,6 @@ export const postprocessingSlice = createSlice({
});
export const {
setAllPostProcessingParameters,
resetPostprocessingState,
setCodeformerFidelity,
setFacetoolStrength,

View File

@@ -181,8 +181,7 @@ export default function SearchModels() {
const configFiles = {
v1: 'configs/stable-diffusion/v1-inference.yaml',
v2_base: 'configs/stable-diffusion/v2-inference-v.yaml',
v2_768: 'configs/stable-diffusion/v2-inference-v.yaml',
v2: 'configs/stable-diffusion/v2-inference-v.yaml',
inpainting: 'configs/stable-diffusion/v1-inpainting-inference.yaml',
custom: pathToConfig,
};
@@ -386,8 +385,7 @@ export default function SearchModels() {
>
<Flex gap={4}>
<Radio value="v1">{t('modelManager.v1')}</Radio>
<Radio value="v2_base">{t('modelManager.v2_base')}</Radio>
<Radio value="v2_768">{t('modelManager.v2_768')}</Radio>
<Radio value="v2">{t('modelManager.v2')}</Radio>
<Radio value="inpainting">
{t('modelManager.inpainting')}
</Radio>

View File

@@ -31,7 +31,6 @@ import {
} from 'features/system/store/systemSlice';
import { uiSelector } from 'features/ui/store/uiSelectors';
import {
setShouldShowHuggingFaceConcepts,
setShouldUseCanvasBetaLayout,
setShouldUseSliders,
} from 'features/ui/store/uiSlice';
@@ -53,11 +52,7 @@ const selector = createSelector(
enableImageDebugging,
} = system;
const {
shouldUseCanvasBetaLayout,
shouldUseSliders,
shouldShowHuggingFaceConcepts,
} = ui;
const { shouldUseCanvasBetaLayout, shouldUseSliders } = ui;
return {
shouldDisplayInProgressType,
@@ -68,7 +63,6 @@ const selector = createSelector(
enableImageDebugging,
shouldUseCanvasBetaLayout,
shouldUseSliders,
shouldShowHuggingFaceConcepts,
};
},
{
@@ -113,7 +107,6 @@ const SettingsModal = ({ children }: SettingsModalProps) => {
enableImageDebugging,
shouldUseCanvasBetaLayout,
shouldUseSliders,
shouldShowHuggingFaceConcepts,
} = useAppSelector(selector);
/**
@@ -213,14 +206,6 @@ const SettingsModal = ({ children }: SettingsModalProps) => {
dispatch(setShouldUseSliders(e.target.checked))
}
/>
<IAISwitch
styleClass="settings-modal-item"
label={t('settings.showHuggingFaceConcepts')}
isChecked={shouldShowHuggingFaceConcepts}
onChange={(e: ChangeEvent<HTMLInputElement>) =>
dispatch(setShouldShowHuggingFaceConcepts(e.target.checked))
}
/>
</div>
<div className="settings-modal-items">

View File

@@ -51,13 +51,6 @@ export interface SystemState
toastQueue: UseToastOptions[];
searchFolder: string | null;
foundModels: InvokeAI.FoundModel[] | null;
foundLoras: InvokeAI.FoundLora[] | null;
foundLocalTextualInversionTriggers:
| InvokeAI.FoundTextualInversionTriggers[]
| null;
foundHuggingFaceTextualInversionTriggers:
| InvokeAI.FoundTextualInversionTriggers[]
| null;
openModel: string | null;
cancelOptions: {
cancelType: CancelType;
@@ -100,9 +93,6 @@ const initialSystemState: SystemState = {
toastQueue: [],
searchFolder: null,
foundModels: null,
foundLoras: null,
foundLocalTextualInversionTriggers: null,
foundHuggingFaceTextualInversionTriggers: null,
openModel: null,
cancelOptions: {
cancelType: 'immediate',
@@ -272,24 +262,6 @@ export const systemSlice = createSlice({
) => {
state.foundModels = action.payload;
},
setFoundLoras: (
state,
action: PayloadAction<InvokeAI.FoundLora[] | null>
) => {
state.foundLoras = action.payload;
},
setFoundLocalTextualInversionTriggers: (
state,
action: PayloadAction<InvokeAI.FoundTextualInversionTriggers[] | null>
) => {
state.foundLocalTextualInversionTriggers = action.payload;
},
setFoundHuggingFaceTextualInversionTriggers: (
state,
action: PayloadAction<InvokeAI.FoundTextualInversionTriggers[] | null>
) => {
state.foundHuggingFaceTextualInversionTriggers = action.payload;
},
setOpenModel: (state, action: PayloadAction<string | null>) => {
state.openModel = action.payload;
},
@@ -331,9 +303,6 @@ export const {
setProcessingIndeterminateTask,
setSearchFolder,
setFoundModels,
setFoundLoras,
setFoundLocalTextualInversionTriggers,
setFoundHuggingFaceTextualInversionTriggers,
setOpenModel,
setCancelType,
setCancelAfter,

View File

@@ -18,7 +18,6 @@ import PromptInput from 'features/parameters/components/PromptInput/PromptInput'
import InvokeOptionsPanel from 'features/ui/components/InvokeParametersPanel';
import { useTranslation } from 'react-i18next';
import ImageToImageOptions from './ImageToImageOptions';
import PromptExtras from 'features/parameters/components/PromptInput/Extras/PromptExtras';
export default function ImageToImagePanel() {
const { t } = useTranslation();
@@ -64,7 +63,6 @@ export default function ImageToImagePanel() {
<Flex flexDir="column" rowGap="0.5rem">
<PromptInput />
<NegativePromptInput />
<PromptExtras />
</Flex>
<ProcessButtons />
<MainSettings />

View File

@@ -17,7 +17,6 @@ import NegativePromptInput from 'features/parameters/components/PromptInput/Nega
import PromptInput from 'features/parameters/components/PromptInput/PromptInput';
import InvokeOptionsPanel from 'features/ui/components/InvokeParametersPanel';
import { useTranslation } from 'react-i18next';
import PromptExtras from 'features/parameters/components/PromptInput/Extras/PromptExtras';
export default function TextToImagePanel() {
const { t } = useTranslation();
@@ -63,7 +62,6 @@ export default function TextToImagePanel() {
<Flex flexDir="column" rowGap="0.5rem">
<PromptInput />
<NegativePromptInput />
<PromptExtras />
</Flex>
<ProcessButtons />
<MainSettings />

View File

@@ -17,7 +17,6 @@ import NegativePromptInput from 'features/parameters/components/PromptInput/Nega
import PromptInput from 'features/parameters/components/PromptInput/PromptInput';
import InvokeOptionsPanel from 'features/ui/components/InvokeParametersPanel';
import { useTranslation } from 'react-i18next';
import PromptExtras from 'features/parameters/components/PromptInput/Extras/PromptExtras';
export default function UnifiedCanvasPanel() {
const { t } = useTranslation();
@@ -74,7 +73,6 @@ export default function UnifiedCanvasPanel() {
<Flex flexDir="column" rowGap="0.5rem">
<PromptInput />
<NegativePromptInput />
<PromptExtras />
</Flex>
<ProcessButtons />
<MainSettings />

View File

@@ -15,7 +15,6 @@ const initialtabsState: UIState = {
shouldUseCanvasBetaLayout: false,
shouldShowExistingModelsInSearch: false,
shouldUseSliders: false,
shouldShowHuggingFaceConcepts: false,
addNewModelUIOption: null,
};
@@ -71,12 +70,6 @@ export const uiSlice = createSlice({
setShouldUseSliders: (state, action: PayloadAction<boolean>) => {
state.shouldUseSliders = action.payload;
},
setShouldShowHuggingFaceConcepts: (
state,
action: PayloadAction<boolean>
) => {
state.shouldShowHuggingFaceConcepts = action.payload;
},
setAddNewModelUIOption: (state, action: PayloadAction<AddNewModelType>) => {
state.addNewModelUIOption = action.payload;
},
@@ -95,7 +88,6 @@ export const {
setShouldUseCanvasBetaLayout,
setShouldShowExistingModelsInSearch,
setShouldUseSliders,
setShouldShowHuggingFaceConcepts,
setAddNewModelUIOption,
} = uiSlice.actions;
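
The actions exported above are consumed elsewhere via `dispatch`. As a hedged end-to-end sketch (a stand-alone store; the real app registers many more slice reducers), registering the reducer and flipping one of these boolean flags looks like this:

```ts
import { configureStore, createSlice } from '@reduxjs/toolkit';
import type { PayloadAction } from '@reduxjs/toolkit';

const uiSlice = createSlice({
  name: 'ui',
  initialState: { shouldUseSliders: false },
  reducers: {
    setShouldUseSliders: (state, action: PayloadAction<boolean>) => {
      state.shouldUseSliders = action.payload;
    },
  },
});

export const { setShouldUseSliders } = uiSlice.actions;

// Register the slice reducer under the `ui` key and toggle the flag,
// e.g. in response to a settings switch.
const store = configureStore({ reducer: { ui: uiSlice.reducer } });
store.dispatch(setShouldUseSliders(true));
console.log(store.getState().ui.shouldUseSliders); // true
```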

View File

@@ -12,6 +12,5 @@ export interface UIState {
shouldUseCanvasBetaLayout: boolean;
shouldShowExistingModelsInSearch: boolean;
shouldUseSliders: boolean;
shouldShowHuggingFaceConcepts: boolean;
addNewModelUIOption: AddNewModelType;
}

View File

@@ -8,6 +8,7 @@
--accent-color-bright: rgb(104, 60, 230);
--accent-color-hover: var(--accent-color-bright);
// App Colors
--root-bg-color: rgb(10, 10, 10);
--background-color: rgb(26, 26, 32);
--background-color-light: rgb(40, 44, 48);
@@ -118,7 +119,6 @@
--context-menu-bg-color: rgb(46, 48, 58);
--context-menu-box-shadow: none;
--context-menu-bg-color-hover: rgb(30, 32, 42);
--context-menu-active-item: var(--accent-color-bright);
// Shadows
--floating-button-drop-shadow-color: var(--accent-color);

View File

@@ -117,7 +117,6 @@
--context-menu-bg-color: rgb(46, 48, 58);
--context-menu-box-shadow: none;
--context-menu-bg-color-hover: rgb(30, 32, 42);
--context-menu-active-item: var(--accent-color-bright);
// Shadows
--floating-button-drop-shadow-color: var(--accent-color);

View File

@@ -114,7 +114,6 @@
--context-menu-box-shadow: 0px 10px 38px -10px rgba(22, 23, 24, 0.35),
0px 10px 20px -15px rgba(22, 23, 24, 0.2);
--context-menu-bg-color-hover: var(--background-color-secondary);
--context-menu-active-item: rgb(0, 0, 0);
// Shadows
--floating-button-drop-shadow-color: rgba(0, 0, 0, 0.7);

File diff suppressed because one or more lines are too long

View File

@@ -56,26 +56,26 @@
"@babel/helper-validator-identifier" "^7.19.1"
to-fast-properties "^2.0.0"
"@chakra-ui/accordion@2.1.11":
version "2.1.11"
resolved "https://registry.yarnpkg.com/@chakra-ui/accordion/-/accordion-2.1.11.tgz#c6df0100c543645d0631df3aefde2ea2b8ed6313"
integrity sha512-mfVPmqETp9pyRDHJ33AdF19oHv/LyxVzQJtlxUByuvs8Cj9QQZ2LQLg5kejm+b3mj03A7A6yfbuo3RNaI4Bhsg==
"@chakra-ui/accordion@2.1.9":
version "2.1.9"
resolved "https://registry.yarnpkg.com/@chakra-ui/accordion/-/accordion-2.1.9.tgz#20fa86d94dc034251df2f7c8595ae4dd541a29d9"
integrity sha512-a9CKIAUHezc0f5FR/SQ4GVxnWuIb2HbDTxTEKTp58w/J9pecIbJaNrJ5TUZ0MVbDU9jkgO9RsZ29jkja8PomAw==
dependencies:
"@chakra-ui/descendant" "3.0.14"
"@chakra-ui/descendant" "3.0.13"
"@chakra-ui/icon" "3.0.16"
"@chakra-ui/react-context" "2.0.8"
"@chakra-ui/react-context" "2.0.7"
"@chakra-ui/react-use-controllable-state" "2.0.8"
"@chakra-ui/react-use-merge-refs" "2.0.7"
"@chakra-ui/shared-utils" "2.0.5"
"@chakra-ui/transition" "2.0.16"
"@chakra-ui/transition" "2.0.15"
"@chakra-ui/alert@2.1.0":
version "2.1.0"
resolved "https://registry.yarnpkg.com/@chakra-ui/alert/-/alert-2.1.0.tgz#7a234ac6426231b39243088648455cbcf1cbdf24"
integrity sha512-OcfHwoXI5VrmM+tHJTHT62Bx6TfyfCxSa0PWUOueJzSyhlUOKBND5we6UtrOB7D0jwX45qKKEDJOLG5yCG21jQ==
"@chakra-ui/alert@2.0.17":
version "2.0.17"
resolved "https://registry.yarnpkg.com/@chakra-ui/alert/-/alert-2.0.17.tgz#b129732ec308db6a6a1afa7c06a6595ad853c967"
integrity sha512-0Y5vw+HkeXpwbL1roVpSSNM6luMRmUbwduUSHEA4OnX1ismvsDb1ZBfpi4Vxp6w8euJ2Uj6df3krbd5tbCP6tg==
dependencies:
"@chakra-ui/icon" "3.0.16"
"@chakra-ui/react-context" "2.0.8"
"@chakra-ui/react-context" "2.0.7"
"@chakra-ui/shared-utils" "2.0.5"
"@chakra-ui/spinner" "2.0.13"
@@ -84,23 +84,23 @@
resolved "https://registry.yarnpkg.com/@chakra-ui/anatomy/-/anatomy-2.1.2.tgz#ea66b1841e7195da08ddc862daaa3f3e56e565f5"
integrity sha512-pKfOS/mztc4sUXHNc8ypJ1gPWSolWT770jrgVRfolVbYlki8y5Y+As996zMF6k5lewTu6j9DQequ7Cc9a69IVQ==
"@chakra-ui/avatar@2.2.8":
version "2.2.8"
resolved "https://registry.yarnpkg.com/@chakra-ui/avatar/-/avatar-2.2.8.tgz#a6e16accb2bb9c879f197090ccc9df1ff42992a6"
integrity sha512-uBs9PMrqyK111tPIYIKnOM4n3mwgKqGpvYmtwBnnbQLTNLg4gtiWWVbpTuNMpyu1av0xQYomjUt8Doed8w6p8g==
"@chakra-ui/avatar@2.2.5":
version "2.2.5"
resolved "https://registry.yarnpkg.com/@chakra-ui/avatar/-/avatar-2.2.5.tgz#50eb7cc5a172d394b301fa0abd5f607b7f5d3563"
integrity sha512-TEHXuGE79+fEn61qJ7J/A0Ec+WjyNwobrDTATcLg9Zx2/WEMmZNfrWIAlI5ANQAwVbdSWeGVbyoLAK5mbcrE0A==
dependencies:
"@chakra-ui/image" "2.0.15"
"@chakra-ui/react-children-utils" "2.0.6"
"@chakra-ui/react-context" "2.0.8"
"@chakra-ui/react-context" "2.0.7"
"@chakra-ui/shared-utils" "2.0.5"
"@chakra-ui/breadcrumb@2.1.5":
version "2.1.5"
resolved "https://registry.yarnpkg.com/@chakra-ui/breadcrumb/-/breadcrumb-2.1.5.tgz#a43b22cc8005291a615696a8c88efc37064562f3"
integrity sha512-p3eQQrHQBkRB69xOmNyBJqEdfCrMt+e0eOH+Pm/DjFWfIVIbnIaFbmDCeWClqlLa21Ypc6h1hR9jEmvg8kmOog==
"@chakra-ui/breadcrumb@2.1.4":
version "2.1.4"
resolved "https://registry.yarnpkg.com/@chakra-ui/breadcrumb/-/breadcrumb-2.1.4.tgz#0d249dc2a92639bd2bf46d097dd5445112bd2367"
integrity sha512-vyBx5TAxPnHhb0b8nyRGfqyjleD//9mySFhk96c9GL+T6YDO4swHw5y/kvDv3Ngc/iRwJ9hdI49PZKwPxLqsEg==
dependencies:
"@chakra-ui/react-children-utils" "2.0.6"
"@chakra-ui/react-context" "2.0.8"
"@chakra-ui/react-context" "2.0.7"
"@chakra-ui/shared-utils" "2.0.5"
"@chakra-ui/breakpoint-utils@2.0.8":
@@ -110,12 +110,12 @@
dependencies:
"@chakra-ui/shared-utils" "2.0.5"
"@chakra-ui/button@2.0.18":
version "2.0.18"
resolved "https://registry.yarnpkg.com/@chakra-ui/button/-/button-2.0.18.tgz#c13d2e404e22a9873ba5373fde494bedafe32fdd"
integrity sha512-E3c99+lOm6ou4nQVOTLkG+IdOPMjsQK+Qe7VyP8A/xeAMFONuibrWPRPpprr4ZkB4kEoLMfNuyH2+aEza3ScUA==
"@chakra-ui/button@2.0.16":
version "2.0.16"
resolved "https://registry.yarnpkg.com/@chakra-ui/button/-/button-2.0.16.tgz#ff315b57ee47c3511a6507fcfb6f00bb93e2ac7d"
integrity sha512-NjuTKa7gNhnGSUutKuTc8HoAOe9WWIigpciBG7yj3ok67kg8bXtSzPyQFZlgTY6XGdAckWTT+Do4tvhwa5LA+g==
dependencies:
"@chakra-ui/react-context" "2.0.8"
"@chakra-ui/react-context" "2.0.7"
"@chakra-ui/react-use-merge-refs" "2.0.7"
"@chakra-ui/shared-utils" "2.0.5"
"@chakra-ui/spinner" "2.0.13"
@@ -127,13 +127,13 @@
dependencies:
"@chakra-ui/shared-utils" "2.0.5"
"@chakra-ui/checkbox@2.2.14":
version "2.2.14"
resolved "https://registry.yarnpkg.com/@chakra-ui/checkbox/-/checkbox-2.2.14.tgz#902acc99a9a80c1c304788a230cf36f8116e8260"
integrity sha512-uqo6lFWLqYBujPglrvRhTAErtuIXpmdpc5w0W4bjK7kyvLhxOpUh1hlDb2WoqlNpfRn/OaNeF6VinPnf9BJL8w==
"@chakra-ui/checkbox@2.2.10":
version "2.2.10"
resolved "https://registry.yarnpkg.com/@chakra-ui/checkbox/-/checkbox-2.2.10.tgz#e4f773e7d2464f1d6e9d18dd88b679290cb33171"
integrity sha512-vzxEjw99qj7loxAdP1WuHNt4EAvj/t6cc8oxyOB2mEvkAzhxI34rLR+3zWDuHWsmhyUO+XEDh4FiWdR+DK5Siw==
dependencies:
"@chakra-ui/form-control" "2.0.18"
"@chakra-ui/react-context" "2.0.8"
"@chakra-ui/form-control" "2.0.17"
"@chakra-ui/react-context" "2.0.7"
"@chakra-ui/react-types" "2.0.7"
"@chakra-ui/react-use-callback-ref" "2.0.7"
"@chakra-ui/react-use-controllable-state" "2.0.8"
@@ -142,7 +142,7 @@
"@chakra-ui/react-use-update-effect" "2.0.7"
"@chakra-ui/shared-utils" "2.0.5"
"@chakra-ui/visually-hidden" "2.0.15"
"@zag-js/focus-visible" "0.2.2"
"@zag-js/focus-visible" "0.2.1"
"@chakra-ui/clickable@2.0.14":
version "2.0.14"
@@ -180,17 +180,17 @@
"@chakra-ui/react-use-callback-ref" "2.0.7"
"@chakra-ui/shared-utils" "2.0.5"
"@chakra-ui/css-reset@2.1.1":
version "2.1.1"
resolved "https://registry.yarnpkg.com/@chakra-ui/css-reset/-/css-reset-2.1.1.tgz#c61f3d2103c13e62a86fd2d359682092e961852c"
integrity sha512-jwEOfIAWmQsnChHQTW/eRE+dfE4MjmhvSvoUug5nkV1pI7veC/20noFlIZxzi82EbiQI8Fs0+Jnusgxr2yaOHA==
"@chakra-ui/css-reset@2.0.12":
version "2.0.12"
resolved "https://registry.yarnpkg.com/@chakra-ui/css-reset/-/css-reset-2.0.12.tgz#6eebcbe9e971facd215e174e063ace29f647a045"
integrity sha512-Q5OYIMvqTl2vZ947kIYxcS5DhQXeStB84BzzBd6C10wOx1gFUu9pL+jLpOnHR3hhpWRMdX5o7eT+gMJWIYUZ0Q==
"@chakra-ui/descendant@3.0.14":
version "3.0.14"
resolved "https://registry.yarnpkg.com/@chakra-ui/descendant/-/descendant-3.0.14.tgz#fe8bac3f0e1ffe562e3e73eac393dbf222d57e13"
integrity sha512-+Ahvp9H4HMpfScIv9w1vaecGz7qWAaK1YFHHolz/SIsGLaLGlbdp+5UNabQC7L6TUnzzJDQDxzwif78rTD7ang==
"@chakra-ui/descendant@3.0.13":
version "3.0.13"
resolved "https://registry.yarnpkg.com/@chakra-ui/descendant/-/descendant-3.0.13.tgz#e883a2233ee07fe1ae6c014567824c0f79df11cf"
integrity sha512-9nzxZVxUSMc4xPL5fSaRkEOQjDQWUGjGvrZI7VzWk9eq63cojOtIxtWMSW383G9148PzWJjJYt30Eud5tdZzlg==
dependencies:
"@chakra-ui/react-context" "2.0.8"
"@chakra-ui/react-context" "2.0.7"
"@chakra-ui/react-use-merge-refs" "2.0.7"
"@chakra-ui/dom-utils@2.0.6":
@@ -198,12 +198,12 @@
resolved "https://registry.yarnpkg.com/@chakra-ui/dom-utils/-/dom-utils-2.0.6.tgz#68f49f3b4a0bdebd5e416d6fd2c012c9ad64b76a"
integrity sha512-PVtDkPrDD5b8aoL6Atg7SLjkwhWb7BwMcLOF1L449L3nZN+DAO3nyAh6iUhZVJyunELj9d0r65CDlnMREyJZmA==
"@chakra-ui/editable@2.0.21":
version "2.0.21"
resolved "https://registry.yarnpkg.com/@chakra-ui/editable/-/editable-2.0.21.tgz#bc74510470d6d455844438e540851896d3879132"
integrity sha512-oYuXbHnggxSYJN7P9Pn0Scs9tPC91no4z1y58Oe+ILoJKZ+bFAEHtL7FEISDNJxw++MEukeFu7GU1hVqmdLsKQ==
"@chakra-ui/editable@2.0.19":
version "2.0.19"
resolved "https://registry.yarnpkg.com/@chakra-ui/editable/-/editable-2.0.19.tgz#1af2fe3c215111f61f7872fb5f599f4d8da24e7d"
integrity sha512-YxRJsJ2JQd42zfPBgTKzIhg1HugT+gfQz1ZosmUN+IZT9YZXL2yodHTUz6Lee04Vc/CdEqgBFLuREXEUNBfGtA==
dependencies:
"@chakra-ui/react-context" "2.0.8"
"@chakra-ui/react-context" "2.0.7"
"@chakra-ui/react-types" "2.0.7"
"@chakra-ui/react-use-callback-ref" "2.0.7"
"@chakra-ui/react-use-controllable-state" "2.0.8"
@@ -226,13 +226,13 @@
"@chakra-ui/dom-utils" "2.0.6"
react-focus-lock "^2.9.2"
"@chakra-ui/form-control@2.0.18":
version "2.0.18"
resolved "https://registry.yarnpkg.com/@chakra-ui/form-control/-/form-control-2.0.18.tgz#1923f293afde70b2b07ca731d98fef3660098c56"
integrity sha512-I0a0jG01IAtRPccOXSNugyRdUAe8Dy40ctqedZvznMweOXzbMCF1m+sHPLdWeWC/VI13VoAispdPY0/zHOdjsQ==
"@chakra-ui/form-control@2.0.17":
version "2.0.17"
resolved "https://registry.yarnpkg.com/@chakra-ui/form-control/-/form-control-2.0.17.tgz#2f710325e77ce35067337616d440f903b137bdd5"
integrity sha512-34ptCaJ2LNvQNOlB6MAKsmH1AkT1xo7E+3Vw10Urr81yTOjDTM/iU6vG3JKPfRDMyXeowPjXmutlnuk72SSjRg==
dependencies:
"@chakra-ui/icon" "3.0.16"
"@chakra-ui/react-context" "2.0.8"
"@chakra-ui/react-context" "2.0.7"
"@chakra-ui/react-types" "2.0.7"
"@chakra-ui/react-use-merge-refs" "2.0.7"
"@chakra-ui/shared-utils" "2.0.5"
@@ -254,10 +254,10 @@
dependencies:
"@chakra-ui/shared-utils" "2.0.5"
"@chakra-ui/icons@^2.0.18":
version "2.0.18"
resolved "https://registry.yarnpkg.com/@chakra-ui/icons/-/icons-2.0.18.tgz#6f859d2e0d8f31fea9cb2e6507d65eb65cb95cf5"
integrity sha512-E/+DF/jw7kdN4/XxCZRnr4FdMXhkl50Q34MVwN9rADWMwPK9uSZPGyC7HOx6rilo7q4bFjYDH3yRj9g+VfbVkg==
"@chakra-ui/icons@^2.0.17":
version "2.0.17"
resolved "https://registry.yarnpkg.com/@chakra-ui/icons/-/icons-2.0.17.tgz#625a46d169707aad36d65c04a4626a422f92e5ae"
integrity sha512-HMJP0WrJgAmFR9+Xh/CBH0nVnGMsJ4ZC8MK6tMgxPKd9/muvn0I4hsicHqdPlLpmB0TlxlhkBAKaVMtOdz6F0w==
dependencies:
"@chakra-ui/icon" "3.0.16"
@@ -269,27 +269,27 @@
"@chakra-ui/react-use-safe-layout-effect" "2.0.5"
"@chakra-ui/shared-utils" "2.0.5"
"@chakra-ui/input@2.0.21":
version "2.0.21"
resolved "https://registry.yarnpkg.com/@chakra-ui/input/-/input-2.0.21.tgz#a7e55ea6fa32ae39c0f6ec44ca2189933fda9eb5"
integrity sha512-AIWjjg6MgcOtlvKmVoZfPPfgF+sBSWL3Zq2HSCAMvS6h7jfxz/Xv0UTFGPk5F4Wt0YHT7qMySg0Jsm0b78HZJg==
"@chakra-ui/input@2.0.20":
version "2.0.20"
resolved "https://registry.yarnpkg.com/@chakra-ui/input/-/input-2.0.20.tgz#8db3ec46b52be901c94599b3659a9003bdb2dd07"
integrity sha512-ypmsy4n4uNBVgn6Gd24Zrpi+qRf/T9WEzWkysuYC9Qfxo+i7yuf3snp7XmBy8KSGVSiXE11eO8ZN5oCg6Xg0jg==
dependencies:
"@chakra-ui/form-control" "2.0.18"
"@chakra-ui/form-control" "2.0.17"
"@chakra-ui/object-utils" "2.0.8"
"@chakra-ui/react-children-utils" "2.0.6"
"@chakra-ui/react-context" "2.0.8"
"@chakra-ui/react-context" "2.0.7"
"@chakra-ui/shared-utils" "2.0.5"
"@chakra-ui/layout@2.1.18":
version "2.1.18"
resolved "https://registry.yarnpkg.com/@chakra-ui/layout/-/layout-2.1.18.tgz#f5dba687dfced9145d495f3a21edb5672df6bb73"
integrity sha512-F4Gh2e+DGdaWdWT5NZduIFD9NM7Bnuh8sXARFHWPvIu7yvAwZ3ddqC9GK4F3qUngdmkJxDLWQqRSwSh96Lxbhw==
"@chakra-ui/layout@2.1.16":
version "2.1.16"
resolved "https://registry.yarnpkg.com/@chakra-ui/layout/-/layout-2.1.16.tgz#9d90f25cf9f0537d19cd36a417f7ddc1461e8591"
integrity sha512-QFS3feozIGsvB0H74lUocev55aRF26eNrdmhfJifwikZAiq+zzZAMdBdNU9UJhHClnMOU8/iGZ0MF7ti4zQS1A==
dependencies:
"@chakra-ui/breakpoint-utils" "2.0.8"
"@chakra-ui/icon" "3.0.16"
"@chakra-ui/object-utils" "2.0.8"
"@chakra-ui/react-children-utils" "2.0.6"
"@chakra-ui/react-context" "2.0.8"
"@chakra-ui/react-context" "2.0.7"
"@chakra-ui/shared-utils" "2.0.5"
"@chakra-ui/lazy-utils@2.0.5":
@@ -311,17 +311,17 @@
"@chakra-ui/react-env" "3.0.0"
"@chakra-ui/shared-utils" "2.0.5"
"@chakra-ui/menu@2.1.12":
version "2.1.12"
resolved "https://registry.yarnpkg.com/@chakra-ui/menu/-/menu-2.1.12.tgz#ab83b7a5165bd31a6c68328d7f65a79e3412c48d"
integrity sha512-ylNK1VJlr/3/EGg9dLPZ87cBJJjeiYXeU/gOAphsKXMnByrXWhbp4YVnyyyha2KZ0zEw0aPU4nCZ+A69aT9wrg==
"@chakra-ui/menu@2.1.9":
version "2.1.9"
resolved "https://registry.yarnpkg.com/@chakra-ui/menu/-/menu-2.1.9.tgz#2f3239a9b2855fd77fc317d9e6b904c1ad50d7c6"
integrity sha512-ue5nD4QJcl3H3UwN0zZNJmH89XUebnvEdW6THAUL41hDjJ0J/Fjpg9Sgzwug2aBbBXBNbVMsUuhcCj6x91d+IQ==
dependencies:
"@chakra-ui/clickable" "2.0.14"
"@chakra-ui/descendant" "3.0.14"
"@chakra-ui/descendant" "3.0.13"
"@chakra-ui/lazy-utils" "2.0.5"
"@chakra-ui/popper" "3.0.13"
"@chakra-ui/react-children-utils" "2.0.6"
"@chakra-ui/react-context" "2.0.8"
"@chakra-ui/react-context" "2.0.7"
"@chakra-ui/react-use-animation-state" "2.0.8"
"@chakra-ui/react-use-controllable-state" "2.0.8"
"@chakra-ui/react-use-disclosure" "2.0.8"
@@ -330,33 +330,33 @@
"@chakra-ui/react-use-outside-click" "2.0.7"
"@chakra-ui/react-use-update-effect" "2.0.7"
"@chakra-ui/shared-utils" "2.0.5"
"@chakra-ui/transition" "2.0.16"
"@chakra-ui/transition" "2.0.15"
"@chakra-ui/modal@2.2.11":
version "2.2.11"
resolved "https://registry.yarnpkg.com/@chakra-ui/modal/-/modal-2.2.11.tgz#8a964288759f3d681e23bfc3a837a3e2c7523f8e"
integrity sha512-2J0ZUV5tEzkPiawdkgPz6bmex7NXAde1VXooMwdvK+vuT8PV3U61yorTJOZVLdw7TjjI1Yo94mzsp6UwBud43Q==
"@chakra-ui/modal@2.2.9":
version "2.2.9"
resolved "https://registry.yarnpkg.com/@chakra-ui/modal/-/modal-2.2.9.tgz#aad65a2c60aa974e023f8b3facc0e79eb742e006"
integrity sha512-nTfNp7XsVwn5+xJOtstoFA8j0kq/9sJj7KesyYzjEDaMKvCZvIOntRYowoydho43jb4+YC7ebKhp0KOIINS0gg==
dependencies:
"@chakra-ui/close-button" "2.0.17"
"@chakra-ui/focus-lock" "2.0.16"
"@chakra-ui/portal" "2.0.16"
"@chakra-ui/react-context" "2.0.8"
"@chakra-ui/portal" "2.0.15"
"@chakra-ui/react-context" "2.0.7"
"@chakra-ui/react-types" "2.0.7"
"@chakra-ui/react-use-merge-refs" "2.0.7"
"@chakra-ui/shared-utils" "2.0.5"
"@chakra-ui/transition" "2.0.16"
"@chakra-ui/transition" "2.0.15"
aria-hidden "^1.2.2"
react-remove-scroll "^2.5.5"
"@chakra-ui/number-input@2.0.19":
version "2.0.19"
resolved "https://registry.yarnpkg.com/@chakra-ui/number-input/-/number-input-2.0.19.tgz#82d4522036904c04d07e7050822fc522f9b32233"
integrity sha512-HDaITvtMEqOauOrCPsARDxKD9PSHmhWywpcyCSOX0lMe4xx2aaGhU0QQFhsJsykj8Er6pytMv6t0KZksdDv3YA==
"@chakra-ui/number-input@2.0.18":
version "2.0.18"
resolved "https://registry.yarnpkg.com/@chakra-ui/number-input/-/number-input-2.0.18.tgz#072a00ef869ebafa4960cfdee8caae8208864289"
integrity sha512-cPkyAFFHHzeFBselrT1BtjlzMkJ6TKrTDUnHFlzqXy6aqeXuhrjFhMfXucjedSpOqedsP9ZbKFTdIAhu9DdL/A==
dependencies:
"@chakra-ui/counter" "2.0.14"
"@chakra-ui/form-control" "2.0.18"
"@chakra-ui/form-control" "2.0.17"
"@chakra-ui/icon" "3.0.16"
"@chakra-ui/react-context" "2.0.8"
"@chakra-ui/react-context" "2.0.7"
"@chakra-ui/react-types" "2.0.7"
"@chakra-ui/react-use-callback-ref" "2.0.7"
"@chakra-ui/react-use-event-listener" "2.0.7"
@@ -376,27 +376,27 @@
resolved "https://registry.yarnpkg.com/@chakra-ui/object-utils/-/object-utils-2.0.8.tgz#307f927f6434f99feb32ba92bdf451a6b59a6199"
integrity sha512-2upjT2JgRuiupdrtBWklKBS6tqeGMA77Nh6Q0JaoQuH/8yq+15CGckqn3IUWkWoGI0Fg3bK9LDlbbD+9DLw95Q==
"@chakra-ui/pin-input@2.0.20":
version "2.0.20"
resolved "https://registry.yarnpkg.com/@chakra-ui/pin-input/-/pin-input-2.0.20.tgz#5bf115bf4282b69fc6532a9c542cbf41f815d200"
integrity sha512-IHVmerrtHN8F+jRB3W1HnMir1S1TUCWhI7qDInxqPtoRffHt6mzZgLZ0izx8p1fD4HkW4c1d4/ZLEz9uH9bBRg==
"@chakra-ui/pin-input@2.0.19":
version "2.0.19"
resolved "https://registry.yarnpkg.com/@chakra-ui/pin-input/-/pin-input-2.0.19.tgz#f9b196174f0518feec5c1ee3fcaf2134c301148a"
integrity sha512-6O7s4vWz4cqQ6zvMov9sYj6ZqWAsTxR/MNGe3DNgu1zWQg8veNCYtj1rNGhNS3eZNUMAa8uM2dXIphGTP53Xow==
dependencies:
"@chakra-ui/descendant" "3.0.14"
"@chakra-ui/descendant" "3.0.13"
"@chakra-ui/react-children-utils" "2.0.6"
"@chakra-ui/react-context" "2.0.8"
"@chakra-ui/react-context" "2.0.7"
"@chakra-ui/react-use-controllable-state" "2.0.8"
"@chakra-ui/react-use-merge-refs" "2.0.7"
"@chakra-ui/shared-utils" "2.0.5"
"@chakra-ui/popover@2.1.9":
version "2.1.9"
resolved "https://registry.yarnpkg.com/@chakra-ui/popover/-/popover-2.1.9.tgz#890cc0dfc5022757715ccf772ec194e7a409275f"
integrity sha512-OMJ12VVs9N32tFaZSOqikkKPtwAVwXYsES/D1pff/amBrE3ngCrpxJSIp4uvTdORfIYDojJqrR52ZplDKS9hRQ==
"@chakra-ui/popover@2.1.8":
version "2.1.8"
resolved "https://registry.yarnpkg.com/@chakra-ui/popover/-/popover-2.1.8.tgz#e906ce0533693d735b6e13a3a6ffe16d8e0a9ab4"
integrity sha512-ob7fAz+WWmXIq7iGHVB3wDKzZTj+T+noYBT/U1Q+jIf+jMr2WOpJLTfb0HTZcfhvn4EBFlfBg7Wk5qbXNaOn7g==
dependencies:
"@chakra-ui/close-button" "2.0.17"
"@chakra-ui/lazy-utils" "2.0.5"
"@chakra-ui/popper" "3.0.13"
"@chakra-ui/react-context" "2.0.8"
"@chakra-ui/react-context" "2.0.7"
"@chakra-ui/react-types" "2.0.7"
"@chakra-ui/react-use-animation-state" "2.0.8"
"@chakra-ui/react-use-disclosure" "2.0.8"
@@ -414,53 +414,53 @@
"@chakra-ui/react-use-merge-refs" "2.0.7"
"@popperjs/core" "^2.9.3"
"@chakra-ui/portal@2.0.16":
version "2.0.16"
resolved "https://registry.yarnpkg.com/@chakra-ui/portal/-/portal-2.0.16.tgz#e5ce3f9d9e559f17a95276e0c006d0e9b7703442"
integrity sha512-bVID0qbQ0l4xq38LdqAN4EKD4/uFkDnXzFwOlviC9sl0dNhzICDb1ltuH/Adl1d2HTMqyN60O3GO58eHy7plnQ==
"@chakra-ui/portal@2.0.15":
version "2.0.15"
resolved "https://registry.yarnpkg.com/@chakra-ui/portal/-/portal-2.0.15.tgz#21e1f97c4407fc15df8c365cb5cf799dac73ce41"
integrity sha512-z8v7K3j1/nMuBzp2+wRIIw7s/eipVtnXLdjK5yqbMxMRa44E8Mu5VNJLz3aQFLHXEUST+ifqrjImQeli9do6LQ==
dependencies:
"@chakra-ui/react-context" "2.0.8"
"@chakra-ui/react-context" "2.0.7"
"@chakra-ui/react-use-safe-layout-effect" "2.0.5"
"@chakra-ui/progress@2.1.6":
version "2.1.6"
resolved "https://registry.yarnpkg.com/@chakra-ui/progress/-/progress-2.1.6.tgz#398db20440979c37adb0a34821f805ae3471873b"
integrity sha512-hHh5Ysv4z6bK+j2GJbi/FT9CVyto2PtNUNwBmr3oNMVsoOUMoRjczfXvvYqp0EHr9PCpxqrq7sRwgQXUzhbDSw==
"@chakra-ui/progress@2.1.5":
version "2.1.5"
resolved "https://registry.yarnpkg.com/@chakra-ui/progress/-/progress-2.1.5.tgz#eb6a47adf2bff93971262d163461d390782a04ff"
integrity sha512-jj5Vp4lxUchuwp4RPCepM0yAyKi344bgsOd3Apd+ldxclDcewPc82fbwDu7g/Xv27LqJkT+7E/SlQy04wGrk0g==
dependencies:
"@chakra-ui/react-context" "2.0.8"
"@chakra-ui/react-context" "2.0.7"
"@chakra-ui/provider@2.2.2":
version "2.2.2"
resolved "https://registry.yarnpkg.com/@chakra-ui/provider/-/provider-2.2.2.tgz#a798d1c243f33e00c85763834a7350e0d1c643ad"
integrity sha512-UVwnIDnAWq1aKroN5AF+OpNpUqLVeIUk7tKvX3z4CY9FsPFFi6LTEhRHdhpwaU1Tau3Tf9agEu5URegpY7S8BA==
"@chakra-ui/provider@2.1.2":
version "2.1.2"
resolved "https://registry.yarnpkg.com/@chakra-ui/provider/-/provider-2.1.2.tgz#b025cb718826b003b3c9535b6961e8f3be70ebd5"
integrity sha512-4lLlz8QuJv00BhfyKzWpzfoti9MDOdJ/MqXixJV/EZ02RMBOdE9qy9bSz/WckPC2MVhtRUuwMkxH+0QY21PXuw==
dependencies:
"@chakra-ui/css-reset" "2.1.1"
"@chakra-ui/portal" "2.0.16"
"@chakra-ui/css-reset" "2.0.12"
"@chakra-ui/portal" "2.0.15"
"@chakra-ui/react-env" "3.0.0"
"@chakra-ui/system" "2.5.5"
"@chakra-ui/system" "2.5.1"
"@chakra-ui/utils" "2.0.15"
"@chakra-ui/radio@2.0.22":
version "2.0.22"
resolved "https://registry.yarnpkg.com/@chakra-ui/radio/-/radio-2.0.22.tgz#fad0ce7c9ba4051991ed517cac4cfe526d6d47d9"
integrity sha512-GsQ5WAnLwivWl6gPk8P1x+tCcpVakCt5R5T0HumF7DGPXKdJbjS+RaFySrbETmyTJsKY4QrfXn+g8CWVrMjPjw==
"@chakra-ui/radio@2.0.19":
version "2.0.19"
resolved "https://registry.yarnpkg.com/@chakra-ui/radio/-/radio-2.0.19.tgz#8d5c02eae8eddbced4476b1b50921ade62f0a744"
integrity sha512-PlJiV59eGSmeKP4v/4+ccQUWGRd0cjPKkj/p3L+UbOf8pl9dWm8y9kIeL5TYbghQSDv0nzkrH4+yMnnDTZjdMQ==
dependencies:
"@chakra-ui/form-control" "2.0.18"
"@chakra-ui/react-context" "2.0.8"
"@chakra-ui/form-control" "2.0.17"
"@chakra-ui/react-context" "2.0.7"
"@chakra-ui/react-types" "2.0.7"
"@chakra-ui/react-use-merge-refs" "2.0.7"
"@chakra-ui/shared-utils" "2.0.5"
"@zag-js/focus-visible" "0.2.2"
"@zag-js/focus-visible" "0.2.1"
"@chakra-ui/react-children-utils@2.0.6":
version "2.0.6"
resolved "https://registry.yarnpkg.com/@chakra-ui/react-children-utils/-/react-children-utils-2.0.6.tgz#6c480c6a60678fcb75cb7d57107c7a79e5179b92"
integrity sha512-QVR2RC7QsOsbWwEnq9YduhpqSFnZGvjjGREV8ygKi8ADhXh93C8azLECCUVgRJF2Wc+So1fgxmjLcbZfY2VmBA==
"@chakra-ui/react-context@2.0.8":
version "2.0.8"
resolved "https://registry.yarnpkg.com/@chakra-ui/react-context/-/react-context-2.0.8.tgz#5e0ed33ac3995875a21dea0e12b0ee5fc4c2e3cc"
integrity sha512-tRTKdn6lCTXM6WPjSokAAKCw2ioih7Eg8cNgaYRSwKBck8nkz9YqxgIIEj3dJD7MGtpl24S/SNI98iRWkRwR/A==
"@chakra-ui/react-context@2.0.7":
version "2.0.7"
resolved "https://registry.yarnpkg.com/@chakra-ui/react-context/-/react-context-2.0.7.tgz#f79a2b072d04d4280ec8799dc03a8a1af521ca2e"
integrity sha512-i7EGmSU+h2GB30cwrKB4t1R5BMHyGoJM5L2Zz7b+ZUX4aAqyPcfe97wPiQB6Rgr1ImGXrUeov4CDVrRZ2FPgLQ==
"@chakra-ui/react-env@3.0.0":
version "3.0.0"
@@ -568,12 +568,12 @@
resolved "https://registry.yarnpkg.com/@chakra-ui/react-use-safe-layout-effect/-/react-use-safe-layout-effect-2.0.5.tgz#6cf388c37fd2a42b5295a292e149b32f860a00a7"
integrity sha512-MwAQBz3VxoeFLaesaSEN87reVNVbjcQBDex2WGexAg6hUB6n4gc1OWYH/iXp4tzp4kuggBNhEHkk9BMYXWfhJQ==
"@chakra-ui/react-use-size@2.0.10":
version "2.0.10"
resolved "https://registry.yarnpkg.com/@chakra-ui/react-use-size/-/react-use-size-2.0.10.tgz#6131950852490c06e5fb3760bf64097c8057391f"
integrity sha512-fdIkH14GDnKQrtQfxX8N3gxbXRPXEl67Y3zeD9z4bKKcQUAYIMqs0MsPZY+FMpGQw8QqafM44nXfL038aIrC5w==
"@chakra-ui/react-use-size@2.0.9":
version "2.0.9"
resolved "https://registry.yarnpkg.com/@chakra-ui/react-use-size/-/react-use-size-2.0.9.tgz#00717867b98a24c3bdcfaa0c3e70732404193486"
integrity sha512-Jce7QmO1jlQZq+Y77VKckWzroRnajChzUQ8xhLQZO6VbYvrpg3cu+X2QCz3G+MZzB+1/hnvvAqmZ+uJLd8rEJg==
dependencies:
"@zag-js/element-size" "0.3.2"
"@zag-js/element-size" "0.3.1"
"@chakra-ui/react-use-timeout@2.0.5":
version "2.0.5"
@@ -594,69 +594,69 @@
dependencies:
"@chakra-ui/utils" "2.0.15"
"@chakra-ui/react@^2.5.5":
version "2.5.5"
resolved "https://registry.yarnpkg.com/@chakra-ui/react/-/react-2.5.5.tgz#5ae2450ec0d10d63e1314747466f21cf542032ff"
integrity sha512-aBVMUtdWv2MrptD/tKSqICPsuJ+I+jvauegffO1qPUDlK3RrXIDeOHkLGWohgXNcjY5bGVWguFEzJm97//0ooQ==
"@chakra-ui/react@^2.5.1":
version "2.5.1"
resolved "https://registry.yarnpkg.com/@chakra-ui/react/-/react-2.5.1.tgz#05414db2b512bd4402e42eecc6b915d85102c576"
integrity sha512-ugkaqfcNMb9L4TkalWiF3rnqfr0TlUUD46JZaDIZiORVisaSwXTZTQrVfG40VghhaJT28rnC5WtiE8kd567ZBQ==
dependencies:
"@chakra-ui/accordion" "2.1.11"
"@chakra-ui/alert" "2.1.0"
"@chakra-ui/avatar" "2.2.8"
"@chakra-ui/breadcrumb" "2.1.5"
"@chakra-ui/button" "2.0.18"
"@chakra-ui/accordion" "2.1.9"
"@chakra-ui/alert" "2.0.17"
"@chakra-ui/avatar" "2.2.5"
"@chakra-ui/breadcrumb" "2.1.4"
"@chakra-ui/button" "2.0.16"
"@chakra-ui/card" "2.1.6"
"@chakra-ui/checkbox" "2.2.14"
"@chakra-ui/checkbox" "2.2.10"
"@chakra-ui/close-button" "2.0.17"
"@chakra-ui/control-box" "2.0.13"
"@chakra-ui/counter" "2.0.14"
"@chakra-ui/css-reset" "2.1.1"
"@chakra-ui/editable" "2.0.21"
"@chakra-ui/css-reset" "2.0.12"
"@chakra-ui/editable" "2.0.19"
"@chakra-ui/focus-lock" "2.0.16"
"@chakra-ui/form-control" "2.0.18"
"@chakra-ui/form-control" "2.0.17"
"@chakra-ui/hooks" "2.1.6"
"@chakra-ui/icon" "3.0.16"
"@chakra-ui/image" "2.0.15"
"@chakra-ui/input" "2.0.21"
"@chakra-ui/layout" "2.1.18"
"@chakra-ui/input" "2.0.20"
"@chakra-ui/layout" "2.1.16"
"@chakra-ui/live-region" "2.0.13"
"@chakra-ui/media-query" "3.2.12"
"@chakra-ui/menu" "2.1.12"
"@chakra-ui/modal" "2.2.11"
"@chakra-ui/number-input" "2.0.19"
"@chakra-ui/pin-input" "2.0.20"
"@chakra-ui/popover" "2.1.9"
"@chakra-ui/menu" "2.1.9"
"@chakra-ui/modal" "2.2.9"
"@chakra-ui/number-input" "2.0.18"
"@chakra-ui/pin-input" "2.0.19"
"@chakra-ui/popover" "2.1.8"
"@chakra-ui/popper" "3.0.13"
"@chakra-ui/portal" "2.0.16"
"@chakra-ui/progress" "2.1.6"
"@chakra-ui/provider" "2.2.2"
"@chakra-ui/radio" "2.0.22"
"@chakra-ui/portal" "2.0.15"
"@chakra-ui/progress" "2.1.5"
"@chakra-ui/provider" "2.1.2"
"@chakra-ui/radio" "2.0.19"
"@chakra-ui/react-env" "3.0.0"
"@chakra-ui/select" "2.0.19"
"@chakra-ui/select" "2.0.18"
"@chakra-ui/skeleton" "2.0.24"
"@chakra-ui/slider" "2.0.23"
"@chakra-ui/slider" "2.0.21"
"@chakra-ui/spinner" "2.0.13"
"@chakra-ui/stat" "2.0.18"
"@chakra-ui/styled-system" "2.8.0"
"@chakra-ui/switch" "2.0.26"
"@chakra-ui/system" "2.5.5"
"@chakra-ui/table" "2.0.17"
"@chakra-ui/tabs" "2.1.9"
"@chakra-ui/tag" "3.0.0"
"@chakra-ui/textarea" "2.0.19"
"@chakra-ui/theme" "3.0.1"
"@chakra-ui/theme-utils" "2.0.15"
"@chakra-ui/toast" "6.1.1"
"@chakra-ui/tooltip" "2.2.7"
"@chakra-ui/transition" "2.0.16"
"@chakra-ui/stat" "2.0.17"
"@chakra-ui/styled-system" "2.6.1"
"@chakra-ui/switch" "2.0.22"
"@chakra-ui/system" "2.5.1"
"@chakra-ui/table" "2.0.16"
"@chakra-ui/tabs" "2.1.8"
"@chakra-ui/tag" "2.0.17"
"@chakra-ui/textarea" "2.0.18"
"@chakra-ui/theme" "2.2.5"
"@chakra-ui/theme-utils" "2.0.11"
"@chakra-ui/toast" "6.0.1"
"@chakra-ui/tooltip" "2.2.6"
"@chakra-ui/transition" "2.0.15"
"@chakra-ui/utils" "2.0.15"
"@chakra-ui/visually-hidden" "2.0.15"
"@chakra-ui/select@2.0.19":
version "2.0.19"
resolved "https://registry.yarnpkg.com/@chakra-ui/select/-/select-2.0.19.tgz#957e95a17a890d8c0a851e2f00a8d8dd17932d66"
integrity sha512-eAlFh+JhwtJ17OrB6fO6gEAGOMH18ERNrXLqWbYLrs674Le7xuREgtuAYDoxUzvYXYYTTdOJtVbcHGriI3o6rA==
"@chakra-ui/select@2.0.18":
version "2.0.18"
resolved "https://registry.yarnpkg.com/@chakra-ui/select/-/select-2.0.18.tgz#4eb6092610067c1b4131353fe39b4466e251395b"
integrity sha512-1d2lUT5LM6oOs5x4lzBh4GFDuXX62+lr+sgV7099g951/5UNbb0CS2hSZHsO7yZThLNbr7QTWZvAOAayVcGzdw==
dependencies:
"@chakra-ui/form-control" "2.0.18"
"@chakra-ui/form-control" "2.0.17"
"@chakra-ui/shared-utils" "2.0.5"
"@chakra-ui/shared-utils@2.0.5":
@@ -673,20 +673,20 @@
"@chakra-ui/react-use-previous" "2.0.5"
"@chakra-ui/shared-utils" "2.0.5"
"@chakra-ui/slider@2.0.23":
version "2.0.23"
resolved "https://registry.yarnpkg.com/@chakra-ui/slider/-/slider-2.0.23.tgz#9130c7aee8ca876be64d1aeba6b84fe421c8207b"
integrity sha512-/eyRUXLla+ZdBUPXpakE3SAS2JS8mIJR6qcUYiPVKSpRAi6tMyYeQijAXn2QC1AUVd2JrG8Pz+1Jy7Po3uA7cA==
"@chakra-ui/slider@2.0.21":
version "2.0.21"
resolved "https://registry.yarnpkg.com/@chakra-ui/slider/-/slider-2.0.21.tgz#f65b15bf0d5f827699ff9a2d6faee35006e2bfce"
integrity sha512-Mm76yJxEqJl21+3waEcKg3tM8Y4elJ7mcViN6Brj35PTfzUJfSJxeBGo1nLPJ+X5jLj7o/L4kfBmUk3lY4QYEQ==
dependencies:
"@chakra-ui/number-utils" "2.0.7"
"@chakra-ui/react-context" "2.0.8"
"@chakra-ui/react-context" "2.0.7"
"@chakra-ui/react-types" "2.0.7"
"@chakra-ui/react-use-callback-ref" "2.0.7"
"@chakra-ui/react-use-controllable-state" "2.0.8"
"@chakra-ui/react-use-latest-ref" "2.0.5"
"@chakra-ui/react-use-merge-refs" "2.0.7"
"@chakra-ui/react-use-pan-event" "2.0.9"
"@chakra-ui/react-use-size" "2.0.10"
"@chakra-ui/react-use-size" "2.0.9"
"@chakra-ui/react-use-update-effect" "2.0.7"
"@chakra-ui/spinner@2.0.13":
@@ -696,82 +696,82 @@
dependencies:
"@chakra-ui/shared-utils" "2.0.5"
"@chakra-ui/stat@2.0.18":
version "2.0.18"
resolved "https://registry.yarnpkg.com/@chakra-ui/stat/-/stat-2.0.18.tgz#9e5d21d162b7cf2cf92065c19291ead2d4660772"
integrity sha512-wKyfBqhVlIs9bkSerUc6F9KJMw0yTIEKArW7dejWwzToCLPr47u+CtYO6jlJHV6lRvkhi4K4Qc6pyvtJxZ3VpA==
"@chakra-ui/stat@2.0.17":
version "2.0.17"
resolved "https://registry.yarnpkg.com/@chakra-ui/stat/-/stat-2.0.17.tgz#2cd712cc7e0d58d9cbd542deea911f1b0925074f"
integrity sha512-PhD+5oVLWjQmGLfeZSmexp3AtLcaggWBwoMZ4z8QMZIQzf/fJJWMk0bMqxlpTv8ORDkfY/4ImuFB/RJHvcqlcA==
dependencies:
"@chakra-ui/icon" "3.0.16"
"@chakra-ui/react-context" "2.0.8"
"@chakra-ui/react-context" "2.0.7"
"@chakra-ui/shared-utils" "2.0.5"
"@chakra-ui/styled-system@2.8.0":
version "2.8.0"
resolved "https://registry.yarnpkg.com/@chakra-ui/styled-system/-/styled-system-2.8.0.tgz#c02aa7b4a15bd826c19d055cd226bd44f7470f26"
integrity sha512-bmRv/8ACJGGKGx84U1npiUddwdNifJ+/ETklGwooS5APM0ymwUtBYZpFxjYNJrqvVYpg3mVY6HhMyBVptLS7iA==
"@chakra-ui/styled-system@2.6.1":
version "2.6.1"
resolved "https://registry.yarnpkg.com/@chakra-ui/styled-system/-/styled-system-2.6.1.tgz#302d496d34c0b7b30c646a7e3c9b113a2f4588da"
integrity sha512-jy/1dVi1LxjoRCm+Eo5mqBgvPy5SCWMlIcz6GbIZBDpkGeKZwtqrZLjekxxLBCy8ORY+kJlUB0FT6AzVR/1tjw==
dependencies:
"@chakra-ui/shared-utils" "2.0.5"
csstype "^3.0.11"
lodash.mergewith "4.6.2"
"@chakra-ui/switch@2.0.26":
version "2.0.26"
resolved "https://registry.yarnpkg.com/@chakra-ui/switch/-/switch-2.0.26.tgz#b93eeafd788e47c21222524adceffe9ef62602d6"
integrity sha512-x62lF6VazSZJQuVxosChVR6+0lIJe8Pxgkl/C9vxjhp2yVYb3mew5tcX/sDOu0dYZy8ro/9hMfGkdN4r9xEU8A==
"@chakra-ui/switch@2.0.22":
version "2.0.22"
resolved "https://registry.yarnpkg.com/@chakra-ui/switch/-/switch-2.0.22.tgz#7b35e2b10ea4cf91fb49f5175b4335c61dcd25b3"
integrity sha512-+/Yy6y7VFD91uSPruF8ZvePi3tl5D8UNVATtWEQ+QBI92DLSM+PtgJ2F0Y9GMZ9NzMxpZ80DqwY7/kqcPCfLvw==
dependencies:
"@chakra-ui/checkbox" "2.2.14"
"@chakra-ui/checkbox" "2.2.10"
"@chakra-ui/shared-utils" "2.0.5"
"@chakra-ui/system@2.5.5":
version "2.5.5"
resolved "https://registry.yarnpkg.com/@chakra-ui/system/-/system-2.5.5.tgz#b8b070d07ca9b0190363100396eea02cca754cec"
integrity sha512-52BIp/Zyvefgxn5RTByfkTeG4J+y81LWEjWm8jCaRFsLVm8IFgqIrngtcq4I7gD5n/UKbneHlb4eLHo4uc5yDQ==
"@chakra-ui/system@2.5.1":
version "2.5.1"
resolved "https://registry.yarnpkg.com/@chakra-ui/system/-/system-2.5.1.tgz#bc03a11ae31e795966c7618280548d5cd866f47e"
integrity sha512-4+86OrcSoq7lGkm5fh+sJ3IWXSTzjz+HOllRbCW2Rtnmcg7ritiXVNV2VygEg2DrCcx5+tNqRHDM764zW+AEug==
dependencies:
"@chakra-ui/color-mode" "2.1.12"
"@chakra-ui/object-utils" "2.0.8"
"@chakra-ui/react-utils" "2.0.12"
"@chakra-ui/styled-system" "2.8.0"
"@chakra-ui/theme-utils" "2.0.15"
"@chakra-ui/styled-system" "2.6.1"
"@chakra-ui/theme-utils" "2.0.11"
"@chakra-ui/utils" "2.0.15"
react-fast-compare "3.2.1"
react-fast-compare "3.2.0"
"@chakra-ui/table@2.0.17":
version "2.0.17"
resolved "https://registry.yarnpkg.com/@chakra-ui/table/-/table-2.0.17.tgz#ad394dc6dcbe5a8a9e6d899997ecca3471603977"
integrity sha512-OScheTEp1LOYvTki2NFwnAYvac8siAhW9BI5RKm5f5ORL2gVJo4I72RUqE0aKe1oboxgm7CYt5afT5PS5cG61A==
"@chakra-ui/table@2.0.16":
version "2.0.16"
resolved "https://registry.yarnpkg.com/@chakra-ui/table/-/table-2.0.16.tgz#e69736cba5cfb218c5e40592ad9280c6e32f6fe7"
integrity sha512-vWDXZ6Ad3Aj66curp1tZBHvCfQHX2FJ4ijLiqGgQszWFIchfhJ5vMgEBJaFMZ+BN1draAjuRTZqaQefOApzvRg==
dependencies:
"@chakra-ui/react-context" "2.0.8"
"@chakra-ui/react-context" "2.0.7"
"@chakra-ui/shared-utils" "2.0.5"
"@chakra-ui/tabs@2.1.9":
version "2.1.9"
resolved "https://registry.yarnpkg.com/@chakra-ui/tabs/-/tabs-2.1.9.tgz#2e5214cb453c6cc0c240e82bd88af1042fc6fe0e"
integrity sha512-Yf8e0kRvaGM6jfkJum0aInQ0U3ZlCafmrYYni2lqjcTtThqu+Yosmo3iYlnullXxCw5MVznfrkb9ySvgQowuYg==
"@chakra-ui/tabs@2.1.8":
version "2.1.8"
resolved "https://registry.yarnpkg.com/@chakra-ui/tabs/-/tabs-2.1.8.tgz#e83071380f9a3633810308d45de51be7a74f5eb9"
integrity sha512-B7LeFN04Ny2jsSy5TFOQxnbZ6ITxGxLxsB2PE0vvQjMSblBrUryOxdjw80HZhfiw6od0ikK9CeKQOIt9QCguSw==
dependencies:
"@chakra-ui/clickable" "2.0.14"
"@chakra-ui/descendant" "3.0.14"
"@chakra-ui/descendant" "3.0.13"
"@chakra-ui/lazy-utils" "2.0.5"
"@chakra-ui/react-children-utils" "2.0.6"
"@chakra-ui/react-context" "2.0.8"
"@chakra-ui/react-context" "2.0.7"
"@chakra-ui/react-use-controllable-state" "2.0.8"
"@chakra-ui/react-use-merge-refs" "2.0.7"
"@chakra-ui/react-use-safe-layout-effect" "2.0.5"
"@chakra-ui/shared-utils" "2.0.5"
"@chakra-ui/tag@3.0.0":
version "3.0.0"
resolved "https://registry.yarnpkg.com/@chakra-ui/tag/-/tag-3.0.0.tgz#d86cdab59bb3ff7fc628c2dbe7a5ff1b36bd3e96"
integrity sha512-YWdMmw/1OWRwNkG9pX+wVtZio+B89odaPj6XeMn5nfNN8+jyhIEpouWv34+CO9G0m1lupJTxPSfgLAd7cqXZMA==
"@chakra-ui/tag@2.0.17":
version "2.0.17"
resolved "https://registry.yarnpkg.com/@chakra-ui/tag/-/tag-2.0.17.tgz#97adb86db190ddb3526060b78c590392e0ac8b4c"
integrity sha512-A47zE9Ft9qxOJ+5r1cUseKRCoEdqCRzFm0pOtZgRcckqavglk75Xjgz8HbBpUO2zqqd49MlqdOwR8o87fXS1vg==
dependencies:
"@chakra-ui/icon" "3.0.16"
"@chakra-ui/react-context" "2.0.8"
"@chakra-ui/react-context" "2.0.7"
"@chakra-ui/textarea@2.0.19":
version "2.0.19"
resolved "https://registry.yarnpkg.com/@chakra-ui/textarea/-/textarea-2.0.19.tgz#470b459f9cb3255d2abbe07d46b0a5b60a6a32c5"
integrity sha512-adJk+qVGsFeJDvfn56CcJKKse8k7oMGlODrmpnpTdF+xvlsiTM+1GfaJvgNSpHHuQFdz/A0z1uJtfGefk0G2ZA==
"@chakra-ui/textarea@2.0.18":
version "2.0.18"
resolved "https://registry.yarnpkg.com/@chakra-ui/textarea/-/textarea-2.0.18.tgz#da6d629b465f65bbc7b48039c2e48a4ae1d853b4"
integrity sha512-aGHHb29vVifO0OtcK/k8cMykzjOKo/coDTU0NJqz7OOLAWIMNV2eGenvmO1n9tTZbmbqHiX+Sa1nPRX+pd14lg==
dependencies:
"@chakra-ui/form-control" "2.0.18"
"@chakra-ui/form-control" "2.0.17"
"@chakra-ui/shared-utils" "2.0.5"
"@chakra-ui/theme-tools@2.0.17":
@@ -783,57 +783,57 @@
"@chakra-ui/shared-utils" "2.0.5"
color2k "^2.0.0"
"@chakra-ui/theme-utils@2.0.15":
version "2.0.15"
resolved "https://registry.yarnpkg.com/@chakra-ui/theme-utils/-/theme-utils-2.0.15.tgz#968a5e8c47bb403323fe67049c7b751a6e47f069"
integrity sha512-UuxtEgE7gwMTGDXtUpTOI7F5X0iHB9ekEOG5PWPn2wWBL7rlk2JtPI7UP5Um5Yg6vvBfXYGK1ySahxqsgf+87g==
"@chakra-ui/theme-utils@2.0.11":
version "2.0.11"
resolved "https://registry.yarnpkg.com/@chakra-ui/theme-utils/-/theme-utils-2.0.11.tgz#c01b1d14fdd63326d1ad11fd8f0872921ea43872"
integrity sha512-lBAay6Sq3/fl7exd3mFxWAbzgdQowytor0fnlHrpNStn1HgFjXukwsf6356XQOie2Vd8qaMM7qZtMh4AiC0dcg==
dependencies:
"@chakra-ui/shared-utils" "2.0.5"
"@chakra-ui/styled-system" "2.8.0"
"@chakra-ui/theme" "3.0.1"
"@chakra-ui/styled-system" "2.6.1"
"@chakra-ui/theme" "2.2.5"
lodash.mergewith "4.6.2"
"@chakra-ui/theme@3.0.1":
version "3.0.1"
resolved "https://registry.yarnpkg.com/@chakra-ui/theme/-/theme-3.0.1.tgz#151fc5d1e23d0fd0cd29d28acf8f6017269c13fc"
integrity sha512-92kDm/Ux/51uJqhRKevQo/O/rdwucDYcpHg2QuwzdAxISCeYvgtl2TtgOOl5EnqEP0j3IEAvZHZUlv8TTbawaw==
"@chakra-ui/theme@2.2.5":
version "2.2.5"
resolved "https://registry.yarnpkg.com/@chakra-ui/theme/-/theme-2.2.5.tgz#18ed1755ff27c1ff1f1a77083ffc546c361c926e"
integrity sha512-hYASZMwu0NqEv6PPydu+F3I+kMNd44yR4TwjR/lXBz/LEh64L6UPY6kQjebCfgdVtsGdl3HKg+eLlfa7SvfRgw==
dependencies:
"@chakra-ui/anatomy" "2.1.2"
"@chakra-ui/shared-utils" "2.0.5"
"@chakra-ui/theme-tools" "2.0.17"
"@chakra-ui/toast@6.1.1":
version "6.1.1"
resolved "https://registry.yarnpkg.com/@chakra-ui/toast/-/toast-6.1.1.tgz#7ca78f38069bc87fa75b64de76c8fc2758bdf419"
integrity sha512-JtjIKkPVjEu8okGGCipCxNVgK/15h5AicTATZ6RbG2MsHmr4GfKG3fUCvpbuZseArqmLqGLQZQJjVE9vJzaSkQ==
"@chakra-ui/toast@6.0.1":
version "6.0.1"
resolved "https://registry.yarnpkg.com/@chakra-ui/toast/-/toast-6.0.1.tgz#726b67a57cdd592320bb3f450c66d007a2a1d902"
integrity sha512-ej2kJXvu/d2h6qnXU5D8XTyw0qpsfmbiU7hUffo/sPxkz89AUOQ08RUuUmB1ssW/FZcQvNMJ5WgzCTKHGBxtxw==
dependencies:
"@chakra-ui/alert" "2.1.0"
"@chakra-ui/alert" "2.0.17"
"@chakra-ui/close-button" "2.0.17"
"@chakra-ui/portal" "2.0.16"
"@chakra-ui/react-context" "2.0.8"
"@chakra-ui/portal" "2.0.15"
"@chakra-ui/react-context" "2.0.7"
"@chakra-ui/react-use-timeout" "2.0.5"
"@chakra-ui/react-use-update-effect" "2.0.7"
"@chakra-ui/shared-utils" "2.0.5"
"@chakra-ui/styled-system" "2.8.0"
"@chakra-ui/theme" "3.0.1"
"@chakra-ui/styled-system" "2.6.1"
"@chakra-ui/theme" "2.2.5"
"@chakra-ui/tooltip@2.2.7":
version "2.2.7"
resolved "https://registry.yarnpkg.com/@chakra-ui/tooltip/-/tooltip-2.2.7.tgz#7c305efb057a5fe4694b1b8d82395aec776d8f57"
integrity sha512-ImUJ6NnVqARaYqpgtO+kzucDRmxo8AF3jMjARw0bx2LxUkKwgRCOEaaRK5p5dHc0Kr6t5/XqjDeUNa19/sLauA==
"@chakra-ui/tooltip@2.2.6":
version "2.2.6"
resolved "https://registry.yarnpkg.com/@chakra-ui/tooltip/-/tooltip-2.2.6.tgz#a38f9ff2dd8a574c8cf49526c3846533455f8ddd"
integrity sha512-4cbneidZ5+HCWge3OZzewRQieIvhDjSsl+scrl4Scx7E0z3OmqlTIESU5nGIZDBLYqKn/UirEZhqaQ33FOS2fw==
dependencies:
"@chakra-ui/popper" "3.0.13"
"@chakra-ui/portal" "2.0.16"
"@chakra-ui/portal" "2.0.15"
"@chakra-ui/react-types" "2.0.7"
"@chakra-ui/react-use-disclosure" "2.0.8"
"@chakra-ui/react-use-event-listener" "2.0.7"
"@chakra-ui/react-use-merge-refs" "2.0.7"
"@chakra-ui/shared-utils" "2.0.5"
"@chakra-ui/transition@2.0.16":
version "2.0.16"
resolved "https://registry.yarnpkg.com/@chakra-ui/transition/-/transition-2.0.16.tgz#498c91e6835bb5d950fd1d1402f483b85f7dcd87"
integrity sha512-E+RkwlPc3H7P1crEXmXwDXMB2lqY2LLia2P5siQ4IEnRWIgZXlIw+8Em+NtHNgusel2N+9yuB0wT9SeZZeZ3CQ==
"@chakra-ui/transition@2.0.15":
version "2.0.15"
resolved "https://registry.yarnpkg.com/@chakra-ui/transition/-/transition-2.0.15.tgz#c640df2ea82f5ad58c55a6e1a7c338f377cb96d8"
integrity sha512-o9LBK/llQfUDHF/Ty3cQ6nShpekKTqHUoJlUOzNKhoTsNpoRerr9v0jwojrX1YI02KtVjfhFU6PiqXlDfREoNw==
dependencies:
"@chakra-ui/shared-utils" "2.0.5"
@@ -1081,18 +1081,6 @@
resolved "https://registry.yarnpkg.com/@esbuild/win32-x64/-/win32-x64-0.16.17.tgz#c5a1a4bfe1b57f0c3e61b29883525c6da3e5c091"
integrity sha512-y+EHuSchhL7FjHgvQL/0fnnFmO4T1bhvWANX6gcnqTjtnKWbTvUMCpGnv2+t+31d7RzyEAYAd4u2fnIhHL6N/Q==
"@eslint-community/eslint-utils@^4.2.0":
version "4.4.0"
resolved "https://registry.yarnpkg.com/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz#a23514e8fb9af1269d5f7788aa556798d61c6b59"
integrity sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==
dependencies:
eslint-visitor-keys "^3.3.0"
"@eslint-community/regexpp@^4.4.0":
version "4.5.0"
resolved "https://registry.yarnpkg.com/@eslint-community/regexpp/-/regexpp-4.5.0.tgz#f6f729b02feee2c749f57e334b7a1b5f40a81724"
integrity sha512-vITaYzIcNmjn5tF5uxcZ/ft7/RXGrMUIS9HalWckEOF6ESiwXKoMzAQf2UW0aVd6rnOeExTJVd5hmWXucBKGXQ==
"@eslint/eslintrc@^1.4.1":
version "1.4.1"
resolved "https://registry.yarnpkg.com/@eslint/eslintrc/-/eslintrc-1.4.1.tgz#af58772019a2d271b7e2d4c23ff4ddcba3ccfb3e"
@@ -1780,47 +1768,47 @@
resolved "https://registry.yarnpkg.com/@types/uuid/-/uuid-9.0.0.tgz#53ef263e5239728b56096b0a869595135b7952d2"
integrity sha512-kr90f+ERiQtKWMz5rP32ltJ/BtULDI5RVO0uavn1HQUOwjx0R1h0rnDYNL0CepF1zL5bSY6FISAfd9tOdDhU5Q==
"@typescript-eslint/eslint-plugin@^5.57.0":
version "5.57.0"
resolved "https://registry.yarnpkg.com/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.57.0.tgz#52c8a7a4512f10e7249ca1e2e61f81c62c34365c"
integrity sha512-itag0qpN6q2UMM6Xgk6xoHa0D0/P+M17THnr4SVgqn9Rgam5k/He33MA7/D7QoJcdMxHFyX7U9imaBonAX/6qA==
"@typescript-eslint/eslint-plugin@^5.52.0":
version "5.52.0"
resolved "https://registry.yarnpkg.com/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.52.0.tgz#5fb0d43574c2411f16ea80f5fc335b8eaa7b28a8"
integrity sha512-lHazYdvYVsBokwCdKOppvYJKaJ4S41CgKBcPvyd0xjZNbvQdhn/pnJlGtQksQ/NhInzdaeaSarlBjDXHuclEbg==
dependencies:
"@eslint-community/regexpp" "^4.4.0"
"@typescript-eslint/scope-manager" "5.57.0"
"@typescript-eslint/type-utils" "5.57.0"
"@typescript-eslint/utils" "5.57.0"
"@typescript-eslint/scope-manager" "5.52.0"
"@typescript-eslint/type-utils" "5.52.0"
"@typescript-eslint/utils" "5.52.0"
debug "^4.3.4"
grapheme-splitter "^1.0.4"
ignore "^5.2.0"
natural-compare-lite "^1.4.0"
regexpp "^3.2.0"
semver "^7.3.7"
tsutils "^3.21.0"
"@typescript-eslint/parser@^5.57.0":
version "5.57.0"
resolved "https://registry.yarnpkg.com/@typescript-eslint/parser/-/parser-5.57.0.tgz#f675bf2cd1a838949fd0de5683834417b757e4fa"
integrity sha512-orrduvpWYkgLCyAdNtR1QIWovcNZlEm6yL8nwH/eTxWLd8gsP+25pdLHYzL2QdkqrieaDwLpytHqycncv0woUQ==
"@typescript-eslint/parser@^5.52.0":
version "5.52.0"
resolved "https://registry.yarnpkg.com/@typescript-eslint/parser/-/parser-5.52.0.tgz#73c136df6c0133f1d7870de7131ccf356f5be5a4"
integrity sha512-e2KiLQOZRo4Y0D/b+3y08i3jsekoSkOYStROYmPUnGMEoA0h+k2qOH5H6tcjIc68WDvGwH+PaOrP1XRzLJ6QlA==
dependencies:
"@typescript-eslint/scope-manager" "5.57.0"
"@typescript-eslint/types" "5.57.0"
"@typescript-eslint/typescript-estree" "5.57.0"
"@typescript-eslint/scope-manager" "5.52.0"
"@typescript-eslint/types" "5.52.0"
"@typescript-eslint/typescript-estree" "5.52.0"
debug "^4.3.4"
"@typescript-eslint/scope-manager@5.57.0":
version "5.57.0"
resolved "https://registry.yarnpkg.com/@typescript-eslint/scope-manager/-/scope-manager-5.57.0.tgz#79ccd3fa7bde0758059172d44239e871e087ea36"
integrity sha512-NANBNOQvllPlizl9LatX8+MHi7bx7WGIWYjPHDmQe5Si/0YEYfxSljJpoTyTWFTgRy3X8gLYSE4xQ2U+aCozSw==
"@typescript-eslint/scope-manager@5.52.0":
version "5.52.0"
resolved "https://registry.yarnpkg.com/@typescript-eslint/scope-manager/-/scope-manager-5.52.0.tgz#a993d89a0556ea16811db48eabd7c5b72dcb83d1"
integrity sha512-AR7sxxfBKiNV0FWBSARxM8DmNxrwgnYMPwmpkC1Pl1n+eT8/I2NAUPuwDy/FmDcC6F8pBfmOcaxcxRHspgOBMw==
dependencies:
"@typescript-eslint/types" "5.57.0"
"@typescript-eslint/visitor-keys" "5.57.0"
"@typescript-eslint/types" "5.52.0"
"@typescript-eslint/visitor-keys" "5.52.0"
"@typescript-eslint/type-utils@5.57.0":
version "5.57.0"
resolved "https://registry.yarnpkg.com/@typescript-eslint/type-utils/-/type-utils-5.57.0.tgz#98e7531c4e927855d45bd362de922a619b4319f2"
integrity sha512-kxXoq9zOTbvqzLbdNKy1yFrxLC6GDJFE2Yuo3KqSwTmDOFjUGeWSakgoXT864WcK5/NAJkkONCiKb1ddsqhLXQ==
"@typescript-eslint/type-utils@5.52.0":
version "5.52.0"
resolved "https://registry.yarnpkg.com/@typescript-eslint/type-utils/-/type-utils-5.52.0.tgz#9fd28cd02e6f21f5109e35496df41893f33167aa"
integrity sha512-tEKuUHfDOv852QGlpPtB3lHOoig5pyFQN/cUiZtpw99D93nEBjexRLre5sQZlkMoHry/lZr8qDAt2oAHLKA6Jw==
dependencies:
"@typescript-eslint/typescript-estree" "5.57.0"
"@typescript-eslint/utils" "5.57.0"
"@typescript-eslint/typescript-estree" "5.52.0"
"@typescript-eslint/utils" "5.52.0"
debug "^4.3.4"
tsutils "^3.21.0"
@@ -1834,18 +1822,13 @@
resolved "https://registry.yarnpkg.com/@typescript-eslint/types/-/types-5.52.0.tgz#19e9abc6afb5bd37a1a9bea877a1a836c0b3241b"
integrity sha512-oV7XU4CHYfBhk78fS7tkum+/Dpgsfi91IIDy7fjCyq2k6KB63M6gMC0YIvy+iABzmXThCRI6xpCEyVObBdWSDQ==
"@typescript-eslint/types@5.57.0":
version "5.57.0"
resolved "https://registry.yarnpkg.com/@typescript-eslint/types/-/types-5.57.0.tgz#727bfa2b64c73a4376264379cf1f447998eaa132"
integrity sha512-mxsod+aZRSyLT+jiqHw1KK6xrANm19/+VFALVFP5qa/aiJnlP38qpyaTd0fEKhWvQk6YeNZ5LGwI1pDpBRBhtQ==
"@typescript-eslint/typescript-estree@5.57.0":
version "5.57.0"
resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-5.57.0.tgz#ebcd0ee3e1d6230e888d88cddf654252d41e2e40"
integrity sha512-LTzQ23TV82KpO8HPnWuxM2V7ieXW8O142I7hQTxWIHDcCEIjtkat6H96PFkYBQqGFLW/G/eVVOB9Z8rcvdY/Vw==
"@typescript-eslint/typescript-estree@5.52.0", "@typescript-eslint/typescript-estree@^5.13.0":
version "5.52.0"
resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-5.52.0.tgz#6408cb3c2ccc01c03c278cb201cf07e73347dfca"
integrity sha512-WeWnjanyEwt6+fVrSR0MYgEpUAuROxuAH516WPjUblIrClzYJj0kBbjdnbQXLpgAN8qbEuGywiQsXUVDiAoEuQ==
dependencies:
"@typescript-eslint/types" "5.57.0"
"@typescript-eslint/visitor-keys" "5.57.0"
"@typescript-eslint/types" "5.52.0"
"@typescript-eslint/visitor-keys" "5.52.0"
debug "^4.3.4"
globby "^11.1.0"
is-glob "^4.0.3"
@@ -1865,31 +1848,18 @@
semver "^7.3.5"
tsutils "^3.21.0"
"@typescript-eslint/typescript-estree@^5.13.0":
"@typescript-eslint/utils@5.52.0":
version "5.52.0"
resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-5.52.0.tgz#6408cb3c2ccc01c03c278cb201cf07e73347dfca"
integrity sha512-WeWnjanyEwt6+fVrSR0MYgEpUAuROxuAH516WPjUblIrClzYJj0kBbjdnbQXLpgAN8qbEuGywiQsXUVDiAoEuQ==
resolved "https://registry.yarnpkg.com/@typescript-eslint/utils/-/utils-5.52.0.tgz#b260bb5a8f6b00a0ed51db66bdba4ed5e4845a72"
integrity sha512-As3lChhrbwWQLNk2HC8Ree96hldKIqk98EYvypd3It8Q1f8d5zWyIoaZEp2va5667M4ZyE7X8UUR+azXrFl+NA==
dependencies:
"@typescript-eslint/types" "5.52.0"
"@typescript-eslint/visitor-keys" "5.52.0"
debug "^4.3.4"
globby "^11.1.0"
is-glob "^4.0.3"
semver "^7.3.7"
tsutils "^3.21.0"
"@typescript-eslint/utils@5.57.0":
version "5.57.0"
resolved "https://registry.yarnpkg.com/@typescript-eslint/utils/-/utils-5.57.0.tgz#eab8f6563a2ac31f60f3e7024b91bf75f43ecef6"
integrity sha512-ps/4WohXV7C+LTSgAL5CApxvxbMkl9B9AUZRtnEFonpIxZDIT7wC1xfvuJONMidrkB9scs4zhtRyIwHh4+18kw==
dependencies:
"@eslint-community/eslint-utils" "^4.2.0"
"@types/json-schema" "^7.0.9"
"@types/semver" "^7.3.12"
"@typescript-eslint/scope-manager" "5.57.0"
"@typescript-eslint/types" "5.57.0"
"@typescript-eslint/typescript-estree" "5.57.0"
"@typescript-eslint/scope-manager" "5.52.0"
"@typescript-eslint/types" "5.52.0"
"@typescript-eslint/typescript-estree" "5.52.0"
eslint-scope "^5.1.1"
eslint-utils "^3.0.0"
semver "^7.3.7"
"@typescript-eslint/visitor-keys@4.33.0":
@@ -1908,14 +1878,6 @@
"@typescript-eslint/types" "5.52.0"
eslint-visitor-keys "^3.3.0"
"@typescript-eslint/visitor-keys@5.57.0":
version "5.57.0"
resolved "https://registry.yarnpkg.com/@typescript-eslint/visitor-keys/-/visitor-keys-5.57.0.tgz#e2b2f4174aff1d15eef887ce3d019ecc2d7a8ac1"
integrity sha512-ery2g3k0hv5BLiKpPuwYt9KBkAp2ugT6VvyShXdLOkax895EC55sP0Tx5L0fZaQueiK3fBLvHVvEl3jFS5ia+g==
dependencies:
"@typescript-eslint/types" "5.57.0"
eslint-visitor-keys "^3.3.0"
"@vitejs/plugin-react-swc@^3.2.0":
version "3.2.0"
resolved "https://registry.yarnpkg.com/@vitejs/plugin-react-swc/-/plugin-react-swc-3.2.0.tgz#7c4f6e116a296c27f680d05750f9dbf798cf7709"
@@ -1928,15 +1890,15 @@
resolved "https://registry.yarnpkg.com/@yarnpkg/lockfile/-/lockfile-1.1.0.tgz#e77a97fbd345b76d83245edcd17d393b1b41fb31"
integrity sha512-GpSwvyXOcOOlV70vbnzjj4fW5xW/FdUF6nQEt1ENy7m4ZCczi1+/buVUPAqmGfqznsORNFzUMjctTIp8a9tuCQ==
"@zag-js/element-size@0.3.2":
version "0.3.2"
resolved "https://registry.yarnpkg.com/@zag-js/element-size/-/element-size-0.3.2.tgz#ebb76af2a024230482406db41344598d1a9f54f4"
integrity sha512-bVvvigUGvAuj7PCkE5AbzvTJDTw5f3bg9nQdv+ErhVN8SfPPppLJEmmWdxqsRzrHXgx8ypJt/+Ty0kjtISVDsQ==
"@zag-js/element-size@0.3.1":
version "0.3.1"
resolved "https://registry.yarnpkg.com/@zag-js/element-size/-/element-size-0.3.1.tgz#f9f6ae98355e2250d18d0f6e2f1134a0ae4c6a2f"
integrity sha512-jR5j4G//bRzcxwAACWi9EfITnwjNmn10LxF4NmALrdZU7/PNWP3uUCdhCxd/0SCyeiJXUl0yvD57rWAbKPs1nw==
"@zag-js/focus-visible@0.2.2":
version "0.2.2"
resolved "https://registry.yarnpkg.com/@zag-js/focus-visible/-/focus-visible-0.2.2.tgz#56233480ca1275d3218fb2e10696a33d1a6b9e64"
integrity sha512-0j2gZq8HiZ51z4zNnSkF1iSkqlwRDvdH+son3wHdoz+7IUdMN/5Exd4TxMJ+gq2Of1DiXReYLL9qqh2PdQ4wgA==
"@zag-js/focus-visible@0.2.1":
version "0.2.1"
resolved "https://registry.yarnpkg.com/@zag-js/focus-visible/-/focus-visible-0.2.1.tgz#bf4f1009f4fd35a9728dfaa9214d8cb318fe8b1e"
integrity sha512-19uTjoZGP4/Ax7kSNhhay9JA83BirKzpqLkeEAilrpdI1hE5xuq6q+tzJOsrMOOqJrm7LkmZp5lbsTQzvK2pYg==
accepts@~1.3.4:
version "1.3.8"
@@ -4548,10 +4510,10 @@ react-dropzone@^14.2.3:
file-selector "^0.6.0"
prop-types "^15.8.1"
react-fast-compare@3.2.1:
version "3.2.1"
resolved "https://registry.yarnpkg.com/react-fast-compare/-/react-fast-compare-3.2.1.tgz#53933d9e14f364281d6cba24bfed7a4afb808b5f"
integrity sha512-xTYf9zFim2pEif/Fw16dBiXpe0hoy5PxcD8+OwBnTtNLfIm3g6WxhKNurY+6OmdH1u6Ta/W/Vl6vjbYP1MFnDg==
react-fast-compare@3.2.0:
version "3.2.0"
resolved "https://registry.yarnpkg.com/react-fast-compare/-/react-fast-compare-3.2.0.tgz#641a9da81b6a6320f270e89724fb45a0b39e43bb"
integrity sha512-rtGImPZ0YyLrscKI9xTpV8psd6I8VAtjKCzQDlzyDvqJA8XOW78TXYQwNRNd8g8JZnDu8q9Fu/1v4HPAVwVdHA==
react-fast-compare@^2.0.1:
version "2.0.4"
@@ -5349,11 +5311,6 @@ typescript@^4.0.0, typescript@^4.5.5:
resolved "https://registry.yarnpkg.com/typescript/-/typescript-4.9.5.tgz#095979f9bcc0d09da324d58d03ce8f8374cbe65a"
integrity sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g==
typescript@^5.0.3:
version "5.0.3"
resolved "https://registry.yarnpkg.com/typescript/-/typescript-5.0.3.tgz#fe976f0c826a88d0a382007681cbb2da44afdedf"
integrity sha512-xv8mOEDnigb/tN9PSMTwSEqAnUvkoXMQlicOb0IUVDBSQCgBSaAAROUZYy2IcUy5qU6XajK5jjjO7TMWqBTKZA==
unbox-primitive@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/unbox-primitive/-/unbox-primitive-1.0.2.tgz#29032021057d5e6cdbd08c5129c226dff8ed6f9e"

View File

@@ -22,7 +22,6 @@ import transformers
from diffusers.pipeline_utils import DiffusionPipeline
from diffusers.utils.import_utils import is_xformers_available
from omegaconf import OmegaConf
from pathlib import Path
from PIL import Image, ImageOps
from pytorch_lightning import logging, seed_everything
@@ -201,8 +200,6 @@ class Generate:
# it wasn't actually doing anything. This logic could be reinstated.
self.device = torch.device(choose_torch_device())
print(f">> Using device_type {self.device.type}")
if self.device.type == 'cuda':
print(f">> CUDA device '{torch.cuda.get_device_name(torch.cuda.current_device())}' (GPU {os.environ.get('CUDA_VISIBLE_DEVICES') or 0})")
if full_precision:
if self.precision != "auto":
raise ValueError("Remove --full_precision / -F if using --precision")
@@ -529,11 +526,6 @@ class Generate:
log_tokens=self.log_tokenization,
)
# untested code, so commented out for now
# if self.model.peft_manager:
# self.model = self.model.peft_manager.load(self.model, self.model.unet.dtype)
init_image, mask_image = self._make_images(
init_img,
init_mask,
@@ -915,8 +907,12 @@ class Generate:
return self._load_generator(".omnibus", "Omnibus")
def _load_generator(self, module, class_name):
mn = f"ldm.invoke.generator{module}"
cn = class_name
if self.is_legacy_model(self.model_name):
mn = f"ldm.invoke.ckpt_generator{module}"
cn = f"Ckpt{class_name}"
else:
mn = f"ldm.invoke.generator{module}"
cn = class_name
module = importlib.import_module(mn)
constructor = getattr(module, cn)
return constructor(self.model, self.precision)
@@ -978,7 +974,7 @@ class Generate:
self.generators = {}
seed_everything(random.randrange(0, np.iinfo(np.uint32).max))
if self.embedding_path and not model_data.get("ti_embeddings_loaded"):
if self.embedding_path is not None:
print(f'>> Loading embeddings from {self.embedding_path}')
for root, _, files in os.walk(self.embedding_path):
for name in files:
@@ -986,24 +982,14 @@ class Generate:
self.model.textual_inversion_manager.load_textual_inversion(
ti_path, defer_injecting_tokens=True
)
model_data["ti_embeddings_loaded"] = True
print(
f'>> Textual inversion triggers: {", ".join(sorted(self.model.textual_inversion_manager.get_all_trigger_strings()))}'
)
print(
f'>> Textual inversion triggers: {", ".join(sorted(self.model.textual_inversion_manager.get_all_trigger_strings()))}'
)
self.model_name = model_name
self._set_sampler() # requires self.model_name to be set first
self._save_last_used_model(model_name)
return self.model
def _save_last_used_model(self,model_name:str):
"""
Save name of the last model used.
"""
model_file_path = Path(Globals.root,'.last_model')
with open(model_file_path,'w') as f:
f.write(model_name)
def load_huggingface_concepts(self, concepts: list[str]):
self.model.textual_inversion_manager.load_huggingface_concepts(concepts)
@@ -1044,8 +1030,6 @@ class Generate:
image_callback=None,
prefix=None,
):
results = []
for r in image_list:
image, seed = r
try:
@@ -1099,10 +1083,6 @@ class Generate:
else:
r[0] = image
results.append([image, seed])
return results
def apply_textmask(
self, image_path: str, prompt: str, callback, threshold: float = 0.5
):
@@ -1131,6 +1111,9 @@ class Generate:
def sample_to_lowres_estimated_image(self, samples):
return self._make_base().sample_to_lowres_estimated_image(samples)
def is_legacy_model(self, model_name) -> bool:
return self.model_manager.is_legacy(model_name)
def _set_sampler(self):
if isinstance(self.model, DiffusionPipeline):
return self._set_scheduler()

View File

@@ -4,7 +4,6 @@ import shlex
import sys
import traceback
from argparse import Namespace
from packaging import version
from pathlib import Path
from typing import Union
@@ -17,15 +16,13 @@ if sys.platform == "darwin":
import pyparsing # type: ignore
print(f'DEBUG: [1] All system modules imported', file=sys.stderr)
import ldm.invoke
from ..generate import Generate
from .args import (Args, dream_cmd_from_png, metadata_dumps,
metadata_from_png)
from .generator.diffusers_pipeline import PipelineIntermediateState
from .globals import Globals, global_config_dir
from .globals import Globals
from .image_util import make_grid
from .log import write_log
from .model_manager import ModelManager
@@ -33,21 +30,14 @@ from .pngwriter import PngWriter, retrieve_metadata, write_metadata
from .readline import Completer, get_completer
from ..util import url_attachment_name
print(f'DEBUG: [2] All invokeai modules imported', file=sys.stderr)
# global used in multiple functions (fix)
infile = None
def main():
"""Initialize command-line parsers and the diffusion model"""
global infile
print('DEBUG: [3] Entered main()', file=sys.stderr)
print('DEBUG: INVOKEAI ENVIRONMENT:')
print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
print("\n".join([f'{x}:{os.environ[x]}' for x in os.environ.keys()]))
print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
opt = Args()
args = opt.parse_args()
if not args:
@@ -74,14 +64,7 @@ def main():
Globals.internet_available = args.internet_available and check_internet()
Globals.disable_xformers = not args.xformers
Globals.sequential_guidance = args.sequential_guidance
Globals.ckpt_convert = True # always true as of 2.3.4 for LoRA support
print(f'DEBUG: [4] Globals initialized', file=sys.stderr)
# run any post-install patches needed
run_patches()
print(f'DEBUG: [5] Patches run', file=sys.stderr)
Globals.ckpt_convert = args.ckpt_convert
print(f">> Internet connectivity is {Globals.internet_available}")
@@ -98,9 +81,8 @@ def main():
# loading here to avoid long delays on startup
# these two lines prevent a horrible warning message from appearing
# when the frozen CLIP tokenizer is imported
print(f'DEBUG: [6] Importing torch modules', file=sys.stderr)
import transformers # type: ignore
from ldm.generate import Generate
transformers.logging.set_verbosity_error()
@@ -108,7 +90,6 @@ def main():
diffusers.logging.set_verbosity_error()
print(f'DEBUG: [7] loading restoration models', file=sys.stderr)
# Loading Face Restoration and ESRGAN Modules
gfpgan, codeformer, esrgan = load_face_restoration(opt)
@@ -126,11 +107,7 @@ def main():
else:
embedding_path = None
if opt.lora_path:
Globals.lora_models_dir = opt.lora_path
# migrate legacy models
print(f'DEBUG: [8] migrating models', file=sys.stderr)
ModelManager.migrate_models()
# load the infile as a list of lines
@@ -146,14 +123,11 @@ def main():
print(f"{e}. Aborting.")
sys.exit(-1)
model = opt.model or retrieve_last_used_model()
print(f'DEBUG: [9] Creating generate object', file=sys.stderr)
# creating a Generate object:
try:
gen = Generate(
conf=opt.conf,
model=model,
model=opt.model,
sampler_name=opt.sampler_name,
embedding_path=embedding_path,
full_precision=opt.full_precision,
@@ -175,7 +149,6 @@ def main():
print(">> changed to seamless tiling mode")
# preload the model
print(f'DEBUG: [10] Loading default model', file=sys.stderr)
try:
gen.load_model()
except KeyError:
@@ -183,20 +156,15 @@ def main():
except Exception as e:
report_model_error(opt, e)
# completer is the readline object
completer = get_completer(opt, models=gen.model_manager.list_models())
# try to autoconvert new models
if path := opt.autoimport:
gen.model_manager.heuristic_import(
str(path),
commit_to_conf=opt.conf,
config_file_callback=lambda x: _pick_configuration_file(completer,x),
str(path), convert=False, commit_to_conf=opt.conf
)
if path := opt.autoconvert:
gen.model_manager.heuristic_import(
str(path), commit_to_conf=opt.conf
str(path), convert=True, commit_to_conf=opt.conf
)
# web server loops forever
@@ -210,7 +178,7 @@ def main():
)
try:
main_loop(gen, opt, completer)
main_loop(gen, opt)
except KeyboardInterrupt:
print(
f'\nGoodbye!\nYou can start InvokeAI again by running the "invoke.bat" (or "invoke.sh") script from {Globals.root}'
@@ -221,9 +189,8 @@ def main():
# TODO: main_loop() has gotten busy. Needs to be refactored.
def main_loop(gen, opt, completer):
def main_loop(gen, opt):
"""prompt/read/execute loop"""
print(f'DEBUG: [11] In main loop', file=sys.stderr)
global infile
done = False
doneAfterInFile = infile is not None
@@ -233,6 +200,7 @@ def main_loop(gen, opt, completer):
# The readline completer reads history from the .dream_history file located in the
# output directory specified at the time of script launch. We do not currently support
# changing the history file midstream when the output directory is changed.
completer = get_completer(opt, models=gen.model_manager.list_models())
set_default_output_dir(opt, completer)
if gen.model:
add_embedding_terms(gen, completer)
@@ -421,7 +389,6 @@ def main_loop(gen, opt, completer):
prior_variations,
postprocessed,
first_seed,
gen.model_name,
)
path = file_writer.save_image_and_prompt_to_png(
image=image,
@@ -435,7 +402,6 @@ def main_loop(gen, opt, completer):
else first_seed
],
model_hash=gen.model_hash,
model_id=gen.model_name,
),
name=filename,
compress_level=opt.png_compression,
@@ -659,7 +625,7 @@ def set_default_output_dir(opt: Args, completer: Completer):
completer.set_default_dir(opt.outdir)
def import_model(model_path: str, gen, opt, completer):
def import_model(model_path: str, gen, opt, completer, convert=False):
"""
model_path can be (1) a URL to a .ckpt file; (2) a local .ckpt file path;
(3) a huggingface repository id; or (4) a local directory containing a
@@ -690,10 +656,11 @@ def import_model(model_path: str, gen, opt, completer):
model_path,
model_name=model_name,
description=model_desc,
config_file_callback=lambda x: _pick_configuration_file(completer,x),
convert=convert,
)
if not imported_name:
print("** Aborting import.")
print("** Import failed or was skipped")
return
if not _verify_load(imported_name, gen):
@@ -707,48 +674,6 @@ def import_model(model_path: str, gen, opt, completer):
completer.update_models(gen.model_manager.list_models())
print(f">> {imported_name} successfully installed")
def _pick_configuration_file(completer, checkpoint_path: Path)->Path:
print(
f"""
Please select the type of the model at checkpoint {checkpoint_path}:
[1] A Stable Diffusion v1.x ckpt/safetensors model
[2] A Stable Diffusion v1.x inpainting ckpt/safetensors model
[3] A Stable Diffusion v2.x base model (512 pixels; there should be no 'parameterization:' line in its yaml file)
[4] A Stable Diffusion v2.x v-predictive model (768 pixels; look for a 'parameterization: "v"' line in its yaml file)
[5] Other (you will be prompted to enter the config file path)
[Q] I have no idea! Skip the import.
""")
choices = [
global_config_dir() / 'stable-diffusion' / x
for x in [
'v1-inference.yaml',
'v1-inpainting-inference.yaml',
'v2-inference.yaml',
'v2-inference-v.yaml',
]
]
ok = False
while not ok:
try:
choice = input('select 1-5, Q > ').strip()
if choice.startswith(('q','Q')):
return
if choice == '5':
completer.complete_extensions(('.yaml'))
choice = Path(input('Select config file for this model> ').strip()).absolute()
completer.complete_extensions(None)
ok = choice.exists()
else:
choice = choices[int(choice)-1]
ok = True
except (ValueError, IndexError):
print(f'{choice} is not a valid choice')
except EOFError:
return
return choice
def _verify_load(model_name: str, gen) -> bool:
print(">> Verifying that new model loads...")
current_model = gen.model_name
@@ -795,10 +720,14 @@ def convert_model(model_name_or_path: Union[Path, str], gen, opt, completer):
original_config_file = Path(model_info["config"])
model_name = model_name_or_path
model_description = model_info["description"]
vae_path = model_info.get("vae")
vae = model_info["vae"]
else:
print(f"** {model_name_or_path} is not a legacy .ckpt weights file")
return
if vae_repo := ldm.invoke.model_manager.VAE_TO_REPO_ID.get(Path(vae).stem):
vae_repo = dict(repo_id=vae_repo)
else:
vae_repo = None
model_name = manager.convert_and_import(
ckpt_path,
diffusers_path=Path(
@@ -807,16 +736,16 @@ def convert_model(model_name_or_path: Union[Path, str], gen, opt, completer):
model_name=model_name,
model_description=model_description,
original_config_file=original_config_file,
vae_path=vae_path,
vae=vae_repo,
)
else:
try:
import_model(model_name_or_path, gen, opt, completer)
import_model(model_name_or_path, gen, opt, completer, convert=True)
except KeyboardInterrupt:
return
manager.commit(opt.conf)
if ckpt_path and click.confirm(f"Delete the original .ckpt file at {ckpt_path}?", default=False):
manager.commit(opt.conf)
if click.confirm(f"Delete the original .ckpt file at {ckpt_path}?", default=False):
ckpt_path.unlink(missing_ok=True)
print(f"{ckpt_path} deleted")
@@ -853,7 +782,6 @@ def edit_model(model_name: str, gen, opt, completer):
print(f"\n>> Editing model {model_name} from configuration file {opt.conf}")
new_name = _get_model_name(manager.list_models(), completer, model_name)
completer.complete_extensions(('.yaml','.ckpt','.safetensors','.pt'))
for attribute in info.keys():
if type(info[attribute]) != str:
continue
@@ -861,7 +789,6 @@ def edit_model(model_name: str, gen, opt, completer):
continue
completer.set_line(info[attribute])
info[attribute] = input(f"{attribute}: ") or info[attribute]
completer.complete_extensions(None)
if info["format"] == "diffusers":
vae = info.get("vae", dict(repo_id=None, path=None, subfolder=None))
@@ -1014,14 +941,13 @@ def add_postprocessing_to_metadata(opt, original_file, new_file, tool, command):
def prepare_image_metadata(
opt,
prefix,
seed,
operation="generate",
prior_variations=[],
postprocessed=False,
first_seed=None,
model_id='unknown',
opt,
prefix,
seed,
operation="generate",
prior_variations=[],
postprocessed=False,
first_seed=None,
):
if postprocessed and opt.save_original:
filename = choose_postprocess_name(opt, prefix, seed)
@@ -1029,7 +955,6 @@ def prepare_image_metadata(
wildcards = dict(opt.__dict__)
wildcards["prefix"] = prefix
wildcards["seed"] = seed
wildcards["model_id"] = model_id
try:
filename = opt.fnformat.format(**wildcards)
except KeyError as e:
@@ -1047,17 +972,18 @@ def prepare_image_metadata(
first_seed = first_seed or seed
this_variation = [[seed, opt.variation_amount]]
opt.with_variations = prior_variations + this_variation
formatted_dream_prompt = opt.dream_prompt_str(seed=first_seed,model_id=model_id)
formatted_dream_prompt = opt.dream_prompt_str(seed=first_seed)
elif len(prior_variations) > 0:
formatted_dream_prompt = opt.dream_prompt_str(seed=first_seed,model_id=model_id)
formatted_dream_prompt = opt.dream_prompt_str(seed=first_seed)
elif operation == "postprocess":
formatted_dream_prompt = "!fix " + opt.dream_prompt_str(
seed=seed, prompt=opt.input_file_path, model_id=model_id,
seed=seed, prompt=opt.input_file_path
)
else:
formatted_dream_prompt = opt.dream_prompt_str(seed=seed,model_id=model_id)
formatted_dream_prompt = opt.dream_prompt_str(seed=seed)
return filename, formatted_dream_prompt
def choose_postprocess_name(opt, prefix, seed) -> str:
match = re.search(r"postprocess:(\w+)", opt.last_operation)
if match:
@@ -1308,75 +1234,6 @@ def check_internet() -> bool:
except:
return False
def retrieve_last_used_model()->str:
"""
Return name of the last model used.
"""
model_file_path = Path(Globals.root,'.last_model')
if not model_file_path.exists():
return None
with open(model_file_path,'r') as f:
return f.readline()
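# Illustrative sketch of the .last_model round trip implemented by
# _save_last_used_model() and retrieve_last_used_model(): the active model name is
# written to ROOTDIR/.last_model on switch and read back on the next launch.
# The model name below is a placeholder.
from pathlib import Path
last_model_file = Path(Globals.root, '.last_model')
last_model_file.write_text('stable-diffusion-1.5')          # what the save step does
last_model = last_model_file.read_text().splitlines()[0]    # what the retrieve step returns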
# This routine performs any patch-ups needed after installation
def run_patches():
install_missing_config_files()
version_file = Path(Globals.root,'.version')
if version_file.exists():
with open(version_file,'r') as f:
root_version = version.parse(f.readline() or 'v2.3.2')
else:
root_version = version.parse('v2.3.2')
app_version = version.parse(ldm.invoke.__version__)
if root_version < app_version:
try:
do_version_update(root_version, ldm.invoke.__version__)
with open(version_file,'w') as f:
f.write(ldm.invoke.__version__)
except:
print("** Update failed. Will try again on next launch")
def install_missing_config_files():
"""
install ckpt configuration files that may have been added to the
distro after original root directory configuration
"""
pass
# import invokeai.configs as conf
# from shutil import copyfile
# root_configs = Path(global_config_dir(), 'stable-diffusion')
# repo_configs = Path(conf.__path__[0], 'stable-diffusion')
# for src in repo_configs.iterdir():
# dest = root_configs / src.name
# if not dest.exists():
# copyfile(src,dest)
def do_version_update(root_version: version.Version, app_version: Union[str, version.Version]):
"""
Make any updates to the launcher .sh and .bat scripts that may be needed
from release to release. This is not an elegant solution. Instead, the
launcher should be moved into the source tree and installed using pip.
"""
if root_version < version.Version('v2.3.4'):
dest = Path(Globals.root,'loras')
dest.mkdir(exist_ok=True)
if root_version < version.Version('v2.3.3'):
if sys.platform == "linux":
print('>> Downloading new version of launcher script and its config file')
from ldm.util import download_with_progress_bar
url_base = f'https://raw.githubusercontent.com/invoke-ai/InvokeAI/v{str(app_version)}/installer/templates/'
dest = Path(Globals.root,'invoke.sh.in')
assert download_with_progress_bar(url_base+'invoke.sh.in',dest)
dest.replace(Path(Globals.root,'invoke.sh'))
os.chmod(Path(Globals.root,'invoke.sh'), 0o0755)
dest = Path(Globals.root,'dialogrc')
assert download_with_progress_bar(url_base+'dialogrc',dest)
dest.replace(Path(Globals.root,'.dialogrc'))
if __name__ == '__main__':
main()

View File

@@ -1 +1 @@
__version__='2.3.4'
__version__='2.3.1.post2'

View File

@@ -333,7 +333,7 @@ class Args(object):
switches.append(f'-V {formatted_variations}')
if 'variations' in a and len(a['variations'])>0:
switches.append(f'-V {a["variations"]}')
return ' '.join(switches) + f' # model_id={kwargs.get("model_id","unknown model")}'
return ' '.join(switches)
def __getattribute__(self,name):
'''
@@ -522,8 +522,8 @@ class Args(object):
'--ckpt_convert',
action=argparse.BooleanOptionalAction,
dest='ckpt_convert',
default=True,
help='Deprecated option. Legacy ckpt files are now always converted to diffusers when loaded.'
default=False,
help='Load legacy ckpt files as diffusers. Pass --no-ckpt-convert to inhibit this behavior',
)
model_group.add_argument(
'--internet',
@@ -654,13 +654,6 @@ class Args(object):
type=str,
help='Path to a directory containing .bin and/or .pt files, or a single .bin/.pt file. You may use subdirectories. (default is ROOTDIR/embeddings)'
)
render_group.add_argument(
'--lora_directory',
dest='lora_path',
default='loras',
type=str,
help='Path to a directory containing LoRA files; subdirectories are not supported. (default is ROOTDIR/loras)'
)
render_group.add_argument(
'--embeddings',
action=argparse.BooleanOptionalAction,
@@ -798,12 +791,12 @@ class Args(object):
*Model manipulation*
!models -- list models in configs/models.yaml
!switch <model_name> -- switch to model named <model_name>
!import_model /path/to/weights/file -- imports a model from a ckpt or safetensors file
!import_model /path/to/weights/file.ckpt -- adds a .ckpt model to your config
!import_model /path/to/weights/ -- interactively import models from a directory
!import_model http://path_to_model -- downloads and adds a ckpt or safetensors model to your config
!import_model hakurei/waifu-diffusion -- downloads and adds a diffusers model to your config using its repo_id
!optimize_model <model_name> -- converts a .ckpt/.safetensors model to a diffusers model
!convert_model /path/to/weights/file -- converts a .ckpt file path to a diffusers model and adds to your config
!import_model http://path_to_model.ckpt -- downloads and adds a .ckpt model to your config
!import_model hakurei/waifu-diffusion -- downloads and adds a diffusers model to your config
!optimize_model <model_name> -- converts a .ckpt model to a diffusers model
!convert_model /path/to/weights/file.ckpt -- converts a .ckpt file path to a diffusers model
!edit_model <model_name> -- edit a model's description
!del_model <model_name> -- delete a model
"""
@@ -885,7 +878,7 @@ class Args(object):
)
render_group.add_argument(
'--fnformat',
default=None,
default='{prefix}.{seed}.png',
type=str,
help='Overwrite the filename format. You can use any argument as wildcard enclosed in curly braces. Default is {prefix}.{seed}.png',
)
@@ -1162,7 +1155,6 @@ def format_metadata(**kwargs):
def metadata_dumps(opt,
seeds=[],
model_hash=None,
model_id=None,
postprocessing=None):
'''
Given an Args object, returns a dict containing the keys and
@@ -1175,7 +1167,7 @@ def metadata_dumps(opt,
# top-level metadata minus `image` or `images`
metadata = {
'model' : 'stable diffusion',
'model_id' : model_id or opt.model,
'model_id' : opt.model,
'model_hash' : model_hash,
'app_id' : ldm.invoke.__app_id__,
'app_version' : ldm.invoke.__version__,
@@ -1188,7 +1180,7 @@ def metadata_dumps(opt,
)
# remove any image keys not mentioned in RFC #266
rfc266_img_fields = ['type','postprocessing','sampler','prompt','seed','variations','steps','hires_fix',
rfc266_img_fields = ['type','postprocessing','sampler','prompt','seed','variations','steps',
'cfg_scale','threshold','perlin','step_number','width','height','extra','strength','seamless',
'init_img','init_mask','facetool','facetool_strength','upscale','h_symmetry_time_pct',
'v_symmetry_time_pct']

View File

@@ -0,0 +1,4 @@
'''
Initialization file for the ldm.invoke.ckpt_generator package
'''
from .base import CkptGenerator

View File

@@ -0,0 +1,335 @@
'''
Base class for ldm.invoke.ckpt_generator.*
including img2img, txt2img, and inpaint
THESE MODULES ARE TRANSITIONAL AND WILL BE REMOVED AT A FUTURE DATE
WHEN LEGACY CKPT MODEL SUPPORT IS DISCONTINUED.
'''
import torch
import numpy as np
import random
import os
import os.path as osp
import traceback
from tqdm import tqdm, trange
from PIL import Image, ImageFilter, ImageChops
import cv2 as cv
from einops import rearrange, repeat
from pathlib import Path
from pytorch_lightning import seed_everything
import invokeai.assets.web as web_assets
from ldm.invoke.devices import choose_autocast
from ldm.models.diffusion.cross_attention_map_saving import AttentionMapSaver
from ldm.util import rand_perlin_2d
downsampling = 8
CAUTION_IMG = 'caution.png'
class CkptGenerator():
def __init__(self, model, precision):
self.model = model
self.precision = precision
self.seed = None
self.latent_channels = model.channels
self.downsampling_factor = downsampling # BUG: should come from model or config
self.safety_checker = None
self.perlin = 0.0
self.threshold = 0
self.variation_amount = 0
self.with_variations = []
self.use_mps_noise = False
self.free_gpu_mem = None
self.caution_img = None
# this is going to be overridden in img2img.py, txt2img.py and inpaint.py
def get_make_image(self,prompt,**kwargs):
"""
Returns a function returning an image derived from the prompt and the initial image
Return value depends on the seed at the time you call it
"""
raise NotImplementedError("image_iterator() must be implemented in a descendent class")
def set_variation(self, seed, variation_amount, with_variations):
self.seed = seed
self.variation_amount = variation_amount
self.with_variations = with_variations
def generate(self,prompt,init_image,width,height,sampler, iterations=1,seed=None,
image_callback=None, step_callback=None, threshold=0.0, perlin=0.0,
safety_checker:dict=None,
attention_maps_callback = None,
free_gpu_mem: bool=False,
**kwargs):
scope = choose_autocast(self.precision)
self.safety_checker = safety_checker
self.free_gpu_mem = free_gpu_mem
attention_maps_images = []
attention_maps_callback = lambda saver: attention_maps_images.append(saver.get_stacked_maps_image())
make_image = self.get_make_image(
prompt,
sampler = sampler,
init_image = init_image,
width = width,
height = height,
step_callback = step_callback,
threshold = threshold,
perlin = perlin,
attention_maps_callback = attention_maps_callback,
**kwargs
)
results = []
seed = seed if seed is not None and seed >= 0 else self.new_seed()
first_seed = seed
seed, initial_noise = self.generate_initial_noise(seed, width, height)
# There used to be an additional self.model.ema_scope() here, but it breaks
# the inpaint-1.5 model. Not sure what it did.... ?
with scope(self.model.device.type):
for n in trange(iterations, desc='Generating'):
x_T = None
if self.variation_amount > 0:
seed_everything(seed)
target_noise = self.get_noise(width,height)
x_T = self.slerp(self.variation_amount, initial_noise, target_noise)
elif initial_noise is not None:
# i.e. we specified particular variations
x_T = initial_noise
else:
seed_everything(seed)
try:
x_T = self.get_noise(width,height)
except:
print('** An error occurred while getting initial noise **')
print(traceback.format_exc())
image = make_image(x_T)
if self.safety_checker is not None:
image = self.safety_check(image)
results.append([image, seed])
if image_callback is not None:
attention_maps_image = None if len(attention_maps_images)==0 else attention_maps_images[-1]
image_callback(image, seed, first_seed=first_seed, attention_maps_image=attention_maps_image)
seed = self.new_seed()
return results
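# Note on the x_T selection in the loop above:
#   variation_amount > 0      -> x_T = slerp(variation_amount, fixed initial_noise, fresh per-seed noise)
#   initial_noise is not None -> x_T = initial_noise (explicit --with_variations weights already folded in)
#   otherwise                 -> x_T = fresh noise drawn after seed_everything(seed)
# so image reproducibility hinges on seeding the RNG before every noise draw.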
def sample_to_image(self,samples)->Image.Image:
"""
Given samples returned from a sampler, converts
it into a PIL Image
"""
x_samples = self.model.decode_first_stage(samples)
x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)
if len(x_samples) != 1:
raise Exception(
f'>> expected to get a single image, but got {len(x_samples)}')
x_sample = 255.0 * rearrange(
x_samples[0].cpu().numpy(), 'c h w -> h w c'
)
return Image.fromarray(x_sample.astype(np.uint8))
# write an approximate RGB image from latent samples for a single step to PNG
def repaste_and_color_correct(self, result: Image.Image, init_image: Image.Image, init_mask: Image.Image, mask_blur_radius: int = 8) -> Image.Image:
if init_image is None or init_mask is None:
return result
# Get the original alpha channel of the mask if there is one.
# Otherwise it is some other black/white image format ('1', 'L' or 'RGB')
pil_init_mask = init_mask.getchannel('A') if init_mask.mode == 'RGBA' else init_mask.convert('L')
pil_init_image = init_image.convert('RGBA') # Add an alpha channel if one doesn't exist
# Build an image with only visible pixels from source to use as reference for color-matching.
init_rgb_pixels = np.asarray(init_image.convert('RGB'), dtype=np.uint8)
init_a_pixels = np.asarray(pil_init_image.getchannel('A'), dtype=np.uint8)
init_mask_pixels = np.asarray(pil_init_mask, dtype=np.uint8)
# Get numpy version of result
np_image = np.asarray(result, dtype=np.uint8)
# Mask and calculate mean and standard deviation
mask_pixels = init_a_pixels * init_mask_pixels > 0
np_init_rgb_pixels_masked = init_rgb_pixels[mask_pixels, :]
np_image_masked = np_image[mask_pixels, :]
if np_init_rgb_pixels_masked.size > 0:
init_means = np_init_rgb_pixels_masked.mean(axis=0)
init_std = np_init_rgb_pixels_masked.std(axis=0)
gen_means = np_image_masked.mean(axis=0)
gen_std = np_image_masked.std(axis=0)
# Color correct
np_matched_result = np_image.copy()
np_matched_result[:,:,:] = (((np_matched_result[:,:,:].astype(np.float32) - gen_means[None,None,:]) / gen_std[None,None,:]) * init_std[None,None,:] + init_means[None,None,:]).clip(0, 255).astype(np.uint8)
matched_result = Image.fromarray(np_matched_result, mode='RGB')
else:
matched_result = Image.fromarray(np_image, mode='RGB')
# Blur the mask out (into init image) by specified amount
if mask_blur_radius > 0:
nm = np.asarray(pil_init_mask, dtype=np.uint8)
nmd = cv.erode(nm, kernel=np.ones((3,3), dtype=np.uint8), iterations=int(mask_blur_radius / 2))
pmd = Image.fromarray(nmd, mode='L')
blurred_init_mask = pmd.filter(ImageFilter.BoxBlur(mask_blur_radius))
else:
blurred_init_mask = pil_init_mask
multiplied_blurred_init_mask = ImageChops.multiply(blurred_init_mask, self.pil_image.split()[-1])
# Paste original on color-corrected generation (using blurred mask)
matched_result.paste(init_image, (0,0), mask = multiplied_blurred_init_mask)
return matched_result
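# The color correction above is a per-channel moment match: means and standard
# deviations are computed inside the mask, every generated pixel x is remapped as
#     x' = clip((x - mean_gen) / std_gen * std_init + mean_init, 0, 255)
# and the original image is then pasted back over the result through the blurred mask.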
def sample_to_lowres_estimated_image(self,samples):
# originally adapted from code by @erucipe and @keturn here:
# https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/7
# these updated numbers for v1.5 are from @torridgristle
v1_5_latent_rgb_factors = torch.tensor([
# R G B
[ 0.3444, 0.1385, 0.0670], # L1
[ 0.1247, 0.4027, 0.1494], # L2
[-0.3192, 0.2513, 0.2103], # L3
[-0.1307, -0.1874, -0.7445] # L4
], dtype=samples.dtype, device=samples.device)
latent_image = samples[0].permute(1, 2, 0) @ v1_5_latent_rgb_factors
latents_ubyte = (((latent_image + 1) / 2)
.clamp(0, 1) # change scale from -1..1 to 0..1
.mul(0xFF) # to 0..255
.byte()).cpu()
return Image.fromarray(latents_ubyte.numpy())
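# Illustrative sketch of the latent -> RGB preview above, run on a stand-in latent
# tensor (the random samples below are an assumption purely for demonstration):
import torch
from PIL import Image
factors = torch.tensor([[ 0.3444,  0.1385,  0.0670],
                        [ 0.1247,  0.4027,  0.1494],
                        [-0.3192,  0.2513,  0.2103],
                        [-0.1307, -0.1874, -0.7445]])
samples = torch.randn(1, 4, 64, 64)                    # 4-channel latent at H/8 x W/8
latent_image = samples[0].permute(1, 2, 0) @ factors   # (64, 64, 3) approximate RGB
preview = Image.fromarray(((latent_image + 1) / 2).clamp(0, 1).mul(255).byte().numpy())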
def generate_initial_noise(self, seed, width, height):
initial_noise = None
if self.variation_amount > 0 or len(self.with_variations) > 0:
# use fixed initial noise plus random noise per iteration
seed_everything(seed)
initial_noise = self.get_noise(width,height)
for v_seed, v_weight in self.with_variations:
seed = v_seed
seed_everything(seed)
next_noise = self.get_noise(width,height)
initial_noise = self.slerp(v_weight, initial_noise, next_noise)
if self.variation_amount > 0:
random.seed() # reset RNG to an actually random state, so we can get a random seed for variations
seed = random.randrange(0,np.iinfo(np.uint32).max)
return (seed, initial_noise)
else:
return (seed, None)
# returns a tensor filled with random numbers from a normal distribution
def get_noise(self,width,height):
"""
Returns a tensor filled with random numbers, either from a normal distribution
(txt2img) or from the latent image (img2img, inpaint)
"""
raise NotImplementedError("get_noise() must be implemented in a descendent class")
def get_perlin_noise(self,width,height):
fixdevice = 'cpu' if (self.model.device.type == 'mps') else self.model.device
return torch.stack([rand_perlin_2d((height, width), (8, 8), device = self.model.device).to(fixdevice) for _ in range(self.latent_channels)], dim=0).to(self.model.device)
def new_seed(self):
self.seed = random.randrange(0, np.iinfo(np.uint32).max)
return self.seed
def slerp(self, t, v0, v1, DOT_THRESHOLD=0.9995):
'''
Spherical linear interpolation
Args:
t (float/np.ndarray): Float value between 0.0 and 1.0
v0 (np.ndarray): Starting vector
v1 (np.ndarray): Final vector
DOT_THRESHOLD (float): Threshold for considering the two vectors as
collinear. Not recommended to alter this.
Returns:
v2 (np.ndarray): Interpolation vector between v0 and v1
'''
inputs_are_torch = False
if not isinstance(v0, np.ndarray):
inputs_are_torch = True
v0 = v0.detach().cpu().numpy()
if not isinstance(v1, np.ndarray):
inputs_are_torch = True
v1 = v1.detach().cpu().numpy()
dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
if np.abs(dot) > DOT_THRESHOLD:
v2 = (1 - t) * v0 + t * v1
else:
theta_0 = np.arccos(dot)
sin_theta_0 = np.sin(theta_0)
theta_t = theta_0 * t
sin_theta_t = np.sin(theta_t)
s0 = np.sin(theta_0 - theta_t) / sin_theta_0
s1 = sin_theta_t / sin_theta_0
v2 = s0 * v0 + s1 * v1
if inputs_are_torch:
v2 = torch.from_numpy(v2).to(self.model.device)
return v2
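# Worked example of slerp() on plain numpy vectors (placeholder values, t = 0.25):
import numpy as np
v0, v1, t = np.array([1.0, 0.0]), np.array([0.0, 1.0]), 0.25
dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))   # 0.0 -> orthogonal, spherical branch
theta_0 = np.arccos(dot)                                            # pi/2
v2 = np.sin(theta_0 - theta_0 * t) / np.sin(theta_0) * v0 + np.sin(theta_0 * t) / np.sin(theta_0) * v1
# v2 ~= [0.924, 0.383]: a unit vector rotated 22.5 degrees from v0 toward v1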
def safety_check(self,image:Image.Image):
'''
If the CompVis safety checker flags an NSFW image, we
blur it out.
'''
import diffusers
checker = self.safety_checker['checker']
extractor = self.safety_checker['extractor']
features = extractor([image], return_tensors="pt")
features.to(self.model.device)
# unfortunately checker requires the numpy version, so we have to convert back
x_image = np.array(image).astype(np.float32) / 255.0
x_image = x_image[None].transpose(0, 3, 1, 2)
diffusers.logging.set_verbosity_error()
checked_image, has_nsfw_concept = checker(images=x_image, clip_input=features.pixel_values)
if has_nsfw_concept[0]:
print('** An image with potential non-safe content has been detected. A blurred image will be returned. **')
return self.blur(image)
else:
return image
def blur(self,input):
blurry = input.filter(filter=ImageFilter.GaussianBlur(radius=32))
try:
caution = self.get_caution_img()
if caution:
blurry.paste(caution,(0,0),caution)
except FileNotFoundError:
pass
return blurry
def get_caution_img(self):
path = None
if self.caution_img:
return self.caution_img
path = Path(web_assets.__path__[0]) / CAUTION_IMG
caution = Image.open(path)
self.caution_img = caution.resize((caution.width // 2, caution.height //2))
return self.caution_img
# this is a handy routine for debugging use. Given a generated sample,
# convert it into a PNG image and store it at the indicated path
def save_sample(self, sample, filepath):
image = self.sample_to_image(sample)
dirname = os.path.dirname(filepath) or '.'
if not os.path.exists(dirname):
print(f'** creating directory {dirname}')
os.makedirs(dirname, exist_ok=True)
image.save(filepath,'PNG')
def torch_dtype(self)->torch.dtype:
return torch.float16 if self.precision == 'float16' else torch.float32

View File

@@ -0,0 +1,501 @@
'''
ldm.invoke.ckpt_generator.embiggen descends from ldm.invoke.ckpt_generator
and generates with ldm.invoke.ckpt_generator.img2img
'''
import torch
import numpy as np
from tqdm import trange
from PIL import Image
from ldm.invoke.ckpt_generator.base import CkptGenerator
from ldm.invoke.ckpt_generator.img2img import CkptImg2Img
from ldm.invoke.devices import choose_autocast
from ldm.models.diffusion.ddim import DDIMSampler
class CkptEmbiggen(CkptGenerator):
def __init__(self, model, precision):
super().__init__(model, precision)
self.init_latent = None
# Replace generate because Embiggen doesn't need/use most of what it does normally
def generate(self,prompt,iterations=1,seed=None,
image_callback=None, step_callback=None,
**kwargs):
scope = choose_autocast(self.precision)
make_image = self.get_make_image(
prompt,
step_callback = step_callback,
**kwargs
)
results = []
seed = seed if seed else self.new_seed()
# Noise will be generated by the Img2Img generator when called
with scope(self.model.device.type), self.model.ema_scope():
for n in trange(iterations, desc='Generating'):
# make_image will call Img2Img which will do the equivalent of get_noise itself
image = make_image()
results.append([image, seed])
if image_callback is not None:
image_callback(image, seed, prompt_in=prompt)
seed = self.new_seed()
return results
@torch.no_grad()
def get_make_image(
self,
prompt,
sampler,
steps,
cfg_scale,
ddim_eta,
conditioning,
init_img,
strength,
width,
height,
embiggen,
embiggen_tiles,
step_callback=None,
**kwargs
):
"""
Returns a function returning an image derived from the prompt and multi-stage twice-baked potato layering over the img2img on the initial image
Return value depends on the seed at the time you call it
"""
assert not sampler.uses_inpainting_model(), "--embiggen is not supported by inpainting models"
# Construct embiggen arg array, and sanity check arguments
if embiggen == None: # embiggen can also be called with just embiggen_tiles
embiggen = [1.0] # If not specified, assume no scaling
elif embiggen[0] < 0:
embiggen[0] = 1.0
print(
'>> Embiggen scaling factor cannot be negative, fell back to the default of 1.0 !')
if len(embiggen) < 2:
embiggen.append(0.75)
elif embiggen[1] > 1.0 or embiggen[1] < 0:
embiggen[1] = 0.75
print('>> Embiggen upscaling strength for ESRGAN must be between 0 and 1, fell back to the default of 0.75 !')
if len(embiggen) < 3:
embiggen.append(0.25)
elif embiggen[2] < 0:
embiggen[2] = 0.25
print('>> Overlap size for Embiggen must be a positive ratio between 0 and 1 OR a number of pixels, fell back to the default of 0.25 !')
# Convert tiles from their user-friendly count-from-one to count-from-zero, because we need to do modulo math
# and then sort them, because... people.
if embiggen_tiles:
embiggen_tiles = list(map(lambda n: n-1, embiggen_tiles))
embiggen_tiles.sort()
if strength >= 0.5:
print(f'* WARNING: Embiggen may produce mirror motifs if the strength (-f) is too high (currently {strength}). Try values between 0.35-0.45.')
# Prep img2img generator, since we wrap over it
gen_img2img = CkptImg2Img(self.model,self.precision)
# Open original init image (not a tensor) to manipulate
initsuperimage = Image.open(init_img)
with Image.open(init_img) as img:
initsuperimage = img.convert('RGB')
# Size of the target super init image in pixels
initsuperwidth, initsuperheight = initsuperimage.size
# Increase by scaling factor if not already resized, using ESRGAN as able
if embiggen[0] != 1.0:
initsuperwidth = round(initsuperwidth*embiggen[0])
initsuperheight = round(initsuperheight*embiggen[0])
if embiggen[1] > 0: # No point in ESRGAN upscaling if strength is set zero
from ldm.invoke.restoration.realesrgan import ESRGAN
esrgan = ESRGAN()
print(
f'>> ESRGAN upscaling init image prior to cutting with Embiggen with strength {embiggen[1]}')
if embiggen[0] > 2:
initsuperimage = esrgan.process(
initsuperimage,
embiggen[1], # upscale strength
self.seed,
4, # upscale scale
)
else:
initsuperimage = esrgan.process(
initsuperimage,
embiggen[1], # upscale strength
self.seed,
2, # upscale scale
)
# We could keep recursively re-running ESRGAN for a requested embiggen[0] larger than 4x
# but from personal experience it doesn't greatly improve anything after 4x
# Resize to target scaling factor resolution
initsuperimage = initsuperimage.resize(
(initsuperwidth, initsuperheight), Image.Resampling.LANCZOS)
# Use width and height as tile widths and height
# Determine buffer size in pixels
if embiggen[2] < 1:
if embiggen[2] < 0:
embiggen[2] = 0
overlap_size_x = round(embiggen[2] * width)
overlap_size_y = round(embiggen[2] * height)
else:
overlap_size_x = round(embiggen[2])
overlap_size_y = round(embiggen[2])
# With overall image width and height known, determine how many tiles we need
def ceildiv(a, b):
return -1 * (-a // b)
# X and Y need to be determined independently (we may have savings on one based on the buffer pixel count)
# (initsuperwidth - width) is the area remaining to the right that we need to layer tiles to fill
# (width - overlap_size_x) is how much new area we can fill with a single tile
emb_tiles_x = 1
emb_tiles_y = 1
if (initsuperwidth - width) > 0:
emb_tiles_x = ceildiv(initsuperwidth - width,
width - overlap_size_x) + 1
if (initsuperheight - height) > 0:
emb_tiles_y = ceildiv(initsuperheight - height,
height - overlap_size_y) + 1
# Sanity
assert emb_tiles_x > 1 or emb_tiles_y > 1, f'ERROR: Based on the requested dimensions of {initsuperwidth}x{initsuperheight} and tiles of {width}x{height} you don\'t need to Embiggen! Check your arguments.'
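# Worked example of the tile-count arithmetic above: a 1536x1024 super-image with
# 512x512 tiles and overlap ratio 0.25 (128 px) gives
#   emb_tiles_x = ceildiv(1536 - 512, 512 - 128) + 1 = 3 + 1 = 4
#   emb_tiles_y = ceildiv(1024 - 512, 512 - 128) + 1 = 2 + 1 = 3
# i.e. a 4x3 grid of overlapping tiles covers the target resolution.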
# Prep alpha layers --------------
# https://stackoverflow.com/questions/69321734/how-to-create-different-transparency-like-gradient-with-python-pil
# agradientL is Left-side transparent
agradientL = Image.linear_gradient('L').rotate(
90).resize((overlap_size_x, height))
# agradientT is Top-side transparent
agradientT = Image.linear_gradient('L').resize((width, overlap_size_y))
# radial corner is the left-top corner, made full circle then cut to just the left-top quadrant
agradientC = Image.new('L', (256, 256))
for y in range(256):
for x in range(256):
# Find distance to lower right corner (numpy takes arrays)
distanceToLR = np.sqrt([(255 - x) ** 2 + (255 - y) ** 2])[0]
# Clamp values to max 255
if distanceToLR > 255:
distanceToLR = 255
# Place the pixel as the inverse of the distance
agradientC.putpixel((x, y), round(255 - distanceToLR))
# Create alternative asymmetric diagonal corner to use on "tailing" intersections to prevent hard edges
# Fits for a left-fading gradient on the bottom side and full opacity on the right side.
agradientAsymC = Image.new('L', (256, 256))
for y in range(256):
for x in range(256):
value = round(max(0, x-(255-y)) * (255 / max(1,y)))
#Clamp values
value = max(0, value)
value = min(255, value)
agradientAsymC.putpixel((x, y), value)
# Create alpha layers default fully white
alphaLayerL = Image.new("L", (width, height), 255)
alphaLayerT = Image.new("L", (width, height), 255)
alphaLayerLTC = Image.new("L", (width, height), 255)
# Paste gradients into alpha layers
alphaLayerL.paste(agradientL, (0, 0))
alphaLayerT.paste(agradientT, (0, 0))
alphaLayerLTC.paste(agradientL, (0, 0))
alphaLayerLTC.paste(agradientT, (0, 0))
alphaLayerLTC.paste(agradientC.resize((overlap_size_x, overlap_size_y)), (0, 0))
# make masks with an asymmetric upper-right corner so when the curved transparent corner of the next tile
# to its right is placed it doesn't reveal a hard trailing semi-transparent edge in the overlapping space
alphaLayerTaC = alphaLayerT.copy()
alphaLayerTaC.paste(agradientAsymC.rotate(270).resize((overlap_size_x, overlap_size_y)), (width - overlap_size_x, 0))
alphaLayerLTaC = alphaLayerLTC.copy()
alphaLayerLTaC.paste(agradientAsymC.rotate(270).resize((overlap_size_x, overlap_size_y)), (width - overlap_size_x, 0))
if embiggen_tiles:
# Individual unconnected sides
alphaLayerR = Image.new("L", (width, height), 255)
alphaLayerR.paste(agradientL.rotate(
180), (width - overlap_size_x, 0))
alphaLayerB = Image.new("L", (width, height), 255)
alphaLayerB.paste(agradientT.rotate(
180), (0, height - overlap_size_y))
alphaLayerTB = Image.new("L", (width, height), 255)
alphaLayerTB.paste(agradientT, (0, 0))
alphaLayerTB.paste(agradientT.rotate(
180), (0, height - overlap_size_y))
alphaLayerLR = Image.new("L", (width, height), 255)
alphaLayerLR.paste(agradientL, (0, 0))
alphaLayerLR.paste(agradientL.rotate(
180), (width - overlap_size_x, 0))
# Sides and corner Layers
alphaLayerRBC = Image.new("L", (width, height), 255)
alphaLayerRBC.paste(agradientL.rotate(
180), (width - overlap_size_x, 0))
alphaLayerRBC.paste(agradientT.rotate(
180), (0, height - overlap_size_y))
alphaLayerRBC.paste(agradientC.rotate(180).resize(
(overlap_size_x, overlap_size_y)), (width - overlap_size_x, height - overlap_size_y))
alphaLayerLBC = Image.new("L", (width, height), 255)
alphaLayerLBC.paste(agradientL, (0, 0))
alphaLayerLBC.paste(agradientT.rotate(
180), (0, height - overlap_size_y))
alphaLayerLBC.paste(agradientC.rotate(90).resize(
(overlap_size_x, overlap_size_y)), (0, height - overlap_size_y))
alphaLayerRTC = Image.new("L", (width, height), 255)
alphaLayerRTC.paste(agradientL.rotate(
180), (width - overlap_size_x, 0))
alphaLayerRTC.paste(agradientT, (0, 0))
alphaLayerRTC.paste(agradientC.rotate(270).resize(
(overlap_size_x, overlap_size_y)), (width - overlap_size_x, 0))
# All but X layers
alphaLayerABT = Image.new("L", (width, height), 255)
alphaLayerABT.paste(alphaLayerLBC, (0, 0))
alphaLayerABT.paste(agradientL.rotate(
180), (width - overlap_size_x, 0))
alphaLayerABT.paste(agradientC.rotate(180).resize(
(overlap_size_x, overlap_size_y)), (width - overlap_size_x, height - overlap_size_y))
alphaLayerABL = Image.new("L", (width, height), 255)
alphaLayerABL.paste(alphaLayerRTC, (0, 0))
alphaLayerABL.paste(agradientT.rotate(
180), (0, height - overlap_size_y))
alphaLayerABL.paste(agradientC.rotate(180).resize(
(overlap_size_x, overlap_size_y)), (width - overlap_size_x, height - overlap_size_y))
alphaLayerABR = Image.new("L", (width, height), 255)
alphaLayerABR.paste(alphaLayerLBC, (0, 0))
alphaLayerABR.paste(agradientT, (0, 0))
alphaLayerABR.paste(agradientC.resize(
(overlap_size_x, overlap_size_y)), (0, 0))
alphaLayerABB = Image.new("L", (width, height), 255)
alphaLayerABB.paste(alphaLayerRTC, (0, 0))
alphaLayerABB.paste(agradientL, (0, 0))
alphaLayerABB.paste(agradientC.resize(
(overlap_size_x, overlap_size_y)), (0, 0))
# All-around layer
alphaLayerAA = Image.new("L", (width, height), 255)
alphaLayerAA.paste(alphaLayerABT, (0, 0))
alphaLayerAA.paste(agradientT, (0, 0))
alphaLayerAA.paste(agradientC.resize(
(overlap_size_x, overlap_size_y)), (0, 0))
alphaLayerAA.paste(agradientC.rotate(270).resize(
(overlap_size_x, overlap_size_y)), (width - overlap_size_x, 0))
# Clean up temporary gradients
del agradientL
del agradientT
del agradientC
def make_image():
# Make main tiles -------------------------------------------------
if embiggen_tiles:
print(f'>> Making {len(embiggen_tiles)} Embiggen tiles...')
else:
print(
f'>> Making {(emb_tiles_x * emb_tiles_y)} Embiggen tiles ({emb_tiles_x}x{emb_tiles_y})...')
emb_tile_store = []
# Although we could use the same seed for every tile for determinism, at higher strengths this may
# produce duplicated structures for each tile and make the tiling effect more obvious
# instead track and iterate a local seed we pass to Img2Img
seed = self.seed
seedintlimit = np.iinfo(np.uint32).max - 1 # only retrieve this one from numpy
for tile in range(emb_tiles_x * emb_tiles_y):
# Don't iterate on first tile
if tile != 0:
if seed < seedintlimit:
seed += 1
else:
seed = 0
# Determine if this is a re-run and replace
if embiggen_tiles and not tile in embiggen_tiles:
continue
# Get row and column entries
emb_row_i = tile // emb_tiles_x
emb_column_i = tile % emb_tiles_x
# Determine bounds to cut up the init image
# Determine upper-left point
if emb_column_i + 1 == emb_tiles_x:
left = initsuperwidth - width
else:
left = round(emb_column_i * (width - overlap_size_x))
if emb_row_i + 1 == emb_tiles_y:
top = initsuperheight - height
else:
top = round(emb_row_i * (height - overlap_size_y))
right = left + width
bottom = top + height
# Cropped image of above dimension (does not modify the original)
newinitimage = initsuperimage.crop((left, top, right, bottom))
# DEBUG:
# newinitimagepath = init_img[0:-4] + f'_emb_Ti{tile}.png'
# newinitimage.save(newinitimagepath)
if embiggen_tiles:
print(
f'Making tile #{tile + 1} ({embiggen_tiles.index(tile) + 1} of {len(embiggen_tiles)} requested)')
else:
print(
f'Starting {tile + 1} of {(emb_tiles_x * emb_tiles_y)} tiles')
# create a torch tensor from an Image
newinitimage = np.array(
newinitimage).astype(np.float32) / 255.0
newinitimage = newinitimage[None].transpose(0, 3, 1, 2)
newinitimage = torch.from_numpy(newinitimage)
newinitimage = 2.0 * newinitimage - 1.0
newinitimage = newinitimage.to(self.model.device)
tile_results = gen_img2img.generate(
prompt,
iterations = 1,
seed = seed,
sampler = DDIMSampler(self.model, device=self.model.device),
steps = steps,
cfg_scale = cfg_scale,
conditioning = conditioning,
ddim_eta = ddim_eta,
image_callback = None, # called only after the final image is generated
step_callback = step_callback, # called after each intermediate image is generated
width = width,
height = height,
init_image = newinitimage, # notice that init_image is different from init_img
mask_image = None,
strength = strength,
)
emb_tile_store.append(tile_results[0][0])
# DEBUG (also has other uses): worth saving if you want tiles without a transparency overlap to composite manually
# emb_tile_store[-1].save(init_img[0:-4] + f'_emb_To{tile}.png')
del newinitimage
# Sanity check we have them all
if len(emb_tile_store) == (emb_tiles_x * emb_tiles_y) or (embiggen_tiles != [] and len(emb_tile_store) == len(embiggen_tiles)):
outputsuperimage = Image.new(
"RGBA", (initsuperwidth, initsuperheight))
if embiggen_tiles:
outputsuperimage.alpha_composite(
initsuperimage.convert('RGBA'), (0, 0))
for tile in range(emb_tiles_x * emb_tiles_y):
if embiggen_tiles:
if tile in embiggen_tiles:
intileimage = emb_tile_store.pop(0)
else:
continue
else:
intileimage = emb_tile_store[tile]
intileimage = intileimage.convert('RGBA')
# Get row and column entries
emb_row_i = tile // emb_tiles_x
emb_column_i = tile % emb_tiles_x
if emb_row_i == 0 and emb_column_i == 0 and not embiggen_tiles:
left = 0
top = 0
else:
# Determine upper-left point
if emb_column_i + 1 == emb_tiles_x:
left = initsuperwidth - width
else:
left = round(emb_column_i *
(width - overlap_size_x))
if emb_row_i + 1 == emb_tiles_y:
top = initsuperheight - height
else:
top = round(emb_row_i * (height - overlap_size_y))
# Handle gradients for various conditions
# Handle emb_rerun case
if embiggen_tiles:
# top of image
if emb_row_i == 0:
if emb_column_i == 0:
if (tile+1) in embiggen_tiles: # Look-ahead right
if (tile+emb_tiles_x) not in embiggen_tiles: # Look-ahead down
intileimage.putalpha(alphaLayerB)
# Otherwise do nothing on this tile
elif (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down only
intileimage.putalpha(alphaLayerR)
else:
intileimage.putalpha(alphaLayerRBC)
elif emb_column_i == emb_tiles_x - 1:
if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down
intileimage.putalpha(alphaLayerL)
else:
intileimage.putalpha(alphaLayerLBC)
else:
if (tile+1) in embiggen_tiles: # Look-ahead right
if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down
intileimage.putalpha(alphaLayerL)
else:
intileimage.putalpha(alphaLayerLBC)
elif (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down only
intileimage.putalpha(alphaLayerLR)
else:
intileimage.putalpha(alphaLayerABT)
# bottom of image
elif emb_row_i == emb_tiles_y - 1:
if emb_column_i == 0:
if (tile+1) in embiggen_tiles: # Look-ahead right
intileimage.putalpha(alphaLayerTaC)
else:
intileimage.putalpha(alphaLayerRTC)
elif emb_column_i == emb_tiles_x - 1:
# No tiles to look ahead to
intileimage.putalpha(alphaLayerLTC)
else:
if (tile+1) in embiggen_tiles: # Look-ahead right
intileimage.putalpha(alphaLayerLTaC)
else:
intileimage.putalpha(alphaLayerABB)
# vertical middle of image
else:
if emb_column_i == 0:
if (tile+1) in embiggen_tiles: # Look-ahead right
if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down
intileimage.putalpha(alphaLayerTaC)
else:
intileimage.putalpha(alphaLayerTB)
elif (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down only
intileimage.putalpha(alphaLayerRTC)
else:
intileimage.putalpha(alphaLayerABL)
elif emb_column_i == emb_tiles_x - 1:
if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down
intileimage.putalpha(alphaLayerLTC)
else:
intileimage.putalpha(alphaLayerABR)
else:
if (tile+1) in embiggen_tiles: # Look-ahead right
if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down
intileimage.putalpha(alphaLayerLTaC)
else:
intileimage.putalpha(alphaLayerABR)
elif (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down only
intileimage.putalpha(alphaLayerABB)
else:
intileimage.putalpha(alphaLayerAA)
# Handle normal tiling case (much simpler - since we tile left to right, top to bottom)
else:
if emb_row_i == 0 and emb_column_i >= 1:
intileimage.putalpha(alphaLayerL)
elif emb_row_i >= 1 and emb_column_i == 0:
if emb_column_i + 1 == emb_tiles_x: # If we don't have anything that can be placed to the right
intileimage.putalpha(alphaLayerT)
else:
intileimage.putalpha(alphaLayerTaC)
else:
if emb_column_i + 1 == emb_tiles_x: # If we don't have anything that can be placed to the right
intileimage.putalpha(alphaLayerLTC)
else:
intileimage.putalpha(alphaLayerLTaC)
# Layer tile onto final image
outputsuperimage.alpha_composite(intileimage, (left, top))
else:
print('Error: could not find all Embiggen output tiles in memory. Something must have gone wrong with img2img generation.')
# after internal loops and patching up return Embiggen image
return outputsuperimage
# end of function declaration
return make_image

View File

@@ -0,0 +1,97 @@
'''
ldm.invoke.ckpt_generator.img2img descends from ldm.invoke.ckpt_generator
'''
import torch
import numpy as np
import PIL
from torch import Tensor
from PIL import Image
from ldm.invoke.devices import choose_autocast
from ldm.invoke.ckpt_generator.base import CkptGenerator
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
class CkptImg2Img(CkptGenerator):
def __init__(self, model, precision):
super().__init__(model, precision)
self.init_latent = None # by get_noise()
def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta,
conditioning,init_image,strength,step_callback=None,threshold=0.0,perlin=0.0,**kwargs):
"""
Returns a function returning an image derived from the prompt and the initial image
Return value depends on the seed at the time you call it.
"""
self.perlin = perlin
sampler.make_schedule(
ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False
)
if isinstance(init_image, PIL.Image.Image):
init_image = self._image_to_tensor(init_image.convert('RGB'))
scope = choose_autocast(self.precision)
with scope(self.model.device.type):
self.init_latent = self.model.get_first_stage_encoding(
self.model.encode_first_stage(init_image)
) # move to latent space
t_enc = int(strength * steps)
uc, c, extra_conditioning_info = conditioning
def make_image(x_T):
# encode (scaled latent)
z_enc = sampler.stochastic_encode(
self.init_latent,
torch.tensor([t_enc - 1]).to(self.model.device),
noise=x_T
)
if self.free_gpu_mem and self.model.model.device != self.model.device:
self.model.model.to(self.model.device)
# decode it
samples = sampler.decode(
z_enc,
c,
t_enc,
img_callback = step_callback,
unconditional_guidance_scale=cfg_scale,
unconditional_conditioning=uc,
init_latent = self.init_latent, # changes how noising is performed in ksampler
extra_conditioning_info = extra_conditioning_info,
all_timesteps_count = steps
)
if self.free_gpu_mem:
self.model.model.to("cpu")
return self.sample_to_image(samples)
return make_image
def get_noise(self,width,height):
device = self.model.device
init_latent = self.init_latent
assert init_latent is not None,'call to get_noise() when init_latent not set'
if device.type == 'mps':
x = torch.randn_like(init_latent, device='cpu').to(device)
else:
x = torch.randn_like(init_latent, device=device)
if self.perlin > 0.0:
shape = init_latent.shape
x = (1-self.perlin)*x + self.perlin*self.get_perlin_noise(shape[3], shape[2])
return x
def _image_to_tensor(self, image:Image, normalize:bool=True)->Tensor:
image = np.array(image).astype(np.float32) / 255.0
if len(image.shape) == 2: # 'L' image, as in a mask
image = image[None,None]
else: # 'RGB' image
image = image[None].transpose(0, 3, 1, 2)
image = torch.from_numpy(image)
if normalize:
image = 2.0 * image - 1.0
return image.to(self.model.device)
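# Illustrative sketch of what _image_to_tensor() produces for a dummy RGB image
# (the solid-red 64x64 image below is an assumption for demonstration):
import numpy as np
import torch
from PIL import Image
img = Image.new('RGB', (64, 64), (255, 0, 0))
arr = np.array(img).astype(np.float32) / 255.0                 # HxWx3 in [0, 1]
tensor = torch.from_numpy(arr[None].transpose(0, 3, 1, 2)) * 2.0 - 1.0
# tensor.shape == (1, 3, 64, 64); the red channel is 1.0, green and blue are -1.0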

View File

@@ -0,0 +1,358 @@
'''
ldm.invoke.ckpt_generator.inpaint descends from ldm.invoke.ckpt_generator
'''
import math
import torch
import torchvision.transforms as T
import numpy as np
import cv2 as cv
import PIL
from PIL import Image, ImageFilter, ImageOps, ImageChops
from skimage.exposure.histogram_matching import match_histograms
from einops import rearrange, repeat
from ldm.invoke.devices import choose_autocast
from ldm.invoke.ckpt_generator.img2img import CkptImg2Img
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.ksampler import KSampler
from ldm.invoke.generator.base import downsampling
from ldm.util import debug_image
from ldm.invoke.patchmatch import PatchMatch
from ldm.invoke.globals import Globals
def infill_methods()->list[str]:
methods = list()
if PatchMatch.patchmatch_available():
methods.append('patchmatch')
methods.append('tile')
return methods
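# Illustrative usage: downstream code can take the first entry as the default infill
# method, since 'tile' is always present and 'patchmatch' is preferred when available.
default_infill = infill_methods()[0]   # 'patchmatch' if the library loaded, else 'tile'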
class CkptInpaint(CkptImg2Img):
def __init__(self, model, precision):
self.init_latent = None
self.pil_image = None
self.pil_mask = None
self.mask_blur_radius = 0
self.infill_method = None
super().__init__(model, precision)
# Outpaint support code
def get_tile_images(self, image: np.ndarray, width=8, height=8):
_nrows, _ncols, depth = image.shape
_strides = image.strides
nrows, _m = divmod(_nrows, height)
ncols, _n = divmod(_ncols, width)
if _m != 0 or _n != 0:
return None
return np.lib.stride_tricks.as_strided(
np.ravel(image),
shape=(nrows, ncols, height, width, depth),
strides=(height * _strides[0], width * _strides[1], *_strides),
writeable=False
)
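# Illustrative sketch of get_tile_images() on a dummy 64x64 RGBA array; `gen` is an
# assumed CkptInpaint instance and the zero image is a placeholder:
import numpy as np
rgba = np.zeros((64, 64, 4), dtype=np.uint8)
tiles = gen.get_tile_images(rgba, width=16, height=16)
# tiles.shape == (4, 4, 16, 16, 4): a 4x4 grid of 16x16 RGBA tiles, as a strided view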
def infill_patchmatch(self, im: Image.Image) -> Image:
if im.mode != 'RGBA':
return im
# Skip patchmatch if patchmatch isn't available
if not PatchMatch.patchmatch_available():
return im
# Patchmatch (note, we may want to expose patch_size? Increasing it significantly impacts performance though)
im_patched_np = PatchMatch.inpaint(im.convert('RGB'), ImageOps.invert(im.split()[-1]), patch_size = 3)
im_patched = Image.fromarray(im_patched_np, mode = 'RGB')
return im_patched
def tile_fill_missing(self, im: Image.Image, tile_size: int = 16, seed: int = None) -> Image:
# Only fill if there's an alpha layer
if im.mode != 'RGBA':
return im
a = np.asarray(im, dtype=np.uint8)
tile_size = (tile_size, tile_size)
# Get the image as tiles of a specified size
tiles = self.get_tile_images(a,*tile_size).copy()
# Get the mask as tiles
tiles_mask = tiles[:,:,:,:,3]
# Find any mask tiles with any fully transparent pixels (we will be replacing these later)
tmask_shape = tiles_mask.shape
tiles_mask = tiles_mask.reshape(math.prod(tiles_mask.shape))
n,ny = (math.prod(tmask_shape[0:2])), math.prod(tmask_shape[2:])
tiles_mask = (tiles_mask > 0)
tiles_mask = tiles_mask.reshape((n,ny)).all(axis = 1)
# Get RGB tiles in single array and filter by the mask
tshape = tiles.shape
tiles_all = tiles.reshape((math.prod(tiles.shape[0:2]), * tiles.shape[2:]))
filtered_tiles = tiles_all[tiles_mask]
if len(filtered_tiles) == 0:
return im
# Find all invalid tiles and replace with a random valid tile
replace_count = (tiles_mask == False).sum()
rng = np.random.default_rng(seed = seed)
tiles_all[np.logical_not(tiles_mask)] = filtered_tiles[rng.choice(filtered_tiles.shape[0], replace_count),:,:,:]
# Convert back to an image
tiles_all = tiles_all.reshape(tshape)
tiles_all = tiles_all.swapaxes(1,2)
st = tiles_all.reshape((math.prod(tiles_all.shape[0:2]), math.prod(tiles_all.shape[2:4]), tiles_all.shape[4]))
si = Image.fromarray(st, mode='RGBA')
return si
def mask_edge(self, mask: Image, edge_size: int, edge_blur: int) -> Image:
npimg = np.asarray(mask, dtype=np.uint8)
# Detect any partially transparent regions
npgradient = np.uint8(255 * (1.0 - np.floor(np.abs(0.5 - np.float32(npimg) / 255.0) * 2.0)))
# Detect hard edges
npedge = cv.Canny(npimg, threshold1=100, threshold2=200)
# Combine
npmask = npgradient + npedge
# Expand
npmask = cv.dilate(npmask, np.ones((3,3), np.uint8), iterations = int(edge_size / 2))
new_mask = Image.fromarray(npmask)
if edge_blur > 0:
new_mask = new_mask.filter(ImageFilter.BoxBlur(edge_blur))
return ImageOps.invert(new_mask)
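# Illustrative sketch of mask_edge() on a dummy hard mask; `gen` is an assumed
# CkptInpaint instance and the square mask is a placeholder:
from PIL import Image
hard_mask = Image.new('L', (512, 512), 0)
hard_mask.paste(255, (128, 128, 384, 384))                  # opaque square in the middle
seam_mask = gen.mask_edge(hard_mask, edge_size=16, edge_blur=8)
# seam_mask is white everywhere except a blurred dark band along the square's edges,
# which is the seam region that seam_paint() passes back in as the inpainting mask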
def seam_paint(self,
im: Image.Image,
seam_size: int,
seam_blur: int,
prompt,sampler,steps,cfg_scale,ddim_eta,
conditioning,strength,
noise,
step_callback
) -> Image.Image:
hard_mask = self.pil_image.split()[-1].copy()
mask = self.mask_edge(hard_mask, seam_size, seam_blur)
make_image = self.get_make_image(
prompt,
sampler,
steps,
cfg_scale,
ddim_eta,
conditioning,
init_image = im.copy().convert('RGBA'),
mask_image = mask.convert('RGB'), # Code currently requires an RGB mask
strength = strength,
mask_blur_radius = 0,
seam_size = 0,
step_callback = step_callback,
inpaint_width = im.width,
inpaint_height = im.height
)
seam_noise = self.get_noise(im.width, im.height)
result = make_image(seam_noise)
return result
@torch.no_grad()
def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta,
conditioning,init_image,mask_image,strength,
mask_blur_radius: int = 8,
# Seam settings - when 0, doesn't fill seam
seam_size: int = 0,
seam_blur: int = 0,
seam_strength: float = 0.7,
seam_steps: int = 10,
tile_size: int = 32,
step_callback=None,
inpaint_replace=False, enable_image_debugging=False,
infill_method = None,
inpaint_width=None,
inpaint_height=None,
**kwargs):
"""
Returns a function returning an image derived from the prompt and
the initial image + mask. Return value depends on the seed at
the time you call it. kwargs are 'init_latent' and 'strength'
"""
self.enable_image_debugging = enable_image_debugging
self.infill_method = infill_method or infill_methods()[0]  # The infill method to use
self.inpaint_width = inpaint_width
self.inpaint_height = inpaint_height
if isinstance(init_image, PIL.Image.Image):
self.pil_image = init_image.copy()
# Do infill
if infill_method == 'patchmatch' and PatchMatch.patchmatch_available():
init_filled = self.infill_patchmatch(self.pil_image.copy())
else: # if infill_method == 'tile': # Only two methods right now, so always use 'tile' if not patchmatch
init_filled = self.tile_fill_missing(
self.pil_image.copy(),
seed = self.seed,
tile_size = tile_size
)
init_filled.paste(init_image, (0,0), init_image.split()[-1])
# Resize if requested for inpainting
if inpaint_width and inpaint_height:
init_filled = init_filled.resize((inpaint_width, inpaint_height))
debug_image(init_filled, "init_filled", debug_status=self.enable_image_debugging)
# Create init tensor
init_image = self._image_to_tensor(init_filled.convert('RGB'))
if isinstance(mask_image, PIL.Image.Image):
self.pil_mask = mask_image.copy()
debug_image(mask_image, "mask_image BEFORE multiply with pil_image", debug_status=self.enable_image_debugging)
mask_image = ImageChops.multiply(mask_image, self.pil_image.split()[-1].convert('RGB'))
self.pil_mask = mask_image
# Resize if requested for inpainting
if inpaint_width and inpaint_height:
mask_image = mask_image.resize((inpaint_width, inpaint_height))
debug_image(mask_image, "mask_image AFTER multiply with pil_image", debug_status=self.enable_image_debugging)
mask_image = mask_image.resize(
(
mask_image.width // downsampling,
mask_image.height // downsampling
),
resample=Image.Resampling.NEAREST
)
mask_image = self._image_to_tensor(mask_image,normalize=False)
self.mask_blur_radius = mask_blur_radius
# klms samplers not supported yet, so ignore previous sampler
if isinstance(sampler,KSampler):
print(
f">> Using recommended DDIM sampler for inpainting."
)
sampler = DDIMSampler(self.model, device=self.model.device)
sampler.make_schedule(
ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False
)
mask_image = mask_image[0][0].unsqueeze(0).repeat(4,1,1).unsqueeze(0)
mask_image = repeat(mask_image, '1 ... -> b ...', b=1)
scope = choose_autocast(self.precision)
with scope(self.model.device.type):
self.init_latent = self.model.get_first_stage_encoding(
self.model.encode_first_stage(init_image)
) # move to latent space
t_enc = int(strength * steps)
# todo: support cross-attention control
uc, c, _ = conditioning
print(f">> target t_enc is {t_enc} steps")
@torch.no_grad()
def make_image(x_T):
# encode (scaled latent)
z_enc = sampler.stochastic_encode(
self.init_latent,
torch.tensor([t_enc - 1]).to(self.model.device),
noise=x_T
)
# to replace masked area with latent noise, weighted by inpaint_replace strength
if inpaint_replace > 0.0:
print(f'>> inpaint will replace what was under the mask with a strength of {inpaint_replace}')
l_noise = self.get_noise(kwargs['width'],kwargs['height'])
inverted_mask = 1.0-mask_image # there will be 1s where the mask is
masked_region = (1.0-inpaint_replace) * inverted_mask * z_enc + inpaint_replace * inverted_mask * l_noise
z_enc = z_enc * mask_image + masked_region
if self.free_gpu_mem and self.model.model.device != self.model.device:
self.model.model.to(self.model.device)
# decode it
samples = sampler.decode(
z_enc,
c,
t_enc,
img_callback = step_callback,
unconditional_guidance_scale = cfg_scale,
unconditional_conditioning = uc,
mask = mask_image,
init_latent = self.init_latent
)
result = self.sample_to_image(samples)
# Seam paint if this is our first pass (seam_size set to 0 during seam painting)
if seam_size > 0:
old_image = self.pil_image or init_image
old_mask = self.pil_mask or mask_image
result = self.seam_paint(
result,
seam_size,
seam_blur,
prompt,
sampler,
seam_steps,
cfg_scale,
ddim_eta,
conditioning,
seam_strength,
x_T,
step_callback)
# Restore original settings
self.get_make_image(prompt,sampler,steps,cfg_scale,ddim_eta,
conditioning,
old_image,
old_mask,
strength,
mask_blur_radius, seam_size, seam_blur, seam_strength,
seam_steps, tile_size, step_callback,
inpaint_replace, enable_image_debugging,
inpaint_width = inpaint_width,
inpaint_height = inpaint_height,
infill_method = infill_method,
**kwargs)
return result
return make_image
def sample_to_image(self, samples)->Image.Image:
gen_result = super().sample_to_image(samples).convert('RGB')
debug_image(gen_result, "gen_result", debug_status=self.enable_image_debugging)
# Resize if necessary
if self.inpaint_width and self.inpaint_height:
gen_result = gen_result.resize(self.pil_image.size)
if self.pil_image is None or self.pil_mask is None:
return gen_result
corrected_result = super().repaste_and_color_correct(gen_result, self.pil_image, self.pil_mask, self.mask_blur_radius)
debug_image(corrected_result, "corrected_result", debug_status=self.enable_image_debugging)
return corrected_result
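The mask_edge()/seam_paint() flow at the top of this file builds a soft seam mask from the result's alpha channel and re-runs inpainting over just that band at reduced strength. Below is a standalone restatement of the mask construction as a sketch; the function name and default values are illustrative, not part of the diff:

import cv2 as cv
import numpy as np
from PIL import Image, ImageFilter, ImageOps

def seam_mask(alpha: Image.Image, edge_size: int = 30, edge_blur: int = 16) -> Image.Image:
    npimg = np.asarray(alpha.convert("L"))
    # soft gradient: 255 wherever alpha is neither fully opaque nor fully transparent
    npgradient = np.uint8(255 * (1.0 - np.floor(np.abs(0.5 - np.float32(npimg) / 255.0) * 2.0)))
    # hard edges of the alpha channel
    npedge = cv.Canny(npimg, threshold1=100, threshold2=200)
    # combine, expand and soften, as mask_edge() does above
    npmask = cv.dilate(npgradient + npedge, np.ones((3, 3), np.uint8), iterations=edge_size // 2)
    mask = Image.fromarray(npmask)
    if edge_blur > 0:
        mask = mask.filter(ImageFilter.BoxBlur(edge_blur))
    return ImageOps.invert(mask)

alpha = Image.new("L", (64, 64), 0)
alpha.paste(255, (16, 16, 48, 48))
print(seam_mask(alpha, edge_size=6, edge_blur=2).size)   # (64, 64)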

View File

@@ -0,0 +1,175 @@
"""omnibus module to be used with the runwayml 9-channel custom inpainting model"""
import torch
import numpy as np
from einops import repeat
from PIL import Image, ImageOps, ImageChops
from ldm.invoke.devices import choose_autocast
from ldm.invoke.ckpt_generator.base import downsampling
from ldm.invoke.ckpt_generator.img2img import CkptImg2Img
from ldm.invoke.ckpt_generator.txt2img import CkptTxt2Img
class CkptOmnibus(CkptImg2Img,CkptTxt2Img):
def __init__(self, model, precision):
super().__init__(model, precision)
self.pil_mask = None
self.pil_image = None
def get_make_image(
self,
prompt,
sampler,
steps,
cfg_scale,
ddim_eta,
conditioning,
width,
height,
init_image = None,
mask_image = None,
strength = None,
step_callback=None,
threshold=0.0,
perlin=0.0,
mask_blur_radius: int = 8,
**kwargs):
"""
Returns a function returning an image derived from the prompt and the initial image
Return value depends on the seed at the time you call it.
"""
self.perlin = perlin
num_samples = 1
sampler.make_schedule(
ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False
)
if isinstance(init_image, Image.Image):
self.pil_image = init_image
if init_image.mode != 'RGB':
init_image = init_image.convert('RGB')
init_image = self._image_to_tensor(init_image)
if isinstance(mask_image, Image.Image):
self.pil_mask = mask_image
mask_image = ImageChops.multiply(mask_image.convert('L'), self.pil_image.split()[-1])
mask_image = self._image_to_tensor(ImageOps.invert(mask_image), normalize=False)
self.mask_blur_radius = mask_blur_radius
t_enc = steps
if init_image is not None and mask_image is not None: # inpainting
masked_image = init_image * (1 - mask_image) # the masked image is the init image with the masked regions zeroed out
elif init_image is not None: # img2img
scope = choose_autocast(self.precision)
with scope(self.model.device.type):
self.init_latent = self.model.get_first_stage_encoding(
self.model.encode_first_stage(init_image)
) # move to latent space
# create an all-ones mask so the entire image is treated as masked
mask_image = torch.ones(1, 1, init_image.shape[2], init_image.shape[3], device=self.model.device)
# and the masked image is just a copy of the original
masked_image = init_image
else: # txt2img
init_image = torch.zeros(1, 3, height, width, device=self.model.device)
mask_image = torch.ones(1, 1, height, width, device=self.model.device)
masked_image = init_image
self.init_latent = init_image
height = init_image.shape[2]
width = init_image.shape[3]
model = self.model
def make_image(x_T):
with torch.no_grad():
scope = choose_autocast(self.precision)
with scope(self.model.device.type):
batch = self.make_batch_sd(
init_image,
mask_image,
masked_image,
prompt=prompt,
device=model.device,
num_samples=num_samples,
)
c = model.cond_stage_model.encode(batch["txt"])
c_cat = list()
for ck in model.concat_keys:
cc = batch[ck].float()
if ck != model.masked_image_key:
bchw = [num_samples, 4, height//8, width//8]
cc = torch.nn.functional.interpolate(cc, size=bchw[-2:])
else:
cc = model.get_first_stage_encoding(model.encode_first_stage(cc))
c_cat.append(cc)
c_cat = torch.cat(c_cat, dim=1)
# cond
cond={"c_concat": [c_cat], "c_crossattn": [c]}
# uncond cond
uc_cross = model.get_unconditional_conditioning(num_samples, "")
uc_full = {"c_concat": [c_cat], "c_crossattn": [uc_cross]}
shape = [model.channels, height//8, width//8]
samples, _ = sampler.sample(
batch_size = 1,
S = steps,
x_T = x_T,
conditioning = cond,
shape = shape,
verbose = False,
unconditional_guidance_scale = cfg_scale,
unconditional_conditioning = uc_full,
eta = 1.0,
img_callback = step_callback,
threshold = threshold,
)
if self.free_gpu_mem:
self.model.model.to("cpu")
return self.sample_to_image(samples)
return make_image
def make_batch_sd(
self,
image,
mask,
masked_image,
prompt,
device,
num_samples=1):
batch = {
"image": repeat(image.to(device=device), "1 ... -> n ...", n=num_samples),
"txt": num_samples * [prompt],
"mask": repeat(mask.to(device=device), "1 ... -> n ...", n=num_samples),
"masked_image": repeat(masked_image.to(device=device), "1 ... -> n ...", n=num_samples),
}
return batch
def get_noise(self, width:int, height:int):
if self.init_latent is not None:
height = self.init_latent.shape[2]
width = self.init_latent.shape[3]
return CkptTxt2Img.get_noise(self,width,height)
def sample_to_image(self, samples)->Image.Image:
gen_result = super().sample_to_image(samples).convert('RGB')
if self.pil_image is None or self.pil_mask is None:
return gen_result
if self.pil_image.size != self.pil_mask.size:
return gen_result
corrected_result = super(CkptImg2Img, self).repaste_and_color_correct(gen_result, self.pil_image, self.pil_mask, self.mask_blur_radius)
return corrected_result
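The make_image()/make_batch_sd() pair above feeds the runwayml 9-channel inpainting UNet: a 1-channel mask downsampled to latent resolution plus the 4-channel VAE encoding of the masked image are concatenated into c_concat, and the model stacks that onto the 4-channel noisy latent. A shape-level sketch with the VAE encode stubbed by a random tensor (variable names are illustrative):

import torch

latent_h, latent_w = 64, 64                                   # H/8 x W/8 for a 512x512 image
mask = torch.ones(1, 1, 512, 512)                             # 1 = region to repaint
masked_image_latent = torch.randn(1, 4, latent_h, latent_w)   # stand-in for encode_first_stage()

mask_small = torch.nn.functional.interpolate(mask, size=(latent_h, latent_w))
c_cat = torch.cat([mask_small, masked_image_latent], dim=1)
assert c_cat.shape == (1, 5, latent_h, latent_w)              # + 4 noisy latent channels = 9 at the UNet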

View File

@@ -0,0 +1,90 @@
'''
ldm.invoke.ckpt_generator.txt2img inherits from ldm.invoke.ckpt_generator
'''
import torch
import numpy as np
from ldm.invoke.ckpt_generator.base import CkptGenerator
from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
import gc
class CkptTxt2Img(CkptGenerator):
def __init__(self, model, precision):
super().__init__(model, precision)
@torch.no_grad()
def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta,
conditioning,width,height,step_callback=None,threshold=0.0,perlin=0.0,
attention_maps_callback=None,
**kwargs):
"""
Returns a function returning an image derived from the prompt and the initial image
Return value depends on the seed at the time you call it
kwargs are 'width' and 'height'
"""
self.perlin = perlin
uc, c, extra_conditioning_info = conditioning
@torch.no_grad()
def make_image(x_T):
shape = [
self.latent_channels,
height // self.downsampling_factor,
width // self.downsampling_factor,
]
if self.free_gpu_mem and self.model.model.device != self.model.device:
self.model.model.to(self.model.device)
sampler.make_schedule(ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False)
samples, _ = sampler.sample(
batch_size = 1,
S = steps,
x_T = x_T,
conditioning = c,
shape = shape,
verbose = False,
unconditional_guidance_scale = cfg_scale,
unconditional_conditioning = uc,
extra_conditioning_info = extra_conditioning_info,
eta = ddim_eta,
img_callback = step_callback,
threshold = threshold,
attention_maps_callback = attention_maps_callback,
)
if self.free_gpu_mem:
self.model.model.to('cpu')
self.model.cond_stage_model.device = 'cpu'
self.model.cond_stage_model.to('cpu')
gc.collect()
torch.cuda.empty_cache()
return self.sample_to_image(samples)
return make_image
# returns a tensor filled with random numbers from a normal distribution
def get_noise(self,width,height):
device = self.model.device
if self.use_mps_noise or device.type == 'mps':
x = torch.randn([1,
self.latent_channels,
height // self.downsampling_factor,
width // self.downsampling_factor],
dtype=self.torch_dtype(),
device='cpu').to(device)
else:
x = torch.randn([1,
self.latent_channels,
height // self.downsampling_factor,
width // self.downsampling_factor],
dtype=self.torch_dtype(),
device=device)
if self.perlin > 0.0:
x = (1-self.perlin)*x + self.perlin*self.get_perlin_noise(width // self.downsampling_factor, height // self.downsampling_factor)
return x
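get_noise() above draws latent-space noise of shape 1 x latent_channels x H/8 x W/8, going through the CPU when MPS is in use (or use_mps_noise is set) and drawing directly on the target device otherwise. A minimal standalone version with illustrative defaults:

import torch

def latent_noise(width, height, device, latent_channels=4, downsampling_factor=8, dtype=torch.float32):
    shape = [1, latent_channels, height // downsampling_factor, width // downsampling_factor]
    if device.type == "mps":
        # draw on the CPU and move over, mirroring the MPS branch above
        return torch.randn(shape, dtype=dtype, device="cpu").to(device)
    return torch.randn(shape, dtype=dtype, device=device)

noise = latent_noise(512, 512, torch.device("cpu"))
assert noise.shape == (1, 4, 64, 64)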

View File

@@ -0,0 +1,182 @@
'''
ldm.invoke.ckpt_generator.txt2img2img inherits from ldm.invoke.ckpt_generator
'''
import torch
import numpy as np
import math
import gc
from ldm.invoke.ckpt_generator.base import CkptGenerator
from ldm.invoke.ckpt_generator.omnibus import CkptOmnibus
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
from PIL import Image
class CkptTxt2Img2Img(CkptGenerator):
def __init__(self, model, precision):
super().__init__(model, precision)
self.init_latent = None # for get_noise()
@torch.no_grad()
def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta,
conditioning,width,height,strength,step_callback=None,**kwargs):
"""
Returns a function returning an image derived from the prompt and the initial image
Return value depends on the seed at the time you call it
kwargs are 'width' and 'height'
"""
uc, c, extra_conditioning_info = conditioning
scale_dim = min(width, height)
scale = 512 / scale_dim
init_width = math.ceil(scale * width / 64) * 64
init_height = math.ceil(scale * height / 64) * 64
@torch.no_grad()
def make_image(x_T):
shape = [
self.latent_channels,
init_height // self.downsampling_factor,
init_width // self.downsampling_factor,
]
sampler.make_schedule(
ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False
)
#x = self.get_noise(init_width, init_height)
x = x_T
if self.free_gpu_mem and self.model.model.device != self.model.device:
self.model.model.to(self.model.device)
samples, _ = sampler.sample(
batch_size = 1,
S = steps,
x_T = x,
conditioning = c,
shape = shape,
verbose = False,
unconditional_guidance_scale = cfg_scale,
unconditional_conditioning = uc,
eta = ddim_eta,
img_callback = step_callback,
extra_conditioning_info = extra_conditioning_info
)
print(
f"\n>> Interpolating from {init_width}x{init_height} to {width}x{height} using DDIM sampling"
)
# resizing
samples = torch.nn.functional.interpolate(
samples,
size=(height // self.downsampling_factor, width // self.downsampling_factor),
mode="bilinear"
)
t_enc = int(strength * steps)
ddim_sampler = DDIMSampler(self.model, device=self.model.device)
ddim_sampler.make_schedule(
ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False
)
z_enc = ddim_sampler.stochastic_encode(
samples,
torch.tensor([t_enc-1]).to(self.model.device),
noise=self.get_noise(width,height,False)
)
# decode it
samples = ddim_sampler.decode(
z_enc,
c,
t_enc,
img_callback = step_callback,
unconditional_guidance_scale=cfg_scale,
unconditional_conditioning=uc,
extra_conditioning_info=extra_conditioning_info,
all_timesteps_count=steps
)
if self.free_gpu_mem:
self.model.model.to('cpu')
self.model.cond_stage_model.device = 'cpu'
self.model.cond_stage_model.to('cpu')
gc.collect()
torch.cuda.empty_cache()
return self.sample_to_image(samples)
# in the case of the inpainting model being loaded, the trick of
# providing an interpolated latent doesn't work, so we transiently
# create a 512x512 PIL image, upscale it, and run the inpainting
# over it in img2img mode. Because the inpainting model is so conservative
# it doesn't change the image (much)
def inpaint_make_image(x_T):
omnibus = CkptOmnibus(self.model,self.precision)
result = omnibus.generate(
prompt,
sampler=sampler,
width=init_width,
height=init_height,
step_callback=step_callback,
steps = steps,
cfg_scale = cfg_scale,
ddim_eta = ddim_eta,
conditioning = conditioning,
**kwargs
)
assert result is not None and len(result)>0,'** txt2img failed **'
image = result[0][0]
interpolated_image = image.resize((width,height),resample=Image.Resampling.LANCZOS)
print(kwargs.pop('init_image',None))
result = omnibus.generate(
prompt,
sampler=sampler,
init_image=interpolated_image,
width=width,
height=height,
seed=result[0][1],
step_callback=step_callback,
steps = steps,
cfg_scale = cfg_scale,
ddim_eta = ddim_eta,
conditioning = conditioning,
**kwargs
)
return result[0][0]
if sampler.uses_inpainting_model():
return inpaint_make_image
else:
return make_image
# returns a tensor filled with random numbers from a normal distribution
def get_noise(self,width,height,scale = True):
# print(f"Get noise: {width}x{height}")
if scale:
trained_square = 512 * 512
actual_square = width * height
scale = math.sqrt(trained_square / actual_square)
scaled_width = math.ceil(scale * width / 64) * 64
scaled_height = math.ceil(scale * height / 64) * 64
else:
scaled_width = width
scaled_height = height
device = self.model.device
if self.use_mps_noise or device.type == 'mps':
return torch.randn([1,
self.latent_channels,
scaled_height // self.downsampling_factor,
scaled_width // self.downsampling_factor],
device='cpu').to(device)
else:
return torch.randn([1,
self.latent_channels,
scaled_height // self.downsampling_factor,
scaled_width // self.downsampling_factor],
device=device)
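The two-pass generator above runs txt2img at a reduced resolution whose shorter side is roughly 512 px (rounded up to a multiple of 64), bilinearly interpolates the latent to the requested size, then refines it with an img2img pass at strength * steps. A worked sketch of just the sizing rule (the function name is illustrative):

import math

def first_pass_size(width: int, height: int) -> tuple[int, int]:
    scale = 512 / min(width, height)
    return (math.ceil(scale * width / 64) * 64,
            math.ceil(scale * height / 64) * 64)

print(first_pass_size(1024, 768))   # (704, 512)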

File diff suppressed because it is too large

View File

@@ -24,15 +24,13 @@ class HuggingFaceConceptsLibrary(object):
self.concepts_loaded = dict()
self.triggers = dict() # concept name to trigger phrase
self.concept_names = dict() # trigger phrase to concept name
self.match_trigger = re.compile('(<[a-zA-Z0-9_\- >]+>)') # trigger is slightly less restrictive than HF concept name
self.match_concept = re.compile('<([a-zA-Z0-9_\-]+)>') # HF concept name can only contain A-Za-z0-9_-
self.match_trigger = re.compile('(<[\w\- >]+>)') # trigger is slightly less restrictive than HF concept name
self.match_concept = re.compile('<([\w\-]+)>') # HF concept name can only contain A-Za-z0-9_-
def list_concepts(self, minimum_likes: int=0)->list:
def list_concepts(self)->list:
'''
Return a list of all the concepts by name, without the 'sd-concepts-library' part.
Also adds local concepts in invokeai/embeddings folder.
If minimum_likes is provided, then only concepts that have received at least that
many "likes" will be returned.
'''
local_concepts_now = self.get_local_concepts(os.path.join(self.root, 'embeddings'))
local_concepts_to_add = set(local_concepts_now).difference(set(self.local_concepts))
@@ -46,7 +44,7 @@ class HuggingFaceConceptsLibrary(object):
else:
try:
models = self.hf_api.list_models(filter=ModelFilter(model_name='sd-concepts-library/'))
self.concept_list = [a.id.split('/')[1] for a in models if a.likes>=minimum_likes]
self.concept_list = [a.id.split('/')[1] for a in models]
# when init, add all in dir. when not init, add only concepts added between init and now
self.concept_list.extend(list(local_concepts_to_add))
except Exception as e:
@@ -183,7 +181,7 @@ class HuggingFaceConceptsLibrary(object):
print(f'>> Downloading {repo_id}...',end='')
try:
for file in ('learned_embeds.bin','token_identifier.txt'):
for file in ('README.md','learned_embeds.bin','token_identifier.txt','type_of_concept.txt'):
url = hf_hub_url(repo_id, file)
request.urlretrieve(url, os.path.join(dest,file),reporthook=tally_download_size)
except ul_error.HTTPError as e:
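The match_concept/match_trigger patterns in this hunk pick Hugging Face concept triggers such as <my-style> out of a prompt. A small sketch using the [\w\-] variant shown above (the prompt text is made up):

import re

match_concept = re.compile(r'<([\w\-]+)>')   # concept names: letters, digits, _ and -

prompt = "portrait of a cat in the style of <moebius> and <low-poly>"
print(match_concept.findall(prompt))          # ['moebius', 'low-poly']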

View File

@@ -9,11 +9,10 @@ get_uc_and_c_and_ec() get the conditioned and unconditioned latent, an
import re
from typing import Union, Optional, Any
from transformers import CLIPTokenizer
from transformers import CLIPTokenizer, CLIPTextModel
from compel import Compel
from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser, \
Conjunction
from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser
from .devices import torch_dtype
from ..models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
from ldm.invoke.globals import Globals
@@ -53,30 +52,14 @@ def get_uc_and_c_and_ec(prompt_string, model, log_tokens=False, skip_normalize_l
textual_inversion_manager=model.textual_inversion_manager,
dtype_for_device_getter=torch_dtype)
# get rid of any newline characters
prompt_string = prompt_string.replace("\n", " ")
positive_prompt_string, negative_prompt_string = split_prompt_to_positive_and_negative(prompt_string)
legacy_blend = try_parse_legacy_blend(positive_prompt_string, skip_normalize_legacy_blend)
positive_conjunction: Conjunction
positive_prompt: FlattenedPrompt|Blend
if legacy_blend is not None:
positive_conjunction = legacy_blend
positive_prompt = legacy_blend
else:
positive_conjunction = Compel.parse_prompt_string(positive_prompt_string)
positive_prompt = positive_conjunction.prompts[0]
should_use_lora_manager = True
lora_weights = positive_conjunction.lora_weights
lora_conditions = None
if model.peft_manager:
should_use_lora_manager = model.peft_manager.should_use(lora_weights)
if not should_use_lora_manager:
model.peft_manager.set_loras(lora_weights)
if model.lora_manager and should_use_lora_manager:
lora_conditions = model.lora_manager.set_loras_conditions(lora_weights)
negative_conjunction = Compel.parse_prompt_string(negative_prompt_string)
negative_prompt: FlattenedPrompt | Blend = negative_conjunction.prompts[0]
positive_prompt = Compel.parse_prompt_string(positive_prompt_string)
negative_prompt: FlattenedPrompt|Blend = Compel.parse_prompt_string(negative_prompt_string)
if log_tokens or getattr(Globals, "log_tokenization", False):
log_tokenization(positive_prompt, negative_prompt, tokenizer=tokenizer)
@@ -88,8 +71,7 @@ def get_uc_and_c_and_ec(prompt_string, model, log_tokens=False, skip_normalize_l
ec = InvokeAIDiffuserComponent.ExtraConditioningInfo(tokens_count_including_eos_bos=tokens_count,
cross_attention_control_args=options.get(
'cross_attention_control', None),
lora_conditions=lora_conditions)
'cross_attention_control', None))
return uc, c, ec
@@ -97,14 +79,12 @@ def get_prompt_structure(prompt_string, skip_normalize_legacy_blend: bool = Fals
Union[FlattenedPrompt, Blend], FlattenedPrompt):
positive_prompt_string, negative_prompt_string = split_prompt_to_positive_and_negative(prompt_string)
legacy_blend = try_parse_legacy_blend(positive_prompt_string, skip_normalize_legacy_blend)
positive_conjunction: Conjunction
positive_prompt: FlattenedPrompt|Blend
if legacy_blend is not None:
positive_conjunction = legacy_blend
positive_prompt = legacy_blend
else:
positive_conjunction = Compel.parse_prompt_string(positive_prompt_string)
positive_prompt = positive_conjunction.prompts[0]
negative_conjunction = Compel.parse_prompt_string(negative_prompt_string)
negative_prompt: FlattenedPrompt|Blend = negative_conjunction.prompts[0]
positive_prompt = Compel.parse_prompt_string(positive_prompt_string)
negative_prompt: FlattenedPrompt|Blend = Compel.parse_prompt_string(negative_prompt_string)
return positive_prompt, negative_prompt
@@ -133,7 +113,7 @@ def get_tokens_for_prompt_object(tokenizer, parsed_prompt: FlattenedPrompt, trun
return tokens
def split_prompt_to_positive_and_negative(prompt_string_uncleaned: str):
def split_prompt_to_positive_and_negative(prompt_string_uncleaned):
unconditioned_words = ''
unconditional_regex = r'\[(.*?)\]'
unconditionals = re.findall(unconditional_regex, prompt_string_uncleaned)
@@ -221,26 +201,18 @@ def log_tokenization_for_text(text, tokenizer, display_label=None):
print(f'{discarded}\x1b[0m')
def try_parse_legacy_blend(text: str, skip_normalize: bool=False) -> Optional[Conjunction]:
def try_parse_legacy_blend(text: str, skip_normalize: bool=False) -> Optional[Blend]:
weighted_subprompts = split_weighted_subprompts(text, skip_normalize=skip_normalize)
if len(weighted_subprompts) <= 1:
return None
strings = [x[0] for x in weighted_subprompts]
weights = [x[1] for x in weighted_subprompts]
pp = PromptParser()
parsed_conjunctions = [pp.parse_conjunction(x) for x in strings]
flattened_prompts = []
weights = []
loras = []
for i, x in enumerate(parsed_conjunctions):
if len(x.prompts)>0:
flattened_prompts.append(x.prompts[0])
weights.append(weighted_subprompts[i][1])
if len(x.lora_weights)>0:
loras.extend(x.lora_weights)
flattened_prompts = [x.prompts[0] for x in parsed_conjunctions]
return Conjunction([Blend(prompts=flattened_prompts, weights=weights, normalize_weights=not skip_normalize)],
lora_weights = loras)
return Blend(prompts=flattened_prompts, weights=weights, normalize_weights=not skip_normalize)
def split_weighted_subprompts(text, skip_normalize=False)->list:
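try_parse_legacy_blend() above turns weighted subprompts into a Blend with normalize_weights set unless skip_normalize is passed; Compel's Blend is presumed to rescale the weights so they sum to 1. An illustrative sketch of that normalization only, not Compel's actual implementation:

def normalize_weights(weights: list[float], skip_normalize: bool = False) -> list[float]:
    if skip_normalize or not weights:
        return weights
    total = sum(weights)
    return [w / total for w in weights] if total != 0 else weights

print(normalize_weights([1.0, 3.0]))   # [0.25, 0.75]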

View File

@@ -290,7 +290,7 @@ def download_vaes():
# first the diffusers version
repo_id = "stabilityai/sd-vae-ft-mse"
args = dict(
cache_dir=global_cache_dir("hub"),
cache_dir=global_cache_dir("diffusers"),
)
if not AutoencoderKL.from_pretrained(repo_id, **args):
raise Exception(f"download of {repo_id} failed")
@@ -655,7 +655,6 @@ def initialize_rootdir(root: str, yes_to_all: bool = False):
"models",
"configs",
"embeddings",
"loras",
"text-inversion-output",
"text-inversion-training-data",
):
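download_vaes() above pre-fetches the stabilityai/sd-vae-ft-mse autoencoder through diffusers so later pipeline loads find it in the local cache; the hunk only changes which cache subdirectory is used. A minimal sketch of the call (the cache path shown is illustrative):

from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained(
    "stabilityai/sd-vae-ft-mse",
    cache_dir="/home/user/invokeai/models/hub",   # illustrative location
)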

View File

@@ -4,19 +4,18 @@ pip install <path_to_git_source>.
'''
import os
import platform
import psutil
import requests
from rich import box, print
from rich.console import Console, group
from rich.console import Console, Group, group
from rich.panel import Panel
from rich.prompt import Prompt
from rich.style import Style
from rich.syntax import Syntax
from rich.text import Text
from ldm.invoke import __version__
INVOKE_AI_SRC="https://github.com/invoke-ai/InvokeAI/archive"
INVOKE_AI_TAG="https://github.com/invoke-ai/InvokeAI/archive/refs/tags"
INVOKE_AI_BRANCH="https://github.com/invoke-ai/InvokeAI/archive/refs/heads"
INVOKE_AI_REL="https://api.github.com/repos/invoke-ai/InvokeAI/releases"
OS = platform.uname().system
@@ -31,19 +30,6 @@ else:
def get_versions()->dict:
return requests.get(url=INVOKE_AI_REL).json()
def invokeai_is_running()->bool:
for p in psutil.process_iter():
try:
cmdline = p.cmdline()
matches = [x for x in cmdline if x.endswith(('invokeai','invokeai.exe'))]
if matches:
print(f':exclamation: [bold red]An InvokeAI instance appears to be running as process {p.pid}[/red bold]')
return True
except psutil.AccessDenied:
continue
return False
def welcome(versions: dict):
@group()
@@ -55,8 +41,7 @@ def welcome(versions: dict):
yield '[bold yellow]Options:'
yield f'''[1] Update to the latest official release ([italic]{versions[0]['tag_name']}[/italic])
[2] Update to the bleeding-edge development version ([italic]main[/italic])
[3] Manually enter the [bold]tag name[/bold] for the version you wish to update to
[4] Manually enter the [bold]branch name[/bold] for the version you wish to update to'''
[3] Manually enter the tag or branch name you wish to update'''
console.rule()
print(
@@ -74,33 +59,20 @@ def welcome(versions: dict):
def main():
versions = get_versions()
if invokeai_is_running():
print(f':exclamation: [bold red]Please terminate all running instances of InvokeAI before updating.[/red bold]')
return
welcome(versions)
tag = None
branch = None
release = None
choice = Prompt.ask('Choice:',choices=['1','2','3','4'],default='1')
choice = Prompt.ask('Choice:',choices=['1','2','3'],default='1')
if choice=='1':
release = versions[0]['tag_name']
tag = versions[0]['tag_name']
elif choice=='2':
release = 'main'
tag = 'main'
elif choice=='3':
tag = Prompt.ask('Enter an InvokeAI tag name')
elif choice=='4':
branch = Prompt.ask('Enter an InvokeAI branch name')
tag = Prompt.ask('Enter an InvokeAI tag or branch name')
print(f':crossed_fingers: Upgrading to [yellow]{tag if tag else release}[/yellow]')
if release:
cmd = f'pip install {INVOKE_AI_SRC}/{release}.zip --use-pep517 --upgrade'
elif tag:
cmd = f'pip install {INVOKE_AI_TAG}/{tag}.zip --use-pep517 --upgrade'
else:
cmd = f'pip install {INVOKE_AI_BRANCH}/{branch}.zip --use-pep517 --upgrade'
print(f':crossed_fingers: Upgrading to [yellow]{tag}[/yellow]')
cmd = f'pip install {INVOKE_AI_SRC}/{tag}.zip --use-pep517'
print('')
print('')
if os.system(cmd)==0:
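The updater above lists releases from the GitHub API and installs the chosen tag or branch straight from a source archive. A condensed sketch of that lookup and the resulting pip command (this performs a live HTTP request):

import requests

INVOKE_AI_SRC = "https://github.com/invoke-ai/InvokeAI/archive"
INVOKE_AI_REL = "https://api.github.com/repos/invoke-ai/InvokeAI/releases"

versions = requests.get(url=INVOKE_AI_REL).json()
tag = versions[0]["tag_name"]                      # latest official release
print(f"pip install {INVOKE_AI_SRC}/{tag}.zip --use-pep517")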

View File

@@ -29,13 +29,7 @@ Model_dir = "models"
Weights_dir = "ldm/stable-diffusion-v1/"
# the initial "configs" dir is now bundled in the `invokeai.configs` package
Dataset_path = None
for path in configs.__path__:
file =Path(path, "INITIAL_MODELS.yaml")
if file.exists():
Dataset_path = file
break
assert Dataset_path,f"Could not find the file INITIAL_MODELS.yaml in {configs.__path__}"
Dataset_path = Path(configs.__path__[0]) / "INITIAL_MODELS.yaml"
# initial models omegaconf
Datasets = None
@@ -115,7 +109,6 @@ def install_requested_models(
model_manager.heuristic_import(
path_url_or_repo,
convert=convert_to_diffusers,
config_file_callback=_pick_configuration_file,
commit_to_conf=config_file_path
)
except KeyboardInterrupt:
@@ -145,45 +138,6 @@ def yes_or_no(prompt: str, default_yes=True):
else:
return response[0] in ("y", "Y")
# -------------------------------------
def _pick_configuration_file(checkpoint_path: Path)->Path:
print(
"""
Please select the type of this model:
[1] A Stable Diffusion v1.x ckpt/safetensors model
[2] A Stable Diffusion v1.x inpainting ckpt/safetensors model
[3] A Stable Diffusion v2.x base model (512 pixels; no 'parameterization:' in its yaml file)
[4] A Stable Diffusion v2.x v-predictive model (768 pixels; look for 'parameterization: "v"' in its yaml file)
[5] Other (you will be prompted to enter the config file path)
[Q] I have no idea! Skip the import.
""")
choices = [
global_config_dir() / 'stable-diffusion' / x
for x in [
'v1-inference.yaml',
'v1-inpainting-inference.yaml',
'v2-inference.yaml',
'v2-inference-v.yaml',
]
]
ok = False
while not ok:
try:
choice = input('select 0-5, Q > ').strip()
if choice.startswith(('q','Q')):
return
if choice == '5':
choice = Path(input('Select config file for this model> ').strip()).absolute()
ok = choice.exists()
else:
choice = choices[int(choice)-1]
ok = True
except (ValueError, IndexError):
print(f'{choice} is not a valid choice')
except EOFError:
return
return choice
# -------------------------------------
def get_root(root: str = None) -> str:
@@ -308,6 +262,7 @@ def _download_diffusion_weights(
path = download_from_hf(
model_class,
repo_id,
cache_subdir="diffusers",
safety_checker=None,
**extra_args,
)

View File

@@ -1,569 +0,0 @@
#!/usr/bin/env python
"""
Simple script to generate a file of InvokeAI prompts and settings
that scan across steps and other parameters.
"""
import argparse
import io
import json
import os
import pydoc
import re
import shutil
import sys
import time
from contextlib import redirect_stderr
from io import TextIOBase
from itertools import product
from multiprocessing import Process
from multiprocessing.connection import Connection, Pipe
from pathlib import Path
from tempfile import gettempdir
from typing import Callable, Iterable, List
import numpy as np
import yaml
from omegaconf import OmegaConf, dictconfig, listconfig
def expand_prompts(
template_file: Path,
run_invoke: bool = False,
invoke_model: str = None,
invoke_outdir: str = None,
invoke_root: str = None,
processes_per_gpu: int = 1,
):
"""
:param template_file: A YAML file containing templated prompts and args
:param run_invoke: A boolean which if True will pass expanded prompts to invokeai CLI
:param invoke_model: Name of the model to load when run_invoke is true; otherwise uses default
:param invoke_outdir: Directory for outputs when run_invoke is true; otherwise uses default
"""
if template_file.name.endswith(".json"):
with open(template_file, "r") as file:
with io.StringIO(yaml.dump(json.load(file))) as fh:
conf = OmegaConf.load(fh)
else:
conf = OmegaConf.load(template_file)
# loading here to avoid long wait for help message
import torch
torch.multiprocessing.set_start_method("spawn")
gpu_count = torch.cuda.device_count() if torch.cuda.is_available() else 1
commands = expanded_invokeai_commands(conf, run_invoke)
children = list()
try:
if run_invoke:
invokeai_args = [shutil.which("invokeai"), "--from_file", "-"]
if invoke_model:
invokeai_args.extend(("--model", invoke_model))
if invoke_root:
invokeai_args.extend(("--root", invoke_root))
if invoke_outdir:
outdir = os.path.expanduser(invoke_outdir)
invokeai_args.extend(("--outdir", outdir))
else:
outdir = gettempdir()
logdir = Path(outdir, "invokeai-batch-logs")
processes_to_launch = gpu_count * processes_per_gpu
print(
f">> Spawning {processes_to_launch} invokeai processes across {gpu_count} CUDA gpus",
file=sys.stderr,
)
print(
f'>> Outputs will be written into {invoke_outdir or "default InvokeAI outputs directory"}, and error logs will be written to {logdir}',
file=sys.stderr,
)
import ldm.invoke.CLI
print(f'DEBUG: BATCH PARENT ENVIRONMENT:')
print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
print("\n".join([f'{x}:{os.environ[x]}' for x in os.environ.keys()]))
print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
parent_conn, child_conn = Pipe()
children = set()
for i in range(processes_to_launch):
p = Process(
target=_run_invoke,
kwargs=dict(
entry_point=ldm.invoke.CLI.main,
conn_in=child_conn,
conn_out=parent_conn,
args=invokeai_args,
gpu=i % gpu_count,
logdir=logdir,
),
)
p.start()
children.add(p)
child_conn.close()
sequence = 0
for command in commands:
sequence += 1
format = _get_fn_format(outdir, sequence)
parent_conn.send_bytes(
(command + f' --fnformat="{format}"').encode('utf-8')
)
parent_conn.close()
else:
for command in commands:
print(command)
except KeyboardInterrupt:
for p in children:
p.terminate()
def _dummy_cli_main():
counter = 0
while line := sys.stdin.readline():
print(f'[{counter}] {os.getpid()} got command {line.rstrip()}\n')
counter += 1
time.sleep(1)
def _get_fn_format(directory:str, sequence:int)->str:
"""
Get a filename that doesn't exceed filename length restrictions
on the current platform.
"""
try:
max_length = os.pathconf(directory,'PC_NAME_MAX')
except:
max_length = 255
prefix = f'dp.{sequence:04}.'
suffix = '.png'
max_length -= len(prefix)+len(suffix)
return f'{prefix}{{prompt:0.{max_length}}}{suffix}'
class MessageToStdin(object):
def __init__(self, connection: Connection):
self.connection = connection
self.linebuffer = list()
def readline(self) -> str:
try:
if len(self.linebuffer) == 0:
message = self.connection.recv_bytes().decode('utf-8')
self.linebuffer = message.split("\n")
result = self.linebuffer.pop(0)
return result
except EOFError:
return None
class FilterStream(object):
def __init__(
self, stream: TextIOBase, include: re.Pattern = None, exclude: re.Pattern = None
):
self.stream = stream
self.include = include
self.exclude = exclude
def write(self, data: str):
if self.include and self.include.match(data):
self.stream.write(data)
self.stream.flush()
elif self.exclude and not self.exclude.match(data):
self.stream.write(data)
self.stream.flush()
def flush(self):
self.stream.flush()
def _run_invoke(
entry_point: Callable,
conn_in: Connection,
conn_out: Connection,
args: List[str],
logdir: Path,
gpu: int = 0,
):
pid = os.getpid()
logdir.mkdir(parents=True, exist_ok=True)
logfile = Path(logdir, f'{time.strftime("%Y-%m-%d_%H-%M-%S")}-pid={pid}.txt')
print(
f">> Process {pid} running on GPU {gpu}; logging to {logfile}", file=sys.stderr
)
conn_out.close()
os.environ["CUDA_VISIBLE_DEVICES"] = f"{gpu}"
sys.argv = args
sys.stdin = MessageToStdin(conn_in)
# sys.stdout = FilterStream(sys.stdout, include=re.compile("^\[\d+\]"))
# with open(logfile, "w") as stderr, redirect_stderr(stderr):
entry_point()
def _filter_output(stream: TextIOBase):
while line := stream.readline():
if re.match("^\[\d+\]", line):
print(line)
def main():
parser = argparse.ArgumentParser(
description=HELP,
)
parser.add_argument(
"template_file",
type=Path,
nargs="?",
help="path to a template file, use --example to generate an example file",
)
parser.add_argument(
"--example",
action="store_true",
default=False,
help=f'Print an example template file in YAML format. Use "{sys.argv[0]} --example > example.yaml" to save output to a file',
)
parser.add_argument(
"--json-example",
action="store_true",
default=False,
help=f'Print an example template file in json format. Use "{sys.argv[0]} --json-example > example.json" to save output to a file',
)
parser.add_argument(
"--instructions",
"-i",
dest="instructions",
action="store_true",
default=False,
help="Print verbose instructions.",
)
parser.add_argument(
"--invoke",
action="store_true",
help="Execute invokeai using specified optional --model, --processes_per_gpu and --outdir",
)
parser.add_argument(
"--model",
help="Feed the generated prompts to the invokeai CLI using the indicated model. Will be overriden by a model: section in template file.",
)
parser.add_argument(
"--outdir", type=Path, help="Write images and log into indicated directory"
)
parser.add_argument(
"--processes_per_gpu",
type=int,
default=1,
help="When executing invokeai, how many parallel processes to execute per CUDA GPU.",
)
parser.add_argument(
'--root_dir',
default=None,
help='Path to directory containing "models", "outputs" and "configs". If not present will read from environment variable INVOKEAI_ROOT. Defaults to ~/invokeai' )
opt = parser.parse_args()
if opt.example:
print(EXAMPLE_TEMPLATE_FILE)
sys.exit(0)
if opt.json_example:
print(_yaml_to_json(EXAMPLE_TEMPLATE_FILE))
sys.exit(0)
if opt.instructions:
pydoc.pager(INSTRUCTIONS)
sys.exit(0)
if not opt.template_file:
parser.print_help()
sys.exit(-1)
expand_prompts(
template_file=opt.template_file,
run_invoke=opt.invoke,
invoke_model=opt.model,
invoke_outdir=opt.outdir,
invoke_root=opt.root_dir,
processes_per_gpu=opt.processes_per_gpu,
)
def expanded_invokeai_commands(
conf: OmegaConf, always_switch_models: bool = False
) -> List[List[str]]:
models = expand_values(conf.get("model"))
steps = expand_values(conf.get("steps")) or [30]
cfgs = expand_values(conf.get("cfg")) or [7.5]
samplers = expand_values(conf.get("sampler")) or ["ddim"]
seeds = expand_values(conf.get("seed")) or [0]
dimensions = expand_values(conf.get("dimensions")) or ["512x512"]
init_img = expand_values(conf.get("init_img")) or [""]
perlin = expand_values(conf.get("perlin")) or [0]
threshold = expand_values(conf.get("threshold")) or [0]
strength = expand_values(conf.get("strength")) or [0.75]
prompts = expand_prompt(conf.get("prompt")) or ["banana sushi"]
cross_product = product(
*[
models,
seeds,
prompts,
samplers,
cfgs,
steps,
perlin,
threshold,
init_img,
strength,
dimensions,
]
)
previous_model = None
result = list()
for p in cross_product:
(
model,
seed,
prompt,
sampler,
cfg,
step,
perlin,
threshold,
init_img,
strength,
dimensions,
) = tuple(p)
(width, height) = dimensions.split("x")
switch_args = (
f"!switch {model}\n"
if always_switch_models or previous_model != model
else ""
)
image_args = f"-I{init_img} -f{strength}" if init_img else ""
command = f"{switch_args}{prompt} -S{seed} -A{sampler} -C{cfg} -s{step} {image_args} --perlin={perlin} --threshold={threshold} -W{width} -H{height}"
result.append(command)
previous_model = model
return result
def expand_prompt(
stanza: str | dict | listconfig.ListConfig | dictconfig.DictConfig,
) -> list | range:
if not stanza:
return None
if isinstance(stanza, listconfig.ListConfig):
return stanza
if isinstance(stanza, str):
return [stanza]
if not isinstance(stanza, dictconfig.DictConfig):
raise ValueError(f"Unrecognized template: {stanza}")
if not (template := stanza.get("template")):
raise KeyError('"prompt" section must contain a "template" definition')
fragment_labels = re.findall("{([^{}]+?)}", template)
if len(fragment_labels) == 0:
return [template]
fragments = [[{x: y} for y in stanza.get(x)] for x in fragment_labels]
dicts = merge(product(*fragments))
return [template.format(**x) for x in dicts]
def merge(dicts: Iterable) -> List[dict]:
result = list()
for x in dicts:
to_merge = dict()
for item in x:
to_merge = to_merge | item
result.append(to_merge)
return result
def expand_values(stanza: str | dict | listconfig.ListConfig) -> list | range:
if not stanza:
return None
if isinstance(stanza, listconfig.ListConfig):
return stanza
elif match := re.match("^(-?\d+);(-?\d+)(;(\d+))?", str(stanza)):
(start, stop, step) = (
int(match.group(1)),
int(match.group(2)),
int(match.group(4)) or 1,
)
return range(start, stop + step, step)
elif match := re.match("^(-?[\d.]+);(-?[\d.]+)(;([\d.]+))?", str(stanza)):
(start, stop, step) = (
float(match.group(1)),
float(match.group(2)),
float(match.group(4)) or 1.0,
)
return np.arange(start, stop + step, step).tolist()
else:
return [stanza]
def _yaml_to_json(yaml_input: str) -> str:
"""
Converts a yaml string into a json string. Used internally
to generate the example template file.
"""
with io.StringIO(yaml_input) as yaml_in:
data = yaml.safe_load(yaml_in)
return json.dumps(data, indent=2)
HELP = """
This script takes a prompt template file that contains multiple
alternative values for the prompt and its generation arguments (such
as steps). It then expands out the prompts using all combinations of
arguments and either prints them to the terminal's standard output, or
feeds the prompts directly to the invokeai command-line interface.
Call this script again with --instructions (-i) for verbose instructions.
"""
INSTRUCTIONS = f"""
== INTRODUCTION ==
This script takes a prompt template file that contains multiple
alternative values for the prompt and its generation arguments (such
as steps). It then expands out the prompts using all combinations of
arguments and either prints them to the terminal's standard output, or
feeds the prompts directly to the invokeai command-line interface.
If the optional --invoke argument is provided, then the generated
prompts will be fed directly to invokeai for image generation. You
will likely want to add the --outdir option in order to save the image
files to their own folder.
{sys.argv[0]} --invoke --outdir=/tmp/outputs my_template.yaml
If --invoke isn't specified, the expanded prompts will be printed to
output. You can capture them to a file for inspection and editing this
way:
{sys.argv[0]} my_template.yaml > prompts.txt
And then feed them to invokeai this way:
invokeai --outdir=/tmp/outputs < prompts.txt
Note that after invokeai finishes processing the list of prompts, the
output directory will contain a markdown file named `log.md`
containing annotated images. You can open this file using an e-book
reader such as the cross-platform Calibre eBook reader
(https://calibre-ebook.com/).
== FORMAT OF THE TEMPLATES FILE ==
This will generate an example template file that you can get
started with:
{sys.argv[0]} --example > example.yaml
An excerpt from the top of this file looks like this:
model:
- stable-diffusion-1.5
- stable-diffusion-2.1-base
steps: 30;50;1 # start steps at 30 and go up to 50, incrementing by 1 each time
seed: 50 # fixed constant, seed=50
cfg: # list of CFG values to try
- 7
- 8
- 12
prompt: a walk in the park # constant value
In more detail, the template file can contain one or more of the
following sections:
- model:
- steps:
- seed:
- cfg:
- sampler:
- prompt:
- init_img:
- perlin:
- threshold:
- strength:
- Each section can have a constant value such as this:
steps: 50
- Or a range of numeric values in the format:
steps: <start>;<stop>;<step> (note semicolon, not colon!)
- Or a list of values in the format:
- value1
- value2
- value3
The "prompt:" section is special. It can accept a constant value:
prompt: a walk in the woods in the style of donatello
Or it can accept a list of prompts:
prompt:
- a walk in the woods
- a walk on the beach
Or it can accept a templated list of prompts. These allow you to
define a series of phrases, each of which is a list. You then combine
them together into a prompt template in this way:
prompt:
style:
- oil painting
- watercolor
- comic book
- studio photography
subject:
- sunny meadow in the mountains
- gathering storm in the mountains
template: a {{subject}} in the style of {{style}}
In the example above, the phrase names "style" and "subject" are
examples only. You can use whatever you like. However, the "template:"
field is required. The output will be:
"a sunny meadow in the mountains in the style of an oil painting"
"a sunny meadow in the mountains in the style of watercolor masterpiece"
...
"a gathering storm in the mountains in the style of an ink sketch"
== SUPPORT FOR JSON FORMAT ==
For those who prefer the JSON format, this script will accept JSON
template files as well. Please run "{sys.argv[0]} --json-example"
to print out a version of the example template file in json format.
You may save it to disk and use it as a starting point for your own
template this way:
{sys.argv[0]} --json-example > template.json
"""
EXAMPLE_TEMPLATE_FILE = """
model: stable-diffusion-1.5
steps: 30;50;10
seed: 50
dimensions: 512x512
perlin: 0.0
threshold: 0
cfg:
- 7
- 12
sampler:
- k_euler_a
- k_lms
prompt:
style:
- oil painting
- watercolor
location:
- the mountains
- a desert
object:
- luxurious dwelling
- crude tent
template: a {object} in {location}, in the style of {style}
"""
if __name__ == "__main__":
main()
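The INSTRUCTIONS text above documents two expansion rules: numeric ranges written start;stop;step, and templated prompts combined across every phrase list. A simplified, self-contained sketch of both (the real script also handles floats, YAML/JSON input and model switching):

import re
from itertools import product

def expand_range(stanza: str) -> list[int]:
    m = re.match(r"^(-?\d+);(-?\d+)(?:;(\d+))?$", stanza)
    assert m, f"not a range: {stanza}"
    start, stop, step = int(m.group(1)), int(m.group(2)), int(m.group(3) or 1)
    return list(range(start, stop + step, step))

print(expand_range("30;50;10"))          # [30, 40, 50]

template = "a {object} in {location}, in the style of {style}"
styles = ["oil painting", "watercolor"]
locations = ["the mountains", "a desert"]
objects = ["luxurious dwelling"]
prompts = [template.format(style=s, location=l, object=o)
           for s, l, o in product(styles, locations, objects)]
print(len(prompts))                      # 4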

View File

@@ -29,8 +29,6 @@ from typing_extensions import ParamSpec
from ldm.invoke.globals import Globals
from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent, PostprocessingSettings
from ldm.modules.textual_inversion_manager import TextualInversionManager
from ldm.modules.lora_manager import LoraManager
from ldm.modules.peft_manager import PeftManager
from ..devices import normalize_device, CPU_DEVICE
from ..offloading import LazilyLoadedModelGroup, FullyLoadedModelGroup, ModelGroup
from ...models.diffusion.cross_attention_map_saving import AttentionMapSaver
@@ -291,14 +289,11 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
safety_checker=safety_checker,
feature_extractor=feature_extractor,
)
self.lora_manager = LoraManager(self)
self.peft_manager = PeftManager()
self.invokeai_diffuser = InvokeAIDiffuserComponent(self.unet, self._unet_forward, is_running_diffusers=True)
use_full_precision = (precision == 'float32' or precision == 'autocast')
self.textual_inversion_manager = TextualInversionManager(tokenizer=self.tokenizer,
text_encoder=self.text_encoder,
full_precision=use_full_precision)
# InvokeAI's interface for text embeddings and whatnot
self.embeddings_provider = EmbeddingsProvider(
tokenizer=self.tokenizer,

View File

@@ -255,8 +255,8 @@ class Inpaint(Img2Img):
pipeline.scheduler = sampler
# todo: support cross-attention control
uc, c, extra_conditioning_info = conditioning
conditioning_data = (ConditioningData(uc, c, cfg_scale, extra_conditioning_info)
uc, c, _ = conditioning
conditioning_data = (ConditioningData(uc, c, cfg_scale)
.add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta))

View File

@@ -23,7 +23,6 @@ Globals = Namespace()
Globals.initfile = 'invokeai.init'
Globals.models_file = 'models.yaml'
Globals.models_dir = 'models'
Globals.lora_models_dir = 'loras'
Globals.config_dir = 'configs'
Globals.autoscan_dir = 'weights'
Globals.converted_ckpts_dir = 'converted_ckpts'
@@ -62,7 +61,7 @@ Globals.sequential_guidance = False
Globals.full_precision = False
# whether we should convert ckpt files into diffusers models on the fly
Globals.ckpt_convert = True
Globals.ckpt_convert = False
# logging tokenization everywhere
Globals.log_tokenization = False
@@ -76,11 +75,6 @@ def global_config_dir()->Path:
def global_models_dir()->Path:
return Path(Globals.root, Globals.models_dir)
def global_lora_models_dir()->Path:
return Path(Globals.lora_models_dir) \
if Path(Globals.lora_models_dir).is_absolute() \
else Path(Globals.root, Globals.lora_models_dir)
def global_autoscan_dir()->Path:
return Path(Globals.root, Globals.autoscan_dir)
@@ -94,13 +88,16 @@ def global_cache_dir(subdir:Union[str,Path]='')->Path:
'''
Returns Path to the model cache directory. If a subdirectory
is provided, it will be appended to the end of the path, allowing
for Hugging Face-style conventions. Currently, Hugging Face has
moved all models into the "hub" subfolder, so for any pretrained
HF model, use:
for huggingface-style conventions:
global_cache_dir('diffusers')
global_cache_dir('hub')
The legacy location for transformers used to be global_cache_dir('transformers')
and global_cache_dir('diffusers') for diffusers.
Current HuggingFace documentation (mid-Jan 2023) indicates that
transformers models will be cached into a "transformers" subdirectory,
but in practice they seem to go into "hub". But if needed:
global_cache_dir('transformers')
One other caveat is that HuggingFace is moving some diffusers models
into the "hub" subdirectory as well, so this will need to be revisited
from time to time.
'''
home: str = os.getenv('HF_HOME')
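global_cache_dir() as described above resolves the Hugging Face model cache, honoring HF_HOME when it is set and appending an optional subdirectory such as "hub" or "diffusers". A minimal sketch of that resolution; the fallback layout is illustrative rather than the project's exact logic:

import os
from pathlib import Path

def cache_dir(root: str, subdir: str = "") -> Path:
    home = os.getenv("HF_HOME")
    base = Path(home) if home else Path(root, "models")
    return base / subdir if subdir else base

print(cache_dir("/home/user/invokeai", "hub"))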

View File

@@ -1,29 +0,0 @@
#!/usr/bin/env python
import sys
import json
from ldm.invoke.pngwriter import retrieve_metadata
def main():
if len(sys.argv) < 2:
print("Usage: file2prompt.py <file1.png> <file2.png> <file3.png>...")
print("This script opens up the indicated invoke.py-generated PNG file(s) and prints out their metadata.")
exit(-1)
filenames = sys.argv[1:]
for f in filenames:
try:
metadata = retrieve_metadata(f)
print(f'{f}:\n',json.dumps(metadata['sd-metadata'], indent=4))
except FileNotFoundError:
sys.stderr.write(f'{f} not found\n')
continue
except PermissionError:
sys.stderr.write(f'{f} could not be opened due to inadequate permissions\n')
continue
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass

View File

@@ -437,10 +437,10 @@ def main():
args = _parse_args()
global_set_root(args.root_dir)
cache_dir = str(global_cache_dir("hub"))
cache_dir = str(global_cache_dir("diffusers"))
os.environ[
"HF_HOME"
] = str(global_cache_dir()) # because not clear the merge pipeline is honoring cache_dir
] = cache_dir # because not clear the merge pipeline is honoring cache_dir
args.cache_dir = cache_dir
try:

Some files were not shown because too many files have changed in this diff