mirror of https://github.com/invoke-ai/InvokeAI.git
synced 2026-01-16 08:28:11 -05:00

Compare commits: feat/docs_ ... feat/workf (43 Commits)

| SHA1 |
|---|
| 2b1762d8da |
| b056a9c181 |
| f56c47b550 |
| f8e35aec7b |
| 10cf10c16c |
| 7d4a78e470 |
| 37c87affd0 |
| 3863bd9da3 |
| 4b2e3aa54d |
| d699efa5bc |
| b9a1374b8f |
| 411ea75861 |
| 375c9a1c20 |
| 907340b1e1 |
| 0f32d260b7 |
| 92bc04dc87 |
| 929b1f4a41 |
| 6d7b4b8e8a |
| 4a14ee0e01 |
| f268ea4e39 |
| 78face3481 |
| 5a0e8261bf |
| 0447fa2dcb |
| 4fd163698c |
| 224438a108 |
| 81d2d5abae |
| 734e871e8f |
| b0350e9bc8 |
| 0a25efd054 |
| 46905175a9 |
| 11085783ef |
| 3d57c14bb3 |
| 18f3190857 |
| fcc056fe6a |
| c1bfc1f47b |
| 14bf87e5e7 |
| 715ce8538b |
| 1987bc9cc5 |
| 0b079df4ae |
| a514c9e28b |
| 8cf2806489 |
| eb446471cc |
| 7392d07331 |
15
.github/pull_request_template.md
vendored
@@ -42,21 +42,6 @@ Please provide steps on how to test changes, any hardware or
|
||||
software specifications as well as any other pertinent information.
|
||||
-->
|
||||
|
||||
## Merge Plan
|
||||
|
||||
<!--
|
||||
A merge plan describes how this PR should be handled after it is approved.
|
||||
|
||||
Example merge plans:
|
||||
- "This PR can be merged when approved"
|
||||
- "This must be squash-merged when approved"
|
||||
- "DO NOT MERGE - I will rebase and tidy commits before merging"
|
||||
- "#dev-chat on discord needs to be advised of this change when it is merged"
|
||||
|
||||
A merge plan is particularly important for large PRs or PRs that touch the
|
||||
database in any way.
|
||||
-->
|
||||
|
||||
## Added/updated tests?
|
||||
|
||||
- [ ] Yes
|
||||
|
||||
24
.github/workflows/lint-frontend.yml
vendored
@@ -22,22 +22,12 @@ jobs:
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Setup Node 18
|
||||
uses: actions/setup-node@v4
|
||||
uses: actions/setup-node@v3
|
||||
with:
|
||||
node-version: '18'
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
- name: Setup pnpm
|
||||
uses: pnpm/action-setup@v2
|
||||
with:
|
||||
version: '8.12.1'
|
||||
- name: Install dependencies
|
||||
run: 'pnpm install --prefer-frozen-lockfile'
|
||||
- name: Typescript
|
||||
run: 'pnpm run lint:tsc'
|
||||
- name: Madge
|
||||
run: 'pnpm run lint:madge'
|
||||
- name: ESLint
|
||||
run: 'pnpm run lint:eslint'
|
||||
- name: Prettier
|
||||
run: 'pnpm run lint:prettier'
|
||||
- uses: actions/checkout@v3
|
||||
- run: 'yarn install --frozen-lockfile'
|
||||
- run: 'yarn run lint:tsc'
|
||||
- run: 'yarn run lint:madge'
|
||||
- run: 'yarn run lint:eslint'
|
||||
- run: 'yarn run lint:prettier'
|
||||
|
||||
50
.github/workflows/pypi-release.yml
vendored
@@ -1,15 +1,13 @@
|
||||
name: PyPI Release
|
||||
|
||||
on:
|
||||
push:
|
||||
paths:
|
||||
- 'invokeai/version/invokeai_version.py'
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
publish_package:
|
||||
description: 'Publish build on PyPi? [true/false]'
|
||||
required: true
|
||||
default: 'false'
|
||||
|
||||
jobs:
|
||||
build-and-release:
|
||||
release:
|
||||
if: github.repository == 'invoke-ai/InvokeAI'
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
@@ -17,43 +15,19 @@ jobs:
|
||||
TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
|
||||
TWINE_NON_INTERACTIVE: 1
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
- name: checkout sources
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Setup Node 18
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: '18'
|
||||
|
||||
- name: Setup pnpm
|
||||
uses: pnpm/action-setup@v2
|
||||
with:
|
||||
version: '8.12.1'
|
||||
|
||||
- name: Install frontend dependencies
|
||||
run: pnpm install --prefer-frozen-lockfile
|
||||
working-directory: invokeai/frontend/web
|
||||
|
||||
- name: Build frontend
|
||||
run: pnpm run build
|
||||
working-directory: invokeai/frontend/web
|
||||
|
||||
- name: Install python dependencies
|
||||
- name: install deps
|
||||
run: pip install --upgrade build twine
|
||||
|
||||
- name: Build python package
|
||||
- name: build package
|
||||
run: python3 -m build
|
||||
|
||||
- name: Upload build as workflow artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: dist
|
||||
path: dist
|
||||
|
||||
- name: Check distribution
|
||||
- name: check distribution
|
||||
run: twine check dist/*
|
||||
|
||||
- name: Check PyPI versions
|
||||
- name: check PyPI versions
|
||||
if: github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release/')
|
||||
run: |
|
||||
pip install --upgrade requests
|
||||
@@ -62,6 +36,6 @@ jobs:
|
||||
EXISTS=scripts.pypi_helper.local_on_pypi(); \
|
||||
print(f'PACKAGE_EXISTS={EXISTS}')" >> $GITHUB_ENV
|
||||
|
||||
- name: Publish build on PyPi
|
||||
if: env.PACKAGE_EXISTS == 'False' && env.TWINE_PASSWORD != '' && github.event.inputs.publish_package == 'true'
|
||||
- name: upload package
|
||||
if: env.PACKAGE_EXISTS == 'False' && env.TWINE_PASSWORD != ''
|
||||
run: twine upload dist/*
|
||||
|
||||
3
.gitignore
vendored
@@ -16,7 +16,7 @@ __pycache__/
|
||||
.Python
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
# dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
@@ -187,4 +187,3 @@ installer/install.bat
|
||||
installer/install.sh
|
||||
installer/update.bat
|
||||
installer/update.sh
|
||||
installer/InvokeAI-Installer/
|
||||
|
||||
33
Makefile
@@ -1,20 +1,6 @@
|
||||
# simple Makefile with scripts that are otherwise hard to remember
|
||||
# to use, run from the repo root `make <command>`
|
||||
|
||||
default: help
|
||||
|
||||
help:
|
||||
@echo Developer commands:
|
||||
@echo
|
||||
@echo "ruff Run ruff, fixing any safely-fixable errors and formatting"
|
||||
@echo "ruff-unsafe Run ruff, fixing all fixable errors and formatting"
|
||||
@echo "mypy Run mypy using the config in pyproject.toml to identify type mismatches and other coding errors"
|
||||
@echo "mypy-all Run mypy ignoring the config in pyproject.tom but still ignoring missing imports"
|
||||
@echo "frontend-build Build the frontend in order to run on localhost:9090"
|
||||
@echo "frontend-dev Run the frontend in developer mode on localhost:5173"
|
||||
@echo "installer-zip Build the installer .zip file for the current version"
|
||||
@echo "tag-release Tag the GitHub repository with the current version (use at release time only!)"
|
||||
|
||||
# Runs ruff, fixing any safely-fixable errors and formatting
|
||||
ruff:
|
||||
ruff check . --fix
|
||||
@@ -32,21 +18,4 @@ mypy:
|
||||
# Runs mypy, ignoring the config in pyproject.toml but still ignoring missing (untyped) imports
|
||||
# (many files are ignored by the config, so this is useful for checking all files)
|
||||
mypy-all:
|
||||
mypy scripts/invokeai-web.py --config-file= --ignore-missing-imports
|
||||
|
||||
# Build the frontend
|
||||
frontend-build:
|
||||
cd invokeai/frontend/web && pnpm build
|
||||
|
||||
# Run the frontend in dev mode
|
||||
frontend-dev:
|
||||
cd invokeai/frontend/web && pnpm dev
|
||||
|
||||
# Installer zip file
|
||||
installer-zip:
|
||||
cd installer && ./create_installer.sh
|
||||
|
||||
# Tag the release
|
||||
tag-release:
|
||||
cd installer && ./tag_release.sh
|
||||
|
||||
mypy scripts/invokeai-web.py --config-file= --ignore-missing-imports
|
||||
@@ -125,8 +125,8 @@ and go to http://localhost:9090.
|
||||
|
||||
You must have Python 3.10 through 3.11 installed on your machine. Earlier or
|
||||
later versions are not supported.
|
||||
Node.js also needs to be installed along with `pnpm` (can be installed with
|
||||
the command `npm install -g pnpm` if needed)
|
||||
Node.js also needs to be installed along with yarn (can be installed with
|
||||
the command `npm install -g yarn` if needed)
|
||||
|
||||
1. Open a command-line window on your machine. The PowerShell is recommended for Windows.
|
||||
2. Create a directory to install InvokeAI into. You'll need at least 15 GB of free space:
|
||||
|
||||
@@ -59,16 +59,14 @@ RUN --mount=type=cache,target=/root/.cache/pip \
|
||||
|
||||
# #### Build the Web UI ------------------------------------
|
||||
|
||||
FROM node:18-slim AS web-builder
|
||||
ENV PNPM_HOME="/pnpm"
|
||||
ENV PATH="$PNPM_HOME:$PATH"
|
||||
RUN corepack enable
|
||||
|
||||
FROM node:18 AS web-builder
|
||||
WORKDIR /build
|
||||
COPY invokeai/frontend/web/ ./
|
||||
RUN --mount=type=cache,target=/pnpm/store \
|
||||
pnpm install --frozen-lockfile
|
||||
RUN pnpm run build
|
||||
RUN --mount=type=cache,target=/usr/lib/node_modules \
|
||||
npm install --include dev
|
||||
RUN --mount=type=cache,target=/usr/lib/node_modules \
|
||||
yarn vite build
|
||||
|
||||
|
||||
#### Runtime stage ---------------------------------------
|
||||
|
||||
@@ -102,8 +100,6 @@ ENV INVOKEAI_SRC=/opt/invokeai
|
||||
ENV VIRTUAL_ENV=/opt/venv/invokeai
|
||||
ENV INVOKEAI_ROOT=/invokeai
|
||||
ENV PATH="$VIRTUAL_ENV/bin:$INVOKEAI_SRC:$PATH"
|
||||
ENV CONTAINER_UID=${CONTAINER_UID:-1000}
|
||||
ENV CONTAINER_GID=${CONTAINER_GID:-1000}
|
||||
|
||||
# --link requires buildkit w/ dockerfile syntax 1.4
|
||||
COPY --link --from=builder ${INVOKEAI_SRC} ${INVOKEAI_SRC}
|
||||
@@ -121,7 +117,7 @@ WORKDIR ${INVOKEAI_SRC}
|
||||
RUN cd /usr/lib/$(uname -p)-linux-gnu/pkgconfig/ && ln -sf opencv4.pc opencv.pc
|
||||
RUN python3 -c "from patchmatch import patch_match"
|
||||
|
||||
RUN mkdir -p ${INVOKEAI_ROOT} && chown -R ${CONTAINER_UID}:${CONTAINER_GID} ${INVOKEAI_ROOT}
|
||||
RUN mkdir -p ${INVOKEAI_ROOT} && chown -R 1000:1000 ${INVOKEAI_ROOT}
|
||||
|
||||
COPY docker/docker-entrypoint.sh ./
|
||||
ENTRYPOINT ["/opt/invokeai/docker-entrypoint.sh"]
|
||||
|
||||
@@ -23,7 +23,7 @@ This is done via Docker Desktop preferences
|
||||
1. Make a copy of `env.sample` and name it `.env` (`cp env.sample .env` (Mac/Linux) or `copy example.env .env` (Windows)). Make changes as necessary. Set `INVOKEAI_ROOT` to an absolute path to:
|
||||
a. the desired location of the InvokeAI runtime directory, or
|
||||
b. an existing, v3.0.0 compatible runtime directory.
|
||||
1. Execute `run.sh`
|
||||
1. `docker compose up`
|
||||
|
||||
The image will be built automatically if needed.
|
||||
|
||||
@@ -39,7 +39,7 @@ The Docker daemon on the system must be already set up to use the GPU. In case o
|
||||
|
||||
## Customize
|
||||
|
||||
Check the `.env.sample` file. It contains some environment variables for running in Docker. Copy it, name it `.env`, and fill it in with your own values. Next time you run `run.sh`, your custom values will be used.
|
||||
Check the `.env.sample` file. It contains some environment variables for running in Docker. Copy it, name it `.env`, and fill it in with your own values. Next time you run `docker compose up`, your custom values will be used.
|
||||
|
||||
You can also set these values in `docker-compose.yml` directly, but `.env` will help avoid conflicts when code is updated.
|
||||
|
||||
|
||||
11
docker/build.sh
Executable file
@@ -0,0 +1,11 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
build_args=""
|
||||
|
||||
[[ -f ".env" ]] && build_args=$(awk '$1 ~ /\=[^$]/ {print "--build-arg " $0 " "}' .env)
|
||||
|
||||
echo "docker compose build args:"
|
||||
echo $build_args
|
||||
|
||||
docker compose build $build_args
|
||||
@@ -2,8 +2,23 @@
|
||||
|
||||
version: '3.8'
|
||||
|
||||
x-invokeai: &invokeai
|
||||
services:
|
||||
invokeai:
|
||||
image: "local/invokeai:latest"
|
||||
# edit below to run on a container runtime other than nvidia-container-runtime.
|
||||
# not yet tested with rocm/AMD GPUs
|
||||
# Comment out the "deploy" section to run on CPU only
|
||||
deploy:
|
||||
resources:
|
||||
reservations:
|
||||
devices:
|
||||
- driver: nvidia
|
||||
count: 1
|
||||
capabilities: [gpu]
|
||||
# For AMD support, comment out the deploy section above and uncomment the devices section below:
|
||||
#devices:
|
||||
# - /dev/kfd:/dev/kfd
|
||||
# - /dev/dri:/dev/dri
|
||||
build:
|
||||
context: ..
|
||||
dockerfile: docker/Dockerfile
|
||||
@@ -35,27 +50,3 @@ x-invokeai: &invokeai
|
||||
# - |
|
||||
# invokeai-model-install --yes --default-only --config_file ${INVOKEAI_ROOT}/config_custom.yaml
|
||||
# invokeai-nodes-web --host 0.0.0.0
|
||||
|
||||
services:
|
||||
invokeai-nvidia:
|
||||
<<: *invokeai
|
||||
deploy:
|
||||
resources:
|
||||
reservations:
|
||||
devices:
|
||||
- driver: nvidia
|
||||
count: 1
|
||||
capabilities: [gpu]
|
||||
|
||||
invokeai-cpu:
|
||||
<<: *invokeai
|
||||
profiles:
|
||||
- cpu
|
||||
|
||||
invokeai-rocm:
|
||||
<<: *invokeai
|
||||
devices:
|
||||
- /dev/kfd:/dev/kfd
|
||||
- /dev/dri:/dev/dri
|
||||
profiles:
|
||||
- rocm
|
||||
|
||||
@@ -1,28 +1,11 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
run() {
|
||||
local scriptdir=$(dirname "${BASH_SOURCE[0]}")
|
||||
cd "$scriptdir" || exit 1
|
||||
# This script is provided for backwards compatibility with the old docker setup.
|
||||
# it doesn't do much aside from wrapping the usual docker compose CLI.
|
||||
|
||||
local build_args=""
|
||||
local profile=""
|
||||
SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
|
||||
cd "$SCRIPTDIR" || exit 1
|
||||
|
||||
[[ -f ".env" ]] &&
|
||||
build_args=$(awk '$1 ~ /=[^$]/ && $0 !~ /^#/ {print "--build-arg " $0 " "}' .env) &&
|
||||
profile="$(awk -F '=' '/GPU_DRIVER/ {print $2}' .env)"
|
||||
|
||||
local service_name="invokeai-$profile"
|
||||
|
||||
printf "%s\n" "docker compose build args:"
|
||||
printf "%s\n" "$build_args"
|
||||
|
||||
docker compose build $build_args
|
||||
unset build_args
|
||||
|
||||
printf "%s\n" "starting service $service_name"
|
||||
docker compose --profile "$profile" up -d "$service_name"
|
||||
docker compose logs -f
|
||||
}
|
||||
|
||||
run
|
||||
docker compose up -d
|
||||
docker compose logs -f
|
||||
|
||||
@@ -11,7 +11,7 @@ complex functionality.
|
||||
|
||||
InvokeAI Nodes can be found in the `invokeai/app/invocations` directory. These can be used as examples to create your own nodes.
|
||||
|
||||
New nodes should be added to a subfolder in the `nodes` directory found at the root level of the InvokeAI installation location. Nodes added to this folder will be imported upon application startup.
|
||||
New nodes should be added to a subfolder in `nodes` direction found at the root level of the InvokeAI installation location. Nodes added to this folder will be able to be used upon application startup.
|
||||
|
||||
Example `nodes` subfolder structure:
|
||||
```py
|
||||
|
||||
@@ -10,36 +10,40 @@ model. These are the:
|
||||
tracks the type of the model, its provenance, and where it can be
|
||||
found on disk.
|
||||
|
||||
* _ModelLoadServiceBase_ Responsible for loading a model from disk
|
||||
into RAM and VRAM and getting it ready for inference.
|
||||
|
||||
* _DownloadQueueServiceBase_ A multithreaded downloader responsible
|
||||
for downloading models from a remote source to disk. The download
|
||||
queue has special methods for downloading repo_id folders from
|
||||
Hugging Face, as well as discriminating among model versions in
|
||||
Civitai, but can be used for arbitrary content.
|
||||
|
||||
* _ModelInstallServiceBase_ A service for installing models to
|
||||
disk. It uses `DownloadQueueServiceBase` to download models and
|
||||
their metadata, and `ModelRecordServiceBase` to store that
|
||||
information. It is also responsible for managing the InvokeAI
|
||||
`models` directory and its contents.
|
||||
|
||||
* _DownloadQueueServiceBase_ (**CURRENTLY UNDER DEVELOPMENT - NOT IMPLEMENTED**)
|
||||
A multithreaded downloader responsible
|
||||
for downloading models from a remote source to disk. The download
|
||||
queue has special methods for downloading repo_id folders from
|
||||
Hugging Face, as well as discriminating among model versions in
|
||||
Civitai, but can be used for arbitrary content.
|
||||
|
||||
* _ModelLoadServiceBase_ (**CURRENTLY UNDER DEVELOPMENT - NOT IMPLEMENTED**)
|
||||
Responsible for loading a model from disk
|
||||
into RAM and VRAM and getting it ready for inference.
|
||||
|
||||
|
||||
## Location of the Code
|
||||
|
||||
All four of these services can be found in
|
||||
`invokeai/app/services` in the following directories:
|
||||
|
||||
* `invokeai/app/services/model_records/`
|
||||
* `invokeai/app/services/downloads/`
|
||||
* `invokeai/app/services/model_loader/`
|
||||
* `invokeai/app/services/model_install/`
|
||||
* `invokeai/app/services/model_loader/` (**under development**)
|
||||
* `invokeai/app/services/downloads/` (**under development**)
|
||||
|
||||
With the exception of the install service, each of these is a thin
|
||||
shell around a corresponding implementation located in
|
||||
`invokeai/backend/model_manager`. The main difference between the
|
||||
modules found in app services and those in the backend folder is that
|
||||
the former add support for event reporting and are more tied to the
|
||||
needs of the InvokeAI API.
|
||||
|
||||
Code related to the FastAPI web API can be found in
|
||||
`invokeai/app/api/routers/model_records.py`.
|
||||
`invokeai/app/api/routers/models.py`.
|
||||
|
||||
***
|
||||
|
||||
@@ -161,6 +165,10 @@ of the fields, including `name`, `model_type` and `base_model`, are
|
||||
shared between `ModelConfigBase` and `ModelBase`, and this is a
|
||||
potential source of confusion.
|
||||
|
||||
**TO DO:** The `ModelBase` code needs to be revised to reduce the
|
||||
duplication of similar classes and to support using the `key` as the
|
||||
primary model identifier.
|
||||
|
||||
## Reading and Writing Model Configuration Records
|
||||
|
||||
The `ModelRecordService` provides the ability to retrieve model
|
||||
@@ -354,7 +362,7 @@ model and pass its key to `get_model()`.
|
||||
Several methods allow you to create and update stored model config
|
||||
records.
|
||||
|
||||
#### add_model(key, config) -> AnyModelConfig:
|
||||
#### add_model(key, config) -> ModelConfigBase:
|
||||
|
||||
Given a key and a configuration, this will add the model's
|
||||
configuration record to the database. `config` can either be a subclass of
|
||||
@@ -378,356 +386,27 @@ fields to be updated. This will return an `AnyModelConfig` on success,
|
||||
or raise `InvalidModelConfigException` or `UnknownModelException`
|
||||
exceptions on failure.
|
||||
|
||||
***TO DO:*** Investigate why `update_model()` returns an
|
||||
`AnyModelConfig` while `add_model()` returns a `ModelConfigBase`.
|
||||
|
||||
### rename_model(key, new_name) -> ModelConfigBase:
|
||||
|
||||
This is a special case of `update_model()` for the use case of
|
||||
changing the model's name. It is broken out because there are cases in
|
||||
which the InvokeAI application wants to synchronize the model's name
|
||||
with its path in the `models` directory after changing the name, type
|
||||
or base. However, when using the ModelRecordService directly, the call
|
||||
is equivalent to:
|
||||
|
||||
```
|
||||
store.update_model(key, {'name': 'new_name'})
|
||||
```
|
||||
|
||||
***TO DO:*** Investigate why `rename_model()` is returning a
|
||||
`ModelConfigBase` while `update_model()` returns a `AnyModelConfig`.
|
||||
|
||||
***
|
||||
|
||||
## Model installation
|
||||
|
||||
The `ModelInstallService` class implements the
|
||||
`ModelInstallServiceBase` abstract base class, and provides a one-stop
|
||||
shop for all your model install needs. It provides the following
|
||||
functionality:
|
||||
|
||||
- Registering a model config record for a model already located on the
|
||||
local filesystem, without moving it or changing its path.
|
||||
|
||||
- Installing a model already located on the local filesystem, by
|
||||
moving it into the InvokeAI root directory under the
|
||||
`models` folder (or wherever config parameter `models_dir`
|
||||
specifies).
|
||||
|
||||
- Probing of models to determine their type, base type and other key
|
||||
information.
|
||||
|
||||
- Interface with the InvokeAI event bus to provide status updates on
|
||||
the download, installation and registration process.
|
||||
|
||||
- Downloading a model from an arbitrary URL and installing it in
|
||||
`models_dir` (_implementation pending_).
|
||||
|
||||
- Special handling for Civitai model URLs which allow the user to
|
||||
paste in a model page's URL or download link (_implementation pending_).
|
||||
|
||||
|
||||
- Special handling for HuggingFace repo_ids to recursively download
|
||||
the contents of the repository, paying attention to alternative
|
||||
variants such as fp16. (_implementation pending_)
|
||||
|
||||
### Initializing the installer
|
||||
|
||||
A default installer is created at InvokeAI api startup time and stored
|
||||
in `ApiDependencies.invoker.services.model_install` and can
|
||||
also be retrieved from an invocation's `context` argument with
|
||||
`context.services.model_install`.
|
||||
|
||||
In the event you wish to create a new installer, you may use the
|
||||
following initialization pattern:
|
||||
|
||||
```
|
||||
from invokeai.app.services.config import InvokeAIAppConfig
|
||||
from invokeai.app.services.model_records import ModelRecordServiceSQL
|
||||
from invokeai.app.services.model_install import ModelInstallService
|
||||
from invokeai.app.services.shared.sqlite import SqliteDatabase
|
||||
from invokeai.backend.util.logging import InvokeAILogger
|
||||
|
||||
config = InvokeAIAppConfig.get_config()
|
||||
config.parse_args()
|
||||
logger = InvokeAILogger.get_logger(config=config)
|
||||
db = SqliteDatabase(config, logger)
|
||||
|
||||
store = ModelRecordServiceSQL(db)
|
||||
installer = ModelInstallService(config, store)
|
||||
```
|
||||
|
||||
The full form of `ModelInstallService()` takes the following
|
||||
required parameters:
|
||||
|
||||
| **Argument** | **Type** | **Description** |
|
||||
|------------------|------------------------------|------------------------------|
|
||||
| `config` | InvokeAIAppConfig | InvokeAI app configuration object |
|
||||
| `record_store` | ModelRecordServiceBase | Config record storage database |
|
||||
| `event_bus` | EventServiceBase | Optional event bus to send download/install progress events to |
|
||||
|
||||
Once initialized, the installer will provide the following methods:
|
||||
|
||||
#### install_job = installer.import_model()
|
||||
|
||||
The `import_model()` method is the core of the installer. The
|
||||
following illustrates basic usage:
|
||||
|
||||
```
|
||||
from invokeai.app.services.model_install import (
|
||||
LocalModelSource,
|
||||
HFModelSource,
|
||||
URLModelSource,
|
||||
)
|
||||
|
||||
source1 = LocalModelSource(path='/opt/models/sushi.safetensors') # a local safetensors file
|
||||
source2 = LocalModelSource(path='/opt/models/sushi_diffusers') # a local diffusers folder
|
||||
|
||||
source3 = HFModelSource(repo_id='runwayml/stable-diffusion-v1-5') # a repo_id
|
||||
source4 = HFModelSource(repo_id='runwayml/stable-diffusion-v1-5', subfolder='vae') # a subfolder within a repo_id
|
||||
source5 = HFModelSource(repo_id='runwayml/stable-diffusion-v1-5', variant='fp16') # a named variant of a HF model
|
||||
|
||||
source6 = URLModelSource(url='https://civitai.com/api/download/models/63006') # model located at a URL
|
||||
source7 = URLModelSource(url='https://civitai.com/api/download/models/63006', access_token='letmein') # with an access token
|
||||
|
||||
sources = [source1, source2, source3, source4, source5, source6, source7]
for source in sources:
    install_job = installer.import_model(source)
|
||||
|
||||
source2job = installer.wait_for_installs()
|
||||
for source in sources:
|
||||
job = source2job[source]
|
||||
if job.status == "completed":
|
||||
model_config = job.config_out
|
||||
model_key = model_config.key
|
||||
print(f"{source} installed as {model_key}")
|
||||
elif job.status == "error":
|
||||
print(f"{source}: {job.error_type}.\nStack trace:\n{job.error}")
|
||||
|
||||
```
|
||||
|
||||
As shown here, the `import_model()` method accepts a variety of
|
||||
sources, including local safetensors files, local diffusers folders,
|
||||
HuggingFace repo_ids with and without a subfolder designation,
|
||||
Civitai model URLs and arbitrary URLs that point to checkpoint files
|
||||
(but not to folders).
|
||||
|
||||
Each call to `import_model()` returns a `ModelInstallJob` job,
|
||||
an object which tracks the progress of the install.
|
||||
|
||||
If a remote model is requested, the model's files are downloaded in
|
||||
parallel across multiple threads using the download
|
||||
queue. During the download process, the `ModelInstallJob` is updated
|
||||
to provide status and progress information. After the files (if any)
|
||||
are downloaded, the remainder of the installation runs in a single
|
||||
serialized background thread. These are the model probing, file
|
||||
copying, and config record database update steps.
|
||||
|
||||
Multiple install jobs can be queued up. You may block until all
|
||||
install jobs are completed (or errored) by calling the
|
||||
`wait_for_installs()` method as shown in the code
|
||||
example. `wait_for_installs()` will return a `dict` that maps the
|
||||
requested source to its job. This object can be interrogated
|
||||
to determine its status. If the job errored out, then the error type
|
||||
and details can be recovered from `job.error_type` and `job.error`.
|
||||
|
||||
The full list of arguments to `import_model()` is as follows:
|
||||
|
||||
| **Argument** | **Type** | **Default** | **Description** |
|
||||
|------------------|------------------------------|-------------|-------------------------------------------|
|
||||
| `source` | Union[str, Path, AnyHttpUrl] | | The source of the model, Path, URL or repo_id |
|
||||
| `inplace` | bool | True | Leave a local model in its current location |
|
||||
| `variant` | str | None | Desired variant, such as 'fp16' or 'onnx' (HuggingFace only) |
|
||||
| `subfolder` | str | None | Repository subfolder (HuggingFace only) |
|
||||
| `config` | Dict[str, Any] | None | Override all or a portion of model's probed attributes |
|
||||
| `access_token` | str | None | Provide authorization information needed to download |
|
||||
|
||||
|
||||
The `inplace` field controls how local model Paths are handled. If
|
||||
True (the default), then the model is simply registered in its current
|
||||
location by the installer's `ModelConfigRecordService`. Otherwise, a
|
||||
copy of the model is put into the location specified by the `models_dir`
|
||||
application configuration parameter.
|
||||
|
||||
The `variant` field is used for HuggingFace repo_ids only. If
|
||||
provided, the repo_id download handler will look for and download
|
||||
tensor files that follow the convention for the selected variant:
|
||||
|
||||
- "fp16" will select files named "*model.fp16.{safetensors,bin}"
|
||||
- "onnx" will select files ending with the suffix ".onnx"
|
||||
- "openvino" will select files beginning with "openvino_model"
|
||||
|
||||
In the special case of the "fp16" variant, the installer will select
|
||||
the 32-bit version of the files if the 16-bit version is unavailable.
|
||||
|
||||
`subfolder` is used for HuggingFace repo_ids only. If provided, the
|
||||
model will be downloaded from the designated subfolder rather than the
|
||||
top-level repository folder. If a subfolder is attached to the repo_id
|
||||
using the format `repo_owner/repo_name:subfolder`, then the subfolder
|
||||
specified by the repo_id will override the subfolder argument.
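For example, the following sketch (assuming `import_model()` accepts the arguments listed in the table above) requests the `vae` subfolder in two equivalent ways:

```
# Sketch only: two equivalent ways to request a repository subfolder.
job_a = installer.import_model(
    source='runwayml/stable-diffusion-v1-5',
    subfolder='vae',
)
job_b = installer.import_model(source='runwayml/stable-diffusion-v1-5:vae')
```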
|
||||
|
||||
`config` can be used to override all or a portion of the configuration
|
||||
attributes returned by the model prober. See the section below for
|
||||
details.
|
||||
|
||||
`access_token` is passed to the download queue and used to access
|
||||
repositories that require it.
|
||||
|
||||
#### Monitoring the install job process
|
||||
|
||||
When you create an install job with `import_model()`, it launches the
|
||||
download and installation process in the background and returns a
|
||||
`ModelInstallJob` object for monitoring the process.
|
||||
|
||||
The `ModelInstallJob` class has the following structure:
|
||||
|
||||
| **Attribute** | **Type** | **Description** |
|
||||
|----------------|-----------------|------------------|
|
||||
| `status`       | `InstallStatus` | An enum of ["waiting", "running", "completed", "error"] |
|
||||
| `config_in` | `dict` | Overriding configuration values provided by the caller |
|
||||
| `config_out` | `AnyModelConfig`| After successful completion, contains the configuration record written to the database |
|
||||
| `inplace` | `boolean` | True if the caller asked to install the model in place using its local path |
|
||||
| `source` | `ModelSource` | The local path, remote URL or repo_id of the model to be installed |
|
||||
| `local_path` | `Path` | If a remote model, holds the path of the model after it is downloaded; if a local model, same as `source` |
|
||||
| `error_type` | `str` | Name of the exception that led to an error status |
|
||||
| `error` | `str` | Traceback of the error |
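As a quick sketch (using only the field names listed in the table above), a job can be inspected once the installs have finished:

```
# Sketch: inspect a finished ModelInstallJob using the fields in the table above.
if install_job.status == "completed":
    print(f"{install_job.source} installed at {install_job.local_path} "
          f"with key {install_job.config_out.key}")
elif install_job.status == "error":
    print(f"{install_job.source} failed: {install_job.error_type}\n{install_job.error}")
```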
|
||||
|
||||
|
||||
If the `event_bus` argument was provided, events will also be
|
||||
broadcast to the InvokeAI event bus. The events will appear on the bus
|
||||
as an event of type `EventServiceBase.model_event`, a timestamp and
|
||||
the following event names:
|
||||
|
||||
- `model_install_started`
|
||||
|
||||
The payload will contain the keys `timestamp` and `source`. The latter
|
||||
indicates the requested model source for installation.
|
||||
|
||||
- `model_install_progress`
|
||||
|
||||
Emitted at regular intervals when downloading a remote model, the
|
||||
payload will contain the keys `timestamp`, `source`, `current_bytes`
|
||||
and `total_bytes`. These events are _not_ emitted when a local model
|
||||
already on the filesystem is imported.
|
||||
|
||||
- `model_install_completed`
|
||||
|
||||
Issued once at the end of a successful installation. The payload will
|
||||
contain the keys `timestamp`, `source` and `key`, where `key` is the
|
||||
ID under which the model has been registered.
|
||||
|
||||
- `model_install_error`
|
||||
|
||||
Emitted if the installation process fails for some reason. The payload
|
||||
will contain the keys `timestamp`, `source`, `error_type` and
|
||||
`error`. `error_type` is a short message indicating the nature of the
|
||||
error, and `error` is the long traceback to help debug the problem.
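A minimal sketch of a listener for these events follows; the handler signature and the way it is registered on the event bus are assumptions for illustration, but the event names and payload keys are those described above:

```
# Sketch: dispatch on the event names and payload keys described above.
def on_model_install_event(event_name: str, payload: dict) -> None:
    if event_name == "model_install_started":
        print(f"started: {payload['source']}")
    elif event_name == "model_install_progress":
        pct = 100 * payload["current_bytes"] / payload["total_bytes"]
        print(f"{payload['source']}: {pct:.0f}% downloaded")
    elif event_name == "model_install_completed":
        print(f"{payload['source']} registered under key {payload['key']}")
    elif event_name == "model_install_error":
        print(f"{payload['source']} failed: {payload['error_type']}\n{payload['error']}")
```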
|
||||
|
||||
#### Model configuration and probing
|
||||
|
||||
The install service uses the `invokeai.backend.model_manager.probe`
|
||||
module during import to determine the model's type, base type, and
|
||||
other configuration parameters. Among other things, it assigns a
|
||||
default name and description for the model based on probed
|
||||
fields.
|
||||
|
||||
When downloading remote models is implemented, additional
|
||||
configuration information, such as list of trigger terms, will be
|
||||
retrieved from the HuggingFace and Civitai model repositories.
|
||||
|
||||
The probed values can be overridden by providing a dictionary in the
|
||||
optional `config` argument passed to `import_model()`. You may provide
|
||||
overriding values for any of the model's configuration
|
||||
attributes. Here is an example of setting the
|
||||
`SchedulerPredictionType` and `name` for an sd-2 model:
|
||||
|
||||
This is typically used to set
|
||||
the model's name and description, but can also be used to overcome
|
||||
cases in which automatic probing is unable to (correctly) determine
|
||||
the model's attributes. The most common situation is the
|
||||
`prediction_type` field for sd-2 (and rare sd-1) models. Here is an
|
||||
example of how it works:
|
||||
|
||||
```
|
||||
install_job = installer.import_model(
|
||||
source='stabilityai/stable-diffusion-2-1',
|
||||
variant='fp16',
|
||||
config=dict(
|
||||
prediction_type=SchedulerPredictionType('v_prediction'),
|
||||
name='stable diffusion 2 base model',
|
||||
)
|
||||
)
|
||||
```
|
||||
|
||||
### Other installer methods
|
||||
|
||||
This section describes additional methods provided by the installer class.
|
||||
|
||||
#### jobs = installer.wait_for_installs()
|
||||
|
||||
Block until all pending installs are completed or errored and then
|
||||
returns a list of completed jobs.
|
||||
|
||||
#### jobs = installer.list_jobs([source])
|
||||
|
||||
Return a list of all active and complete `ModelInstallJobs`. An
|
||||
optional `source` argument allows you to filter the returned list by a
|
||||
model source string pattern using a partial string match.
|
||||
|
||||
#### jobs = installer.get_job(source)
|
||||
|
||||
Return a list of `ModelInstallJob` corresponding to the indicated
|
||||
model source.
|
||||
|
||||
#### installer.prune_jobs
|
||||
|
||||
Remove non-pending jobs (completed or errored) from the job list
|
||||
returned by `list_jobs()` and `get_job()`.
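A short sketch of these three methods together (the argument style is an assumption based on the descriptions above):

```
# Sketch: inspect and prune the installer's job list.
all_jobs = installer.list_jobs()                    # every active and completed job
civitai_jobs = installer.list_jobs('civitai.com')   # partial match on the source string
jobs = installer.get_job('https://civitai.com/api/download/models/63006')
installer.prune_jobs()                              # drop completed and errored jobs
```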
|
||||
|
||||
#### installer.app_config, installer.record_store, installer.event_bus
|
||||
|
||||
Properties that provide access to the installer's `InvokeAIAppConfig`,
|
||||
`ModelRecordServiceBase` and `EventServiceBase` objects.
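For example (a sketch; attribute access on the config object is an assumption, and `get_model()` is the record-store method described earlier):

```
# Sketch: the installer exposes its collaborators as properties.
models_dir = installer.app_config.models_dir                  # assumed attribute for the models directory
config_record = installer.record_store.get_model(model_key)   # look up a stored config record
```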
|
||||
|
||||
#### key = installer.register_path(model_path, config), key = installer.install_path(model_path, config)
|
||||
|
||||
These methods bypass the download queue and directly register or
|
||||
install the model at the indicated path, returning the unique ID for
|
||||
the installed model.
|
||||
|
||||
Both methods accept a Path object corresponding to a checkpoint or
|
||||
diffusers folder, and an optional dict of config attributes to use to
|
||||
override the values derived from model probing.
|
||||
|
||||
The difference between `register_path()` and `install_path()` is that
|
||||
the former creates a model configuration record without changing the
|
||||
location of the model in the filesystem. The latter makes a copy of
|
||||
the model inside the InvokeAI models directory before registering
|
||||
it.
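A sketch of both calls (the config-override dict is optional, per the description above; the paths are illustrative):

```
from pathlib import Path

# Register a model where it sits, without copying it:
key1 = installer.register_path(Path('/opt/models/sushi.safetensors'))

# Copy the model into the models directory, overriding some probed attributes:
key2 = installer.install_path(
    Path('/opt/models/sushi_diffusers'),
    {'name': 'sushi', 'description': 'my fine-tuned model'},
)
```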
|
||||
|
||||
#### installer.unregister(key)
|
||||
|
||||
This will remove the model config record for the model at key, and is
|
||||
equivalent to `installer.record_store.del_model(key)`
|
||||
|
||||
#### installer.delete(key)
|
||||
|
||||
This is similar to `unregister()` but has the additional effect of
|
||||
conditionally deleting the underlying model file(s) if they reside
|
||||
within the InvokeAI models directory
|
||||
|
||||
#### installer.unconditionally_delete(key)
|
||||
|
||||
This method is similar to `unregister()`, but also unconditionally
|
||||
deletes the corresponding model weights file(s), regardless of whether
|
||||
they are inside or outside the InvokeAI models hierarchy.
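In sketch form, the three removal methods differ only in how they treat the model files:

```
installer.unregister(key)               # delete the config record only
installer.delete(key)                   # also delete files, but only if inside the models directory
installer.unconditionally_delete(key)   # delete the record and the files wherever they live
```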
|
||||
|
||||
#### List[str]=installer.scan_directory(scan_dir: Path, install: bool)
|
||||
|
||||
This method will recursively scan the directory indicated in
|
||||
`scan_dir` for new models and either install them in the models
|
||||
directory or register them in place, depending on the setting of
|
||||
`install` (default False).
|
||||
|
||||
The return value is the list of keys of the new installed/registered
|
||||
models.
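For example (a sketch; the directory path is illustrative):

```
from pathlib import Path

# Register any new models found under an external directory, leaving them in place.
new_keys = installer.scan_directory(Path('/mnt/external/checkpoints'), install=False)
print(f'found and registered {len(new_keys)} new models')
```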
|
||||
|
||||
#### installer.sync_to_config()
|
||||
|
||||
This method synchronizes models in the models directory and autoimport
|
||||
directory to those in the `ModelConfigRecordService` database. New
|
||||
models are registered and orphan models are unregistered.
|
||||
|
||||
#### installer.start(invoker)
|
||||
|
||||
The `start` method is called by the API initialization routines when
|
||||
the API starts up. Its effect is to call `sync_to_config()` to
|
||||
synchronize the model record store database with what's currently on
|
||||
disk.
|
||||
|
||||
# The remainder of this documentation is provisional, pending implementation of the Download and Load services
|
||||
|
||||
## Let's get loaded, the lowdown on ModelLoadService
|
||||
|
||||
The `ModelLoadService` is responsible for loading a named model into
|
||||
@@ -1184,3 +863,351 @@ other resources that it might have been using.
|
||||
This will start/pause/cancel all jobs that have been submitted to the
|
||||
queue and have not yet reached a terminal state.
|
||||
|
||||
## Model installation
|
||||
|
||||
The `ModelInstallService` class implements the
|
||||
`ModelInstallServiceBase` abstract base class, and provides a one-stop
|
||||
shop for all your model install needs. It provides the following
|
||||
functionality:
|
||||
|
||||
- Registering a model config record for a model already located on the
|
||||
local filesystem, without moving it or changing its path.
|
||||
|
||||
- Installing a model already located on the local filesystem, by
|
||||
moving it into the InvokeAI root directory under the
|
||||
`models` folder (or wherever config parameter `models_dir`
|
||||
specifies).
|
||||
|
||||
- Downloading a model from an arbitrary URL and installing it in
|
||||
`models_dir`.
|
||||
|
||||
- Special handling for Civitai model URLs which allow the user to
|
||||
paste in a model page's URL or download link. Any metadata provided
|
||||
by Civitai, such as trigger terms, are captured and placed in the
|
||||
model config record.
|
||||
|
||||
- Special handling for HuggingFace repo_ids to recursively download
|
||||
the contents of the repository, paying attention to alternative
|
||||
variants such as fp16.
|
||||
|
||||
- Probing of models to determine their type, base type and other key
|
||||
information.
|
||||
|
||||
- Interface with the InvokeAI event bus to provide status updates on
|
||||
the download, installation and registration process.
|
||||
|
||||
### Initializing the installer
|
||||
|
||||
A default installer is created at InvokeAI api startup time and stored
|
||||
in `ApiDependencies.invoker.services.model_install_service` and can
|
||||
also be retrieved from an invocation's `context` argument with
|
||||
`context.services.model_install_service`.
|
||||
|
||||
In the event you wish to create a new installer, you may use the
|
||||
following initialization pattern:
|
||||
|
||||
```
|
||||
from invokeai.app.services.config import InvokeAIAppConfig
|
||||
from invokeai.app.services.download_manager import DownloadQueueService
|
||||
from invokeai.app.services.model_record_service import ModelRecordServiceBase
|
||||
|
||||
config = InvokeAIAppConfig.get_config()
|
||||
queue = DownloadQueueService()
|
||||
store = ModelRecordServiceBase.open(config)
|
||||
installer = ModelInstallService(config=config, queue=queue, store=store)
|
||||
```
|
||||
|
||||
The full form of `ModelInstallService()` takes the following
|
||||
parameters. Each parameter will default to a reasonable value, but it
|
||||
is recommended that you set them explicitly as shown in the above example.
|
||||
|
||||
| **Argument** | **Type** | **Default** | **Description** |
|
||||
|------------------|------------------------------|-------------|-------------------------------------------|
|
||||
| `config` | InvokeAIAppConfig | Use system-wide config | InvokeAI app configuration object |
|
||||
| `queue` | DownloadQueueServiceBase | Create a new download queue for internal use | Download queue |
|
||||
| `store` | ModelRecordServiceBase | Use config to select the database to open | Config storage database |
|
||||
| `event_bus` | EventServiceBase | None | An event bus to send download/install progress events to |
|
||||
| `event_handlers` | List[DownloadEventHandler] | None | Event handlers for the download queue |
|
||||
|
||||
Note that if `store` is not provided, then the class will use
|
||||
`ModelRecordServiceBase.open(config)` to select the database to use.
|
||||
|
||||
Once initialized, the installer will provide the following methods:
|
||||
|
||||
#### install_job = installer.install_model()
|
||||
|
||||
The `install_model()` method is the core of the installer. The
|
||||
following illustrates basic usage:
|
||||
|
||||
```
|
||||
sources = [
|
||||
Path('/opt/models/sushi.safetensors'), # a local safetensors file
|
||||
Path('/opt/models/sushi_diffusers/'), # a local diffusers folder
|
||||
'runwayml/stable-diffusion-v1-5', # a repo_id
|
||||
'runwayml/stable-diffusion-v1-5:vae', # a subfolder within a repo_id
|
||||
'https://civitai.com/api/download/models/63006', # a civitai direct download link
|
||||
'https://civitai.com/models/8765?modelVersionId=10638', # civitai model page
|
||||
'https://s3.amazon.com/fjacks/sd-3.safetensors', # arbitrary URL
|
||||
]
|
||||
|
||||
for source in sources:
|
||||
install_job = installer.install_model(source)
|
||||
|
||||
source2key = installer.wait_for_installs()
|
||||
for source in sources:
|
||||
model_key = source2key[source]
|
||||
print(f"{source} installed as {model_key}")
|
||||
```
|
||||
|
||||
As shown here, the `install_model()` method accepts a variety of
|
||||
sources, including local safetensors files, local diffusers folders,
|
||||
HuggingFace repo_ids with and without a subfolder designation,
|
||||
Civitai model URLs and arbitrary URLs that point to checkpoint files
|
||||
(but not to folders).
|
||||
|
||||
Each call to `install_model()` will return a `ModelInstallJob` job, a
|
||||
subclass of `DownloadJobBase`. The install job has additional
|
||||
install-specific fields described in the next section.
|
||||
|
||||
Each install job will run in a series of background threads using
|
||||
the object's download queue. You may block until all install jobs are
|
||||
completed (or errored) by calling the `wait_for_installs()` method as
|
||||
shown in the code example. `wait_for_installs()` will return a `dict`
|
||||
that maps the requested source to the key of the installed model. In
|
||||
the case that a model fails to download or install, its value in the
|
||||
dict will be None. The actual cause of the error will be reported in
|
||||
the corresponding job's `error` field.
|
||||
|
||||
Alternatively you may install event handlers and/or listen for events
|
||||
on the InvokeAI event bus in order to monitor the progress of the
|
||||
requested installs.
|
||||
|
||||
The full list of arguments to `install_model()` is as follows:
|
||||
|
||||
| **Argument** | **Type** | **Default** | **Description** |
|
||||
|------------------|------------------------------|-------------|-------------------------------------------|
|
||||
| `source` | Union[str, Path, AnyHttpUrl] | | The source of the model, Path, URL or repo_id |
|
||||
| `inplace` | bool | True | Leave a local model in its current location |
|
||||
| `variant` | str | None | Desired variant, such as 'fp16' or 'onnx' (HuggingFace only) |
|
||||
| `subfolder` | str | None | Repository subfolder (HuggingFace only) |
|
||||
| `probe_override` | Dict[str, Any] | None | Override all or a portion of model's probed attributes |
|
||||
| `metadata` | ModelSourceMetadata | None | Provide metadata that will be added to model's config |
|
||||
| `access_token` | str | None | Provide authorization information needed to download |
|
||||
| `priority` | int | 10 | Download queue priority for the job |
|
||||
|
||||
|
||||
The `inplace` field controls how local model Paths are handled. If
|
||||
True (the default), then the model is simply registered in its current
|
||||
location by the installer's `ModelConfigRecordService`. Otherwise, the
|
||||
model will be moved into the location specified by the `models_dir`
|
||||
application configuration parameter.
|
||||
|
||||
The `variant` field is used for HuggingFace repo_ids only. If
|
||||
provided, the repo_id download handler will look for and download
|
||||
tensor files that follow the convention for the selected variant:
|
||||
|
||||
- "fp16" will select files named "*model.fp16.{safetensors,bin}"
|
||||
- "onnx" will select files ending with the suffix ".onnx"
|
||||
- "openvino" will select files beginning with "openvino_model"
|
||||
|
||||
In the special case of the "fp16" variant, the installer will select
|
||||
the 32-bit version of the files if the 16-bit version is unavailable.
|
||||
|
||||
`subfolder` is used for HuggingFace repo_ids only. If provided, the
|
||||
model will be downloaded from the designated subfolder rather than the
|
||||
top-level repository folder. If a subfolder is attached to the repo_id
|
||||
using the format `repo_owner/repo_name:subfolder`, then the subfolder
|
||||
specified by the repo_id will override the subfolder argument.
|
||||
|
||||
`probe_override` can be used to override all or a portion of the
|
||||
attributes returned by the model prober. This can be used to overcome
|
||||
cases in which automatic probing is unable to (correctly) determine
|
||||
the model's attributes. The most common situation is the
|
||||
`prediction_type` field for sd-2 (and rare sd-1) models. Here is an
|
||||
example of how it works:
|
||||
|
||||
```
|
||||
install_job = installer.install_model(
|
||||
source='stabilityai/stable-diffusion-2-1',
|
||||
variant='fp16',
|
||||
probe_override=dict(
|
||||
prediction_type=SchedulerPredictionType('v_prediction')
|
||||
)
|
||||
)
|
||||
```
|
||||
|
||||
`metadata` allows you to attach custom metadata to the installed
|
||||
model. See the next section for details.
|
||||
|
||||
`priority` and `access_token` are passed to the download queue and
|
||||
have the same effect as they do for the DownloadQueueServiceBase.
|
||||
|
||||
#### Monitoring the install job process
|
||||
|
||||
When you create an install job with `install_model()`, events will be
|
||||
passed to the list of `DownloadEventHandlers` provided at installer
|
||||
initialization time. Event handlers can also be added to individual
|
||||
model install jobs by calling their `add_handler()` method as
|
||||
described earlier for the `DownloadQueueService`.
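A sketch of a per-job handler follows; the handler signature is an assumption, so check the `DownloadQueueService` documentation for the exact form:

```
# Sketch: attach a progress handler to a single install job.
def log_progress(job):
    print(f"{job.source}: {job.status}")

install_job = installer.install_model('runwayml/stable-diffusion-v1-5')
install_job.add_handler(log_progress)
```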
|
||||
|
||||
If the `event_bus` argument was provided, events will also be
|
||||
broadcast to the InvokeAI event bus. The events will appear on the bus
|
||||
as a singular event type named `model_event` with a payload of
|
||||
`job`. You can then retrieve the job and check its status.
|
||||
|
||||
**TO DO:** consider breaking `model_event` into
|
||||
`model_install_started`, `model_install_completed`, etc. The event bus
|
||||
features have not yet been tested with FastAPI/websockets, and it may
|
||||
turn out that the job object is not serializable.
|
||||
|
||||
#### Model metadata and probing
|
||||
|
||||
The install service has special handling for HuggingFace and Civitai
|
||||
URLs that capture metadata from the source and include it in the model
|
||||
configuration record. For example, fetching the Civitai model 8765
|
||||
will produce a config record similar to this (using YAML
|
||||
representation):
|
||||
|
||||
```
|
||||
5abc3ef8600b6c1cc058480eaae3091e:
|
||||
path: sd-1/lora/to8contrast-1-5.safetensors
|
||||
name: to8contrast-1-5
|
||||
base_model: sd-1
|
||||
model_type: lora
|
||||
model_format: lycoris
|
||||
key: 5abc3ef8600b6c1cc058480eaae3091e
|
||||
hash: 5abc3ef8600b6c1cc058480eaae3091e
|
||||
description: 'Trigger terms: to8contrast style'
|
||||
author: theovercomer8
|
||||
license: allowCommercialUse=Sell; allowDerivatives=True; allowNoCredit=True
|
||||
source: https://civitai.com/models/8765?modelVersionId=10638
|
||||
thumbnail_url: null
|
||||
tags:
|
||||
- model
|
||||
- style
|
||||
- portraits
|
||||
```
|
||||
|
||||
For sources that do not provide model metadata, you can attach custom
|
||||
fields by providing a `metadata` argument to `model_install()` using
|
||||
an initialized `ModelSourceMetadata` object (available for import from
|
||||
`model_install_service.py`):
|
||||
|
||||
```
|
||||
from invokeai.app.services.model_install_service import ModelSourceMetadata
|
||||
meta = ModelSourceMetadata(
|
||||
name="my model",
|
||||
author="Sushi Chef",
|
||||
description="Highly customized model; trigger with 'sushi',"
|
||||
license="mit",
|
||||
thumbnail_url="http://s3.amazon.com/ljack/pics/sushi.png",
|
||||
    tags=['sfw', 'food']
|
||||
)
|
||||
install_job = installer.install_model(
|
||||
source='sushi_chef/model3',
|
||||
variant='fp16',
|
||||
metadata=meta,
|
||||
)
|
||||
```
|
||||
|
||||
It is not currently recommended to provide custom metadata when
|
||||
installing from Civitai or HuggingFace source, as the metadata
|
||||
provided by the source will overwrite the fields you provide. Instead,
|
||||
after the model is installed you can use
|
||||
`ModelRecordService.update_model()` to change the desired fields.
|
||||
|
||||
**TO DO:** Change the logic so that the caller's metadata fields take
|
||||
precedence over those provided by the source.
|
||||
|
||||
|
||||
#### Other installer methods
|
||||
|
||||
This section describes additional, less-frequently-used attributes and
|
||||
methods provided by the installer class.
|
||||
|
||||
##### installer.wait_for_installs()
|
||||
|
||||
This is equivalent to the `DownloadQueue` `join()` method. It will
|
||||
block until all the active jobs in the install queue have reached a
|
||||
terminal state (completed, errored or cancelled).
|
||||
|
||||
##### installer.queue, installer.store, installer.config
|
||||
|
||||
These attributes provide access to the `DownloadQueueServiceBase`,
|
||||
`ModelConfigRecordServiceBase`, and `InvokeAIAppConfig` objects that
|
||||
the installer uses.
|
||||
|
||||
For example, to temporarily pause all pending installations, you can
|
||||
do this:
|
||||
|
||||
```
|
||||
installer.queue.pause_all_jobs()
|
||||
```
|
||||
##### key = installer.register_path(model_path, overrides), key = installer.install_path(model_path, overrides)
|
||||
|
||||
These methods bypass the download queue and directly register or
|
||||
install the model at the indicated path, returning the unique ID for
|
||||
the installed model.
|
||||
|
||||
Both methods accept a Path object corresponding to a checkpoint or
|
||||
diffusers folder, and an optional dict of attributes to use to
|
||||
override the values derived from model probing.
|
||||
|
||||
The difference between `register_path()` and `install_path()` is that
|
||||
the former will not move the model from its current position, while
|
||||
the latter will move it into the `models_dir` hierarchy.
|
||||
|
||||
##### installer.unregister(key)
|
||||
|
||||
This will remove the model config record for the model at key, and is
|
||||
equivalent to `installer.store.unregister(key)`
|
||||
|
||||
##### installer.delete(key)
|
||||
|
||||
This is similar to `unregister()` but has the additional effect of
|
||||
deleting the underlying model file(s) -- even if they were outside the
|
||||
`models_dir` directory!
|
||||
|
||||
##### installer.conditionally_delete(key)
|
||||
|
||||
This method will call `unregister()` if the model identified by `key`
|
||||
is outside the `models_dir` hierarchy, and call `delete()` if the
|
||||
model is inside.
|
||||
|
||||
#### List[str]=installer.scan_directory(scan_dir: Path, install: bool)
|
||||
|
||||
This method will recursively scan the directory indicated in
|
||||
`scan_dir` for new models and either install them in the models
|
||||
directory or register them in place, depending on the setting of
|
||||
`install` (default False).
|
||||
|
||||
The return value is the list of keys of the new installed/registered
|
||||
models.
|
||||
|
||||
#### installer.scan_models_directory()
|
||||
|
||||
This method scans the models directory for new models and registers
|
||||
them in place. Models that are present in the
|
||||
`ModelConfigRecordService` database whose paths are not found will be
|
||||
unregistered.
|
||||
|
||||
#### installer.sync_to_config()
|
||||
|
||||
This method synchronizes models in the models directory and autoimport
|
||||
directory to those in the `ModelConfigRecordService` database. New
|
||||
models are registered and orphan models are unregistered.
|
||||
|
||||
#### hash=installer.hash(model_path)
|
||||
|
||||
This method calls the fasthash algorithm on a model's Path
|
||||
(either a file or a folder) to generate a unique ID based on the
|
||||
contents of the model.
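For example (a sketch; the path is illustrative):

```
from pathlib import Path

# Generate a content-based ID for a checkpoint file or a diffusers folder.
model_id = installer.hash(Path('/opt/models/sushi.safetensors'))
```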
|
||||
|
||||
##### installer.start(invoker)
|
||||
|
||||
The `start` method is called by the API initialization routines when
|
||||
the API starts up. Its effect is to call `sync_to_config()` to
|
||||
synchronize the model record store database with what's currently on
|
||||
disk.
|
||||
|
||||
This method should not ordinarily be called manually.
|
||||
|
||||
@@ -154,16 +154,14 @@ groups in `invokeai.yaml`:
|
||||
|
||||
### Web Server
|
||||
|
||||
| Setting | Default Value | Description |
|
||||
|---------------------|---------------|----------------------------------------------------------------------------------------------------------------------------|
|
||||
| `host` | `localhost` | Name or IP address of the network interface that the web server will listen on |
|
||||
| `port` | `9090` | Network port number that the web server will listen on |
|
||||
| `allow_origins` | `[]` | A list of host names or IP addresses that are allowed to connect to the InvokeAI API in the format `['host1','host2',...]` |
|
||||
| `allow_credentials` | `true` | Require credentials for a foreign host to access the InvokeAI API (don't change this) |
|
||||
| `allow_methods` | `*` | List of HTTP methods ("GET", "POST") that the web server is allowed to use when accessing the API |
|
||||
| `allow_headers` | `*` | List of HTTP headers that the web server will accept when accessing the API |
|
||||
| `ssl_certfile` | null | Path to an SSL certificate file, used to enable HTTPS. |
|
||||
| `ssl_keyfile` | null | Path to an SSL keyfile, if the key is not included in the certificate file. |
|
||||
| Setting | Default Value | Description |
|
||||
|----------|----------------|--------------|
|
||||
| `host` | `localhost` | Name or IP address of the network interface that the web server will listen on |
|
||||
| `port` | `9090` | Network port number that the web server will listen on |
|
||||
| `allow_origins` | `[]` | A list of host names or IP addresses that are allowed to connect to the InvokeAI API in the format `['host1','host2',...]` |
|
||||
| `allow_credentials` | `true` | Require credentials for a foreign host to access the InvokeAI API (don't change this) |
|
||||
| `allow_methods` | `*` | List of HTTP methods ("GET", "POST") that the web server is allowed to use when accessing the API |
|
||||
| `allow_headers` | `*` | List of HTTP headers that the web server will accept when accessing the API |
|
||||
|
||||
The documentation for InvokeAI's API can be accessed by browsing to the following URL: [http://localhost:9090/docs].
|
||||
|
||||
|
||||
@@ -293,19 +293,6 @@ manager, please follow these steps:
|
||||
|
||||
## Developer Install
|
||||
|
||||
!!! warning
|
||||
|
||||
InvokeAI uses a SQLite database. By running on `main`, you accept responsibility for your database. This
|
||||
means making regular backups (especially before pulling) and/or fixing it yourself in the event that a
|
||||
PR introduces a schema change.
|
||||
|
||||
If you don't need persistent backend storage, you can use an ephemeral in-memory database by setting
|
||||
`use_memory_db: true` under `Path:` in your `invokeai.yaml` file.
|
||||
|
||||
If this is untenable, you should run the application via the official installer or a manual install of the
|
||||
python package from pypi. These releases will not break your database.
|
||||
|
||||
|
||||
If you have an interest in how InvokeAI works, or you would like to
|
||||
add features or bugfixes, you are encouraged to install the source
|
||||
code for InvokeAI. For this to work, you will need to install the
|
||||
@@ -401,5 +388,3 @@ environment variable INVOKEAI_ROOT to point to the installation directory.
|
||||
|
||||
Note that if you run into problems with the Conda installation, the InvokeAI
|
||||
staff will **not** be able to help you out. Caveat Emptor!
|
||||
|
||||
[dev-chat]: https://discord.com/channels/1020123559063990373/1049495067846524939
|
||||
@@ -1,10 +0,0 @@
|
||||
document.addEventListener("DOMContentLoaded", function () {
|
||||
var script = document.createElement("script");
|
||||
script.src = "https://widget.kapa.ai/kapa-widget.bundle.js";
|
||||
script.setAttribute("data-website-id", "b5973bb1-476b-451e-8cf4-98de86745a10");
|
||||
script.setAttribute("data-project-name", "Invoke.AI");
|
||||
script.setAttribute("data-project-color", "#11213C");
|
||||
script.setAttribute("data-project-logo", "https://avatars.githubusercontent.com/u/113954515?s=280&v=4");
|
||||
script.async = true;
|
||||
document.head.appendChild(script);
|
||||
});
|
||||
@@ -14,10 +14,6 @@ To use a community workflow, download the the `.json` node graph file and load i
|
||||
|
||||
- Community Nodes
|
||||
+ [Average Images](#average-images)
|
||||
+ [Clean Image Artifacts After Cut](#clean-image-artifacts-after-cut)
|
||||
+ [Close Color Mask](#close-color-mask)
|
||||
+ [Clothing Mask](#clothing-mask)
|
||||
+ [Contrast Limited Adaptive Histogram Equalization](#contrast-limited-adaptive-histogram-equalization)
|
||||
+ [Depth Map from Wavefront OBJ](#depth-map-from-wavefront-obj)
|
||||
+ [Film Grain](#film-grain)
|
||||
+ [Generative Grammar-Based Prompt Nodes](#generative-grammar-based-prompt-nodes)
|
||||
@@ -26,22 +22,16 @@ To use a community workflow, download the the `.json` node graph file and load i
|
||||
+ [Halftone](#halftone)
|
||||
+ [Ideal Size](#ideal-size)
|
||||
+ [Image and Mask Composition Pack](#image-and-mask-composition-pack)
|
||||
+ [Image Dominant Color](#image-dominant-color)
|
||||
+ [Image to Character Art Image Nodes](#image-to-character-art-image-nodes)
|
||||
+ [Image Picker](#image-picker)
|
||||
+ [Image Resize Plus](#image-resize-plus)
|
||||
+ [Load Video Frame](#load-video-frame)
|
||||
+ [Make 3D](#make-3d)
|
||||
+ [Mask Operations](#mask-operations)
|
||||
+ [Match Histogram](#match-histogram)
|
||||
+ [Negative Image](#negative-image)
|
||||
+ [Oobabooga](#oobabooga)
|
||||
+ [Prompt Tools](#prompt-tools)
|
||||
+ [Remote Image](#remote-image)
|
||||
+ [Remove Background](#remove-background)
|
||||
+ [Retroize](#retroize)
|
||||
+ [Size Stepper Nodes](#size-stepper-nodes)
|
||||
+ [Simple Skin Detection](#simple-skin-detection)
|
||||
+ [Text font to Image](#text-font-to-image)
|
||||
+ [Thresholding](#thresholding)
|
||||
+ [Unsharp Mask](#unsharp-mask)
|
||||
@@ -58,46 +48,6 @@ To use a community workflow, download the the `.json` node graph file and load i
|
||||
|
||||
**Node Link:** https://github.com/JPPhoto/average-images-node
|
||||
|
||||
--------------------------------
|
||||
### Clean Image Artifacts After Cut
|
||||
|
||||
Description: Removes residual artifacts after an image is separated from its background.
|
||||
|
||||
Node Link: https://github.com/VeyDlin/clean-artifact-after-cut-node
|
||||
|
||||
View:
|
||||
</br><img src="https://raw.githubusercontent.com/VeyDlin/clean-artifact-after-cut-node/master/.readme/node.png" width="500" />
|
||||
|
||||
--------------------------------
|
||||
### Close Color Mask
|
||||
|
||||
Description: Generates a mask for images based on a closely matching color, useful for color-based selections.
|
||||
|
||||
Node Link: https://github.com/VeyDlin/close-color-mask-node
|
||||
|
||||
View:
|
||||
</br><img src="https://raw.githubusercontent.com/VeyDlin/close-color-mask-node/master/.readme/node.png" width="500" />
|
||||
|
||||
--------------------------------
|
||||
### Clothing Mask
|
||||
|
||||
Description: Employs a U2NET neural network trained for the segmentation of clothing items in images.
|
||||
|
||||
Node Link: https://github.com/VeyDlin/clothing-mask-node
|
||||
|
||||
View:
|
||||
</br><img src="https://raw.githubusercontent.com/VeyDlin/clothing-mask-node/master/.readme/node.png" width="500" />
|
||||
|
||||
--------------------------------
|
||||
### Contrast Limited Adaptive Histogram Equalization
|
||||
|
||||
Description: Enhances local image contrast using adaptive histogram equalization with contrast limiting.
|
||||
|
||||
Node Link: https://github.com/VeyDlin/clahe-node
|
||||
|
||||
View:
|
||||
</br><img src="https://raw.githubusercontent.com/VeyDlin/clahe-node/master/.readme/node.png" width="500" />
|
||||
|
||||
--------------------------------
|
||||
### Depth Map from Wavefront OBJ
|
||||
|
||||
@@ -214,16 +164,6 @@ This includes 15 Nodes:
|
||||
|
||||
</br><img src="https://raw.githubusercontent.com/dwringer/composition-nodes/main/composition_pack_overview.jpg" width="500" />
|
||||
|
||||
--------------------------------
|
||||
### Image Dominant Color
|
||||
|
||||
Description: Identifies and extracts the dominant color from an image using k-means clustering.
|
||||
|
||||
Node Link: https://github.com/VeyDlin/image-dominant-color-node
|
||||
|
||||
View:
|
||||
</br><img src="https://raw.githubusercontent.com/VeyDlin/image-dominant-color-node/master/.readme/node.png" width="500" />
|
||||
|
||||
--------------------------------
|
||||
### Image to Character Art Image Nodes
|
||||
|
||||
@@ -245,17 +185,6 @@ View:
|
||||
|
||||
**Node Link:** https://github.com/JPPhoto/image-picker-node
|
||||
|
||||
--------------------------------
|
||||
### Image Resize Plus
|
||||
|
||||
Description: Provides various image resizing options such as fill, stretch, fit, center, and crop.
|
||||
|
||||
Node Link: https://github.com/VeyDlin/image-resize-plus-node
|
||||
|
||||
View:
|
||||
</br><img src="https://raw.githubusercontent.com/VeyDlin/image-resize-plus-node/master/.readme/node.png" width="500" />
|
||||
|
||||
|
||||
--------------------------------
|
||||
### Load Video Frame
|
||||
|
||||
@@ -280,16 +209,6 @@ View:
|
||||
<img src="https://gitlab.com/srcrr/shift3d/-/raw/main/example-1.png" width="300" />
|
||||
<img src="https://gitlab.com/srcrr/shift3d/-/raw/main/example-2.png" width="300" />
|
||||
|
||||
--------------------------------
|
||||
### Mask Operations
|
||||
|
||||
Description: Offers logical operations (OR, SUB, AND) for combining and manipulating image masks.
|
||||
|
||||
Node Link: https://github.com/VeyDlin/mask-operations-node
|
||||
|
||||
View:
|
||||
</br><img src="https://raw.githubusercontent.com/VeyDlin/mask-operations-node/master/.readme/node.png" width="500" />
|
||||
|
||||
--------------------------------
|
||||
### Match Histogram
|
||||
|
||||
@@ -307,16 +226,6 @@ See full docs here: https://github.com/skunkworxdark/Prompt-tools-nodes/edit/mai
|
||||
|
||||
<img src="https://github.com/skunkworxdark/match_histogram/assets/21961335/ed12f329-a0ef-444a-9bae-129ed60d6097" width="300" />
|
||||
|
||||
--------------------------------
|
||||
### Negative Image
|
||||
|
||||
Description: Creates a negative version of an image, effective for visual effects and mask inversion.
|
||||
|
||||
Node Link: https://github.com/VeyDlin/negative-image-node
|
||||
|
||||
View:
|
||||
</br><img src="https://raw.githubusercontent.com/VeyDlin/negative-image-node/master/.readme/node.png" width="500" />
|
||||
|
||||
--------------------------------
|
||||
### Oobabooga
|
||||
|
||||
@@ -380,15 +289,6 @@ See full docs here: https://github.com/skunkworxdark/Prompt-tools-nodes/edit/mai
|
||||
|
||||
**Node Link:** https://github.com/fieldOfView/InvokeAI-remote_image
|
||||
|
||||
--------------------------------
|
||||
### Remove Background
|
||||
|
||||
Description: An integration of the rembg package to remove backgrounds from images using multiple U2NET models.
|
||||
|
||||
Node Link: https://github.com/VeyDlin/remove-background-node
|
||||
|
||||
View:
|
||||
</br><img src="https://raw.githubusercontent.com/VeyDlin/remove-background-node/master/.readme/node.png" width="500" />
|
||||
|
||||
--------------------------------
|
||||
### Retroize
|
||||
@@ -401,17 +301,6 @@ View:
|
||||
|
||||
<img src="https://github.com/Ar7ific1al/InvokeAI_nodes_retroize/assets/2306586/de8b4fa6-324c-4c2d-b36c-297600c73974" width="500" />
|
||||
|
||||
--------------------------------
|
||||
### Simple Skin Detection
|
||||
|
||||
Description: Detects skin in images based on predefined color thresholds.
|
||||
|
||||
Node Link: https://github.com/VeyDlin/simple-skin-detection-node
|
||||
|
||||
View:
|
||||
</br><img src="https://raw.githubusercontent.com/VeyDlin/simple-skin-detection-node/master/.readme/node.png" width="500" />
|
||||
|
||||
|
||||
--------------------------------
|
||||
### Size Stepper Nodes
|
||||
|
||||
@@ -497,7 +386,6 @@ See full docs here: https://github.com/skunkworxdark/XYGrid_nodes/edit/main/READ
|
||||
|
||||
<img src="https://github.com/skunkworxdark/XYGrid_nodes/blob/main/images/collage.png" width="300" />
|
||||
|
||||
|
||||
--------------------------------
|
||||
### Example Node Template
|
||||
|
||||
|
||||
@@ -2,72 +2,43 @@
|
||||
|
||||
set -e
|
||||
|
||||
BCYAN="\e[1;36m"
|
||||
BYELLOW="\e[1;33m"
|
||||
BGREEN="\e[1;32m"
|
||||
BRED="\e[1;31m"
|
||||
RED="\e[31m"
|
||||
RESET="\e[0m"
|
||||
|
||||
function is_bin_in_path {
|
||||
builtin type -P "$1" &>/dev/null
|
||||
}
|
||||
|
||||
function git_show {
|
||||
git show -s --format='%h %s' $1
|
||||
}
|
||||
|
||||
cd "$(dirname "$0")"
|
||||
|
||||
echo -e "${BYELLOW}This script must be run from the installer directory!${RESET}"
|
||||
echo "The current working directory is $(pwd)"
|
||||
read -p "If that looks right, press any key to proceed, or CTRL-C to exit..."
|
||||
echo
|
||||
|
||||
# Some machines only have `python3` in PATH, others have `python` - make an alias.
|
||||
# We can use a function to approximate an alias within a non-interactive shell.
|
||||
if ! is_bin_in_path python && is_bin_in_path python3; then
|
||||
function python {
|
||||
python3 "$@"
|
||||
}
|
||||
fi
|
||||
|
||||
if [[ -v "VIRTUAL_ENV" ]]; then
|
||||
# we can't just call 'deactivate' because this function is not exported
|
||||
# to the environment of this script from the bash process that runs the script
|
||||
echo -e "${BRED}A virtual environment is activated. Please deactivate it before proceeding.${RESET}"
|
||||
echo "A virtual environment is activated. Please deactivate it before proceeding".
|
||||
exit -1
|
||||
fi
|
||||
|
||||
VERSION=$(
|
||||
cd ..
|
||||
python -c "from invokeai.version import __version__ as version; print(version)"
|
||||
)
|
||||
VERSION=$(cd ..; python -c "from invokeai.version import __version__ as version; print(version)")
|
||||
PATCH=""
|
||||
VERSION="v${VERSION}${PATCH}"
|
||||
LATEST_TAG="v3-latest"
|
||||
|
||||
echo -e "${BGREEN}HEAD${RESET}:"
|
||||
git_show
|
||||
echo
|
||||
echo Building installer for version $VERSION
|
||||
echo "Be certain that you're in the 'installer' directory before continuing."
|
||||
read -p "Press any key to continue, or CTRL-C to exit..."
|
||||
|
||||
# ---------------------- FRONTEND ----------------------
|
||||
read -e -p "Tag this repo with '${VERSION}' and '${LATEST_TAG}'? [n]: " input
|
||||
RESPONSE=${input:='n'}
|
||||
if [ "$RESPONSE" == 'y' ]; then
|
||||
|
||||
pushd ../invokeai/frontend/web >/dev/null
|
||||
echo
|
||||
echo "Installing frontend dependencies..."
|
||||
echo
|
||||
pnpm i --frozen-lockfile
|
||||
echo
|
||||
echo "Building frontend..."
|
||||
echo
|
||||
pnpm build
|
||||
popd
|
||||
git push origin :refs/tags/$VERSION
|
||||
if ! git tag -fa $VERSION ; then
|
||||
echo "Existing/invalid tag"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
# ---------------------- BACKEND ----------------------
|
||||
git push origin :refs/tags/$LATEST_TAG
|
||||
git tag -fa $LATEST_TAG
|
||||
|
||||
echo
|
||||
echo "Building wheel..."
|
||||
echo
|
||||
echo "remember to push --tags!"
|
||||
fi
|
||||
|
||||
# ----------------------
|
||||
|
||||
echo Building the wheel
|
||||
|
||||
# install the 'build' package in the user site packages, if needed
|
||||
# could be improved by using a temporary venv, but it's tiny and harmless
|
||||
@@ -75,15 +46,12 @@ if [[ $(python -c 'from importlib.util import find_spec; print(find_spec("build"
|
||||
pip install --user build
|
||||
fi
|
||||
|
||||
rm -rf ../build
|
||||
|
||||
rm -r ../build
|
||||
python -m build --wheel --outdir dist/ ../.
|
||||
|
||||
# ----------------------
|
||||
|
||||
echo
|
||||
echo "Building installer zip files for InvokeAI ${VERSION}..."
|
||||
echo
|
||||
echo Building installer zip fles for InvokeAI $VERSION
|
||||
|
||||
# get rid of any old ones
|
||||
rm -f *.zip
|
||||
@@ -91,11 +59,9 @@ rm -rf InvokeAI-Installer
|
||||
|
||||
# copy content
|
||||
mkdir InvokeAI-Installer
|
||||
for f in templates *.txt *.reg; do
|
||||
for f in templates lib *.txt *.reg; do
|
||||
cp -r ${f} InvokeAI-Installer/
|
||||
done
|
||||
mkdir InvokeAI-Installer/lib
|
||||
cp lib/*.py InvokeAI-Installer/lib
|
||||
|
||||
# Move the wheel
|
||||
mv dist/*.whl InvokeAI-Installer/lib/
|
||||
@@ -106,13 +72,13 @@ cp install.sh.in InvokeAI-Installer/install.sh
|
||||
chmod a+x InvokeAI-Installer/install.sh
|
||||
|
||||
# Windows
|
||||
perl -p -e "s/^set INVOKEAI_VERSION=.*/set INVOKEAI_VERSION=$VERSION/" install.bat.in >InvokeAI-Installer/install.bat
|
||||
perl -p -e "s/^set INVOKEAI_VERSION=.*/set INVOKEAI_VERSION=$VERSION/" install.bat.in > InvokeAI-Installer/install.bat
|
||||
cp WinLongPathsEnabled.reg InvokeAI-Installer/
|
||||
|
||||
# Zip everything up
|
||||
zip -r InvokeAI-installer-$VERSION.zip InvokeAI-Installer
|
||||
|
||||
# clean up
|
||||
rm -rf InvokeAI-Installer tmp dist ../invokeai/frontend/web/dist/
|
||||
rm -rf InvokeAI-Installer tmp dist
|
||||
|
||||
exit 0
|
||||
|
||||
@@ -244,9 +244,9 @@ class InvokeAiInstance:
|
||||
"numpy~=1.24.0", # choose versions that won't be uninstalled during phase 2
|
||||
"urllib3~=1.26.0",
|
||||
"requests~=2.28.0",
|
||||
"torch==2.1.1",
|
||||
"torch==2.1.0",
|
||||
"torchmetrics==0.11.4",
|
||||
"torchvision>=0.16.1",
|
||||
"torchvision>=0.14.1",
|
||||
"--force-reinstall",
|
||||
"--find-links" if find_links is not None else None,
|
||||
find_links,
|
||||
|
||||
@@ -1,71 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
BCYAN="\e[1;36m"
|
||||
BYELLOW="\e[1;33m"
|
||||
BGREEN="\e[1;32m"
|
||||
BRED="\e[1;31m"
|
||||
RED="\e[31m"
|
||||
RESET="\e[0m"
|
||||
|
||||
function does_tag_exist {
|
||||
git rev-parse --quiet --verify "refs/tags/$1" >/dev/null
|
||||
}
|
||||
|
||||
function git_show_ref {
|
||||
git show-ref --dereference $1 --abbrev 7
|
||||
}
|
||||
|
||||
function git_show {
|
||||
git show -s --format='%h %s' $1
|
||||
}
|
||||
|
||||
VERSION=$(
|
||||
cd ..
|
||||
python -c "from invokeai.version import __version__ as version; print(version)"
|
||||
)
|
||||
PATCH=""
|
||||
MAJOR_VERSION=$(echo $VERSION | sed 's/\..*$//')
|
||||
VERSION="v${VERSION}${PATCH}"
|
||||
LATEST_TAG="v${MAJOR_VERSION}-latest"
|
||||
|
||||
if does_tag_exist $VERSION; then
|
||||
echo -e "${BCYAN}${VERSION}${RESET} already exists:"
|
||||
git_show_ref tags/$VERSION
|
||||
echo
|
||||
fi
|
||||
if does_tag_exist $LATEST_TAG; then
|
||||
echo -e "${BCYAN}${LATEST_TAG}${RESET} already exists:"
|
||||
git_show_ref tags/$LATEST_TAG
|
||||
echo
|
||||
fi
|
||||
|
||||
echo -e "${BGREEN}HEAD${RESET}:"
|
||||
git_show
|
||||
echo
|
||||
|
||||
echo -e -n "Create tags ${BCYAN}${VERSION}${RESET} and ${BCYAN}${LATEST_TAG}${RESET} @ ${BGREEN}HEAD${RESET}, ${RED}deleting existing tags on remote${RESET}? "
|
||||
read -e -p 'y/n [n]: ' input
|
||||
RESPONSE=${input:='n'}
|
||||
if [ "$RESPONSE" == 'y' ]; then
|
||||
echo
|
||||
echo -e "Deleting ${BCYAN}${VERSION}${RESET} tag on remote..."
|
||||
git push --delete origin $VERSION
|
||||
|
||||
echo -e "Tagging ${BGREEN}HEAD${RESET} with ${BCYAN}${VERSION}${RESET} locally..."
|
||||
if ! git tag -fa $VERSION; then
|
||||
echo "Existing/invalid tag"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
echo -e "Deleting ${BCYAN}${LATEST_TAG}${RESET} tag on remote..."
|
||||
git push --delete origin $LATEST_TAG
|
||||
|
||||
echo -e "Tagging ${BGREEN}HEAD${RESET} with ${BCYAN}${LATEST_TAG}${RESET} locally..."
|
||||
git tag -fa $LATEST_TAG
|
||||
|
||||
echo -e "Pushing updated tags to remote..."
|
||||
git push origin --tags
|
||||
fi
|
||||
exit 0
|
||||
@@ -2,7 +2,6 @@
|
||||
|
||||
from logging import Logger
|
||||
|
||||
from invokeai.app.services.shared.sqlite.sqlite_util import init_db
|
||||
from invokeai.backend.util.logging import InvokeAILogger
|
||||
from invokeai.version.invokeai_version import __version__
|
||||
|
||||
@@ -23,7 +22,6 @@ from ..services.invoker import Invoker
|
||||
from ..services.item_storage.item_storage_sqlite import SqliteItemStorage
|
||||
from ..services.latents_storage.latents_storage_disk import DiskLatentsStorage
|
||||
from ..services.latents_storage.latents_storage_forward_cache import ForwardCacheLatentsStorage
|
||||
from ..services.model_install import ModelInstallService
|
||||
from ..services.model_manager.model_manager_default import ModelManagerService
|
||||
from ..services.model_records import ModelRecordServiceSQL
|
||||
from ..services.names.names_default import SimpleNameService
|
||||
@@ -31,6 +29,7 @@ from ..services.session_processor.session_processor_default import DefaultSessio
|
||||
from ..services.session_queue.session_queue_sqlite import SqliteSessionQueue
|
||||
from ..services.shared.default_graphs import create_system_graphs
|
||||
from ..services.shared.graph import GraphExecutionState, LibraryGraph
|
||||
from ..services.shared.sqlite.sqlite_database import SqliteDatabase
|
||||
from ..services.urls.urls_default import LocalUrlService
|
||||
from ..services.workflow_records.workflow_records_sqlite import SqliteWorkflowRecordsStorage
|
||||
from .events import FastAPIEventService
|
||||
@@ -67,9 +66,8 @@ class ApiDependencies:
|
||||
logger.debug(f"Internet connectivity is {config.internet_available}")
|
||||
|
||||
output_folder = config.output_path
|
||||
image_files = DiskImageFileStorage(f"{output_folder}/images")
|
||||
|
||||
db = init_db(config=config, logger=logger, image_files=image_files)
|
||||
db = SqliteDatabase(config, logger)
|
||||
|
||||
configuration = config
|
||||
logger = logger
|
||||
@@ -81,15 +79,13 @@ class ApiDependencies:
|
||||
events = FastAPIEventService(event_handler_id)
|
||||
graph_execution_manager = SqliteItemStorage[GraphExecutionState](db=db, table_name="graph_executions")
|
||||
graph_library = SqliteItemStorage[LibraryGraph](db=db, table_name="graphs")
|
||||
image_files = DiskImageFileStorage(f"{output_folder}/images")
|
||||
image_records = SqliteImageRecordStorage(db=db)
|
||||
images = ImageService()
|
||||
invocation_cache = MemoryInvocationCache(max_cache_size=config.node_cache_size)
|
||||
latents = ForwardCacheLatentsStorage(DiskLatentsStorage(f"{output_folder}/latents"))
|
||||
model_manager = ModelManagerService(config, logger)
|
||||
model_record_service = ModelRecordServiceSQL(db=db)
|
||||
model_install_service = ModelInstallService(
|
||||
app_config=config, record_store=model_record_service, event_bus=events
|
||||
)
|
||||
names = SimpleNameService()
|
||||
performance_statistics = InvocationStatsService()
|
||||
processor = DefaultInvocationProcessor()
|
||||
@@ -116,7 +112,6 @@ class ApiDependencies:
|
||||
logger=logger,
|
||||
model_manager=model_manager,
|
||||
model_records=model_record_service,
|
||||
model_install=model_install_service,
|
||||
names=names,
|
||||
performance_statistics=performance_statistics,
|
||||
processor=processor,
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
from hashlib import sha1
|
||||
from random import randbytes
|
||||
from typing import Any, Dict, List, Optional
|
||||
from typing import List, Optional
|
||||
|
||||
from fastapi import Body, Path, Query, Response
|
||||
from fastapi.routing import APIRouter
|
||||
@@ -12,7 +12,6 @@ from pydantic import BaseModel, ConfigDict
|
||||
from starlette.exceptions import HTTPException
|
||||
from typing_extensions import Annotated
|
||||
|
||||
from invokeai.app.services.model_install import ModelInstallJob, ModelSource
|
||||
from invokeai.app.services.model_records import (
|
||||
DuplicateModelException,
|
||||
InvalidModelException,
|
||||
@@ -26,7 +25,7 @@ from invokeai.backend.model_manager.config import (
|
||||
|
||||
from ..dependencies import ApiDependencies
|
||||
|
||||
model_records_router = APIRouter(prefix="/v1/model/record", tags=["model_manager_v2_unstable"])
|
||||
model_records_router = APIRouter(prefix="/v1/model/record", tags=["models"])
|
||||
|
||||
|
||||
class ModelsList(BaseModel):
|
||||
@@ -44,25 +43,15 @@ class ModelsList(BaseModel):
|
||||
async def list_model_records(
|
||||
base_models: Optional[List[BaseModelType]] = Query(default=None, description="Base models to include"),
|
||||
model_type: Optional[ModelType] = Query(default=None, description="The type of model to get"),
|
||||
model_name: Optional[str] = Query(default=None, description="Exact match on the name of the model"),
|
||||
model_format: Optional[str] = Query(
|
||||
default=None, description="Exact match on the format of the model (e.g. 'diffusers')"
|
||||
),
|
||||
) -> ModelsList:
|
||||
"""Get a list of models."""
|
||||
record_store = ApiDependencies.invoker.services.model_records
|
||||
found_models: list[AnyModelConfig] = []
|
||||
if base_models:
|
||||
for base_model in base_models:
|
||||
found_models.extend(
|
||||
record_store.search_by_attr(
|
||||
base_model=base_model, model_type=model_type, model_name=model_name, model_format=model_format
|
||||
)
|
||||
)
|
||||
found_models.extend(record_store.search_by_attr(base_model=base_model, model_type=model_type))
|
||||
else:
|
||||
found_models.extend(
|
||||
record_store.search_by_attr(model_type=model_type, model_name=model_name, model_format=model_format)
|
||||
)
|
||||
found_models.extend(record_store.search_by_attr(model_type=model_type))
|
||||
return ModelsList(models=found_models)
|
||||
|
||||
|
||||
@@ -128,17 +117,12 @@ async def update_model_record(
|
||||
async def del_model_record(
|
||||
key: str = Path(description="Unique key of model to remove from model registry."),
|
||||
) -> Response:
|
||||
"""
|
||||
Delete model record from database.
|
||||
|
||||
The configuration record will be removed. The corresponding weights files will be
|
||||
deleted as well if they reside within the InvokeAI "models" directory.
|
||||
"""
|
||||
"""Delete Model"""
|
||||
logger = ApiDependencies.invoker.services.logger
|
||||
|
||||
try:
|
||||
installer = ApiDependencies.invoker.services.model_install
|
||||
installer.delete(key)
|
||||
record_store = ApiDependencies.invoker.services.model_records
|
||||
record_store.del_model(key)
|
||||
logger.info(f"Deleted model: {key}")
|
||||
return Response(status_code=204)
|
||||
except UnknownModelException as e:
|
||||
@@ -178,145 +162,3 @@ async def add_model_record(
|
||||
|
||||
# now fetch it out
|
||||
return record_store.get_model(config.key)
|
||||
|
||||
|
||||
@model_records_router.post(
|
||||
"/import",
|
||||
operation_id="import_model_record",
|
||||
responses={
|
||||
201: {"description": "The model imported successfully"},
|
||||
415: {"description": "Unrecognized file/folder format"},
|
||||
424: {"description": "The model appeared to import successfully, but could not be found in the model manager"},
|
||||
409: {"description": "There is already a model corresponding to this path or repo_id"},
|
||||
},
|
||||
status_code=201,
|
||||
)
|
||||
async def import_model(
|
||||
source: ModelSource,
|
||||
config: Optional[Dict[str, Any]] = Body(
|
||||
description="Dict of fields that override auto-probed values in the model config record, such as name, description and prediction_type ",
|
||||
default=None,
|
||||
),
|
||||
) -> ModelInstallJob:
|
||||
"""Add a model using its local path, repo_id, or remote URL.
|
||||
|
||||
Models will be downloaded, probed, configured and installed in a
|
||||
series of background threads. The return object has `status` attribute
|
||||
that can be used to monitor progress.
|
||||
|
||||
The source object is a discriminated Union of LocalModelSource,
|
||||
HFModelSource and URLModelSource. Set the "type" field to the
|
||||
appropriate value:
|
||||
|
||||
* To install a local path using LocalModelSource, pass a source of form:
|
||||
`{
|
||||
"type": "local",
|
||||
"path": "/path/to/model",
|
||||
"inplace": false
|
||||
}`
|
||||
The "inplace" flag, if true, will register the model in place in its
|
||||
current filesystem location. Otherwise, the model will be copied
|
||||
into the InvokeAI models directory.
|
||||
|
||||
* To install a HuggingFace repo_id using HFModelSource, pass a source of form:
|
||||
`{
|
||||
"type": "hf",
|
||||
"repo_id": "stabilityai/stable-diffusion-2.0",
|
||||
"variant": "fp16",
|
||||
"subfolder": "vae",
|
||||
"access_token": "f5820a918aaf01"
|
||||
}`
|
||||
The `variant`, `subfolder` and `access_token` fields are optional.
|
||||
|
||||
* To install a remote model using an arbitrary URL, pass:
|
||||
`{
|
||||
"type": "url",
|
||||
"url": "http://www.civitai.com/models/123456",
|
||||
"access_token": "f5820a918aaf01"
|
||||
}`
|
||||
The `access_token` field is optional.
|
||||
|
||||
The model's configuration record will be probed and filled in
|
||||
automatically. To override the default guesses, pass "metadata"
|
||||
with a Dict containing the attributes you wish to override.
|
||||
|
||||
Installation occurs in the background. Either use list_model_install_jobs()
|
||||
to poll for completion, or listen on the event bus for the following events:
|
||||
|
||||
"model_install_started"
|
||||
"model_install_completed"
|
||||
"model_install_error"
|
||||
|
||||
On successful completion, the event's payload will contain the field "key"
|
||||
containing the installed ID of the model. On an error, the event's payload
|
||||
will contain the fields "error_type" and "error" describing the nature of the
|
||||
error and its traceback, respectively.
|
||||
|
||||
"""
|
||||
logger = ApiDependencies.invoker.services.logger
|
||||
|
||||
try:
|
||||
installer = ApiDependencies.invoker.services.model_install
|
||||
result: ModelInstallJob = installer.import_model(
|
||||
source=source,
|
||||
config=config,
|
||||
)
|
||||
logger.info(f"Started installation of {source}")
|
||||
except UnknownModelException as e:
|
||||
logger.error(str(e))
|
||||
raise HTTPException(status_code=424, detail=str(e))
|
||||
except InvalidModelException as e:
|
||||
logger.error(str(e))
|
||||
raise HTTPException(status_code=415)
|
||||
except ValueError as e:
|
||||
logger.error(str(e))
|
||||
raise HTTPException(status_code=409, detail=str(e))
|
||||
return result
|
||||
|
||||
|
||||
@model_records_router.get(
|
||||
"/import",
|
||||
operation_id="list_model_install_jobs",
|
||||
)
|
||||
async def list_model_install_jobs() -> List[ModelInstallJob]:
|
||||
"""
|
||||
Return list of model install jobs.
|
||||
|
||||
If the optional 'source' argument is provided, then the list will be filtered
|
||||
for partial string matches against the install source.
|
||||
"""
|
||||
jobs: List[ModelInstallJob] = ApiDependencies.invoker.services.model_install.list_jobs()
|
||||
return jobs
|
||||
|
||||
|
||||
@model_records_router.patch(
|
||||
"/import",
|
||||
operation_id="prune_model_install_jobs",
|
||||
responses={
|
||||
204: {"description": "All completed and errored jobs have been pruned"},
|
||||
400: {"description": "Bad request"},
|
||||
},
|
||||
)
|
||||
async def prune_model_install_jobs() -> Response:
|
||||
"""
|
||||
Prune all completed and errored jobs from the install job list.
|
||||
"""
|
||||
ApiDependencies.invoker.services.model_install.prune_jobs()
|
||||
return Response(status_code=204)
|
||||
|
||||
|
||||
@model_records_router.patch(
|
||||
"/sync",
|
||||
operation_id="sync_models_to_config",
|
||||
responses={
|
||||
204: {"description": "Model config record database resynced with files on disk"},
|
||||
400: {"description": "Bad request"},
|
||||
},
|
||||
)
|
||||
async def sync_models_to_config() -> Response:
|
||||
"""
|
||||
Traverse the models and autoimport directories. Model files without a corresponding
|
||||
record in the database are added. Orphan records without a models file are deleted.
|
||||
"""
|
||||
ApiDependencies.invoker.services.model_install.sync_to_config()
|
||||
return Response(status_code=204)
|
||||
|
||||
@@ -20,7 +20,6 @@ class SocketIO:
|
||||
self.__sio.on("subscribe_queue", handler=self._handle_sub_queue)
|
||||
self.__sio.on("unsubscribe_queue", handler=self._handle_unsub_queue)
|
||||
local_handler.register(event_name=EventServiceBase.queue_event, _func=self._handle_queue_event)
|
||||
local_handler.register(event_name=EventServiceBase.model_event, _func=self._handle_model_event)
|
||||
|
||||
async def _handle_queue_event(self, event: Event):
|
||||
await self.__sio.emit(
|
||||
@@ -29,13 +28,10 @@ class SocketIO:
|
||||
room=event[1]["data"]["queue_id"],
|
||||
)
|
||||
|
||||
async def _handle_sub_queue(self, sid, data, *args, **kwargs) -> None:
|
||||
async def _handle_sub_queue(self, sid, data, *args, **kwargs):
|
||||
if "queue_id" in data:
|
||||
await self.__sio.enter_room(sid, data["queue_id"])
|
||||
|
||||
async def _handle_unsub_queue(self, sid, data, *args, **kwargs) -> None:
|
||||
async def _handle_unsub_queue(self, sid, data, *args, **kwargs):
|
||||
if "queue_id" in data:
|
||||
await self.__sio.leave_room(sid, data["queue_id"])
|
||||
|
||||
async def _handle_model_event(self, event: Event) -> None:
|
||||
await self.__sio.emit(event=event[1]["event"], data=event[1]["data"])
|
||||
|
||||
@@ -219,19 +219,18 @@ def overridden_redoc() -> HTMLResponse:
|
||||
|
||||
web_root_path = Path(list(web_dir.__path__)[0])
|
||||
|
||||
# Only serve the UI if it has a build
|
||||
if (web_root_path / "dist").exists():
|
||||
# Cannot add headers to StaticFiles, so we must serve index.html with a custom route
|
||||
# Add cache-control: no-store header to prevent caching of index.html, which leads to broken UIs at release
|
||||
@app.get("/", include_in_schema=False, name="ui_root")
|
||||
def get_index() -> FileResponse:
|
||||
return FileResponse(Path(web_root_path, "dist/index.html"), headers={"Cache-Control": "no-store"})
|
||||
|
||||
# # Must mount *after* the other routes else it borks em
|
||||
app.mount("/assets", StaticFiles(directory=Path(web_root_path, "dist/assets/")), name="assets")
|
||||
app.mount("/locales", StaticFiles(directory=Path(web_root_path, "dist/locales/")), name="locales")
|
||||
# Cannot add headers to StaticFiles, so we must serve index.html with a custom route
|
||||
# Add cache-control: no-store header to prevent caching of index.html, which leads to broken UIs at release
|
||||
@app.get("/", include_in_schema=False, name="ui_root")
|
||||
def get_index() -> FileResponse:
|
||||
return FileResponse(Path(web_root_path, "dist/index.html"), headers={"Cache-Control": "no-store"})
|
||||
|
||||
|
||||
# # Must mount *after* the other routes else it borks em
|
||||
app.mount("/static", StaticFiles(directory=Path(web_root_path, "static/")), name="static") # docs favicon is in here
|
||||
app.mount("/assets", StaticFiles(directory=Path(web_root_path, "dist/assets/")), name="assets")
|
||||
app.mount("/locales", StaticFiles(directory=Path(web_root_path, "dist/locales/")), name="locales")
|
||||
|
||||
|
||||
def invoke_api() -> None:
|
||||
@@ -272,8 +271,6 @@ def invoke_api() -> None:
|
||||
port=port,
|
||||
loop="asyncio",
|
||||
log_level=app_config.log_level,
|
||||
ssl_certfile=app_config.ssl_certfile,
|
||||
ssl_keyfile=app_config.ssl_keyfile,
|
||||
)
|
||||
server = uvicorn.Server(config)
|
||||
|
||||
|
||||
@@ -4,7 +4,6 @@ from __future__ import annotations
|
||||
|
||||
import inspect
|
||||
import re
|
||||
import warnings
|
||||
from abc import ABC, abstractmethod
|
||||
from enum import Enum
|
||||
from inspect import signature
|
||||
@@ -39,19 +38,6 @@ class InvalidFieldError(TypeError):
|
||||
pass
|
||||
|
||||
|
||||
class Classification(str, Enum, metaclass=MetaEnum):
|
||||
"""
|
||||
The classification of an Invocation.
|
||||
- `Stable`: The invocation, including its inputs/outputs and internal logic, is stable. You may build workflows with it, having confidence that they will not break because of a change in this invocation.
|
||||
- `Beta`: The invocation is not yet stable, but is planned to be stable in the future. Workflows built around this invocation may break, but we are committed to supporting this invocation long-term.
|
||||
- `Prototype`: The invocation is not yet stable and may be removed from the application at any time. Workflows built around this invocation may break, and we are *not* committed to supporting this invocation.
|
||||
"""
|
||||
|
||||
Stable = "stable"
|
||||
Beta = "beta"
|
||||
Prototype = "prototype"
|
||||
|
||||
|
||||
class Input(str, Enum, metaclass=MetaEnum):
|
||||
"""
|
||||
The type of input a field accepts.
|
||||
@@ -452,7 +438,6 @@ class UIConfigBase(BaseModel):
|
||||
description='The node\'s version. Should be a valid semver string e.g. "1.0.0" or "3.8.13".',
|
||||
)
|
||||
node_pack: Optional[str] = Field(default=None, description="Whether or not this is a custom node")
|
||||
classification: Classification = Field(default=Classification.Stable, description="The node's classification")
|
||||
|
||||
model_config = ConfigDict(
|
||||
validate_assignment=True,
|
||||
@@ -621,7 +606,6 @@ class BaseInvocation(ABC, BaseModel):
|
||||
schema["category"] = uiconfig.category
|
||||
if uiconfig.node_pack is not None:
|
||||
schema["node_pack"] = uiconfig.node_pack
|
||||
schema["classification"] = uiconfig.classification
|
||||
schema["version"] = uiconfig.version
|
||||
if "required" not in schema or not isinstance(schema["required"], list):
|
||||
schema["required"] = []
|
||||
@@ -725,10 +709,8 @@ class _Model(BaseModel):
|
||||
pass
|
||||
|
||||
|
||||
with warnings.catch_warnings():
|
||||
warnings.simplefilter("ignore", category=DeprecationWarning)
|
||||
# Get all pydantic model attrs, methods, etc
|
||||
RESERVED_PYDANTIC_FIELD_NAMES = {m[0] for m in inspect.getmembers(_Model())}
|
||||
# Get all pydantic model attrs, methods, etc
|
||||
RESERVED_PYDANTIC_FIELD_NAMES = {m[0] for m in inspect.getmembers(_Model())}
|
||||
|
||||
|
||||
def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None:
|
||||
@@ -797,7 +779,6 @@ def invocation(
|
||||
category: Optional[str] = None,
|
||||
version: Optional[str] = None,
|
||||
use_cache: Optional[bool] = True,
|
||||
classification: Classification = Classification.Stable,
|
||||
) -> Callable[[Type[TBaseInvocation]], Type[TBaseInvocation]]:
|
||||
"""
|
||||
Registers an invocation.
|
||||
@@ -808,7 +789,6 @@ def invocation(
|
||||
:param Optional[str] category: Adds a category to the invocation. Used to group the invocations in the UI. Defaults to None.
|
||||
:param Optional[str] version: Adds a version to the invocation. Must be a valid semver string. Defaults to None.
|
||||
:param Optional[bool] use_cache: Whether or not to use the invocation cache. Defaults to True. The user may override this in the workflow editor.
|
||||
:param Classification classification: The classification of the invocation. Defaults to Classification.Stable. Use Beta or Prototype if the invocation is unstable.
|
||||
"""
|
||||
|
||||
def wrapper(cls: Type[TBaseInvocation]) -> Type[TBaseInvocation]:
|
||||
@@ -829,7 +809,6 @@ def invocation(
|
||||
cls.UIConfig.title = title
|
||||
cls.UIConfig.tags = tags
|
||||
cls.UIConfig.category = category
|
||||
cls.UIConfig.classification = classification
|
||||
|
||||
# Grab the node pack's name from the module name, if it's a custom node
|
||||
is_custom_node = cls.__module__.rsplit(".", 1)[0] == "invokeai.app.invocations"
|
||||
|
||||
@@ -13,15 +13,7 @@ from invokeai.app.shared.fields import FieldDescriptions
|
||||
from invokeai.backend.image_util.invisible_watermark import InvisibleWatermark
|
||||
from invokeai.backend.image_util.safety_checker import SafetyChecker
|
||||
|
||||
from .baseinvocation import (
|
||||
BaseInvocation,
|
||||
Classification,
|
||||
Input,
|
||||
InputField,
|
||||
InvocationContext,
|
||||
WithMetadata,
|
||||
invocation,
|
||||
)
|
||||
from .baseinvocation import BaseInvocation, Input, InputField, InvocationContext, WithMetadata, invocation
|
||||
|
||||
|
||||
@invocation("show_image", title="Show Image", tags=["image"], category="image", version="1.0.0")
|
||||
@@ -121,11 +113,11 @@ class ImageCropInvocation(BaseInvocation, WithMetadata):
|
||||
|
||||
|
||||
@invocation(
|
||||
invocation_type="img_pad_crop",
|
||||
title="Center Pad or Crop Image",
|
||||
"img_paste",
|
||||
title="Paste Image",
|
||||
tags=["image", "paste"],
|
||||
category="image",
|
||||
tags=["image", "pad", "crop"],
|
||||
version="1.0.0",
|
||||
version="1.2.0",
|
||||
)
|
||||
class CenterPadCropInvocation(BaseInvocation):
|
||||
"""Pad or crop an image's sides from the center by specified pixels. Positive values are outside of the image."""
|
||||
@@ -176,11 +168,11 @@ class CenterPadCropInvocation(BaseInvocation):
|
||||
|
||||
|
||||
@invocation(
|
||||
"img_paste",
|
||||
title="Paste Image",
|
||||
tags=["image", "paste"],
|
||||
invocation_type="img_pad_crop",
|
||||
title="Center Pad or Crop Image",
|
||||
category="image",
|
||||
version="1.2.0",
|
||||
tags=["image", "pad", "crop"],
|
||||
version="1.0.0",
|
||||
)
|
||||
class ImagePasteInvocation(BaseInvocation, WithMetadata):
|
||||
"""Pastes an image into another image."""
|
||||
@@ -429,64 +421,6 @@ class ImageBlurInvocation(BaseInvocation, WithMetadata):
|
||||
)
|
||||
|
||||
|
||||
@invocation(
|
||||
"unsharp_mask",
|
||||
title="Unsharp Mask",
|
||||
tags=["image", "unsharp_mask"],
|
||||
category="image",
|
||||
version="1.2.0",
|
||||
classification=Classification.Beta,
|
||||
)
|
||||
class UnsharpMaskInvocation(BaseInvocation, WithMetadata):
|
||||
"""Applies an unsharp mask filter to an image"""
|
||||
|
||||
image: ImageField = InputField(description="The image to use")
|
||||
radius: float = InputField(gt=0, description="Unsharp mask radius", default=2)
|
||||
strength: float = InputField(ge=0, description="Unsharp mask strength", default=50)
|
||||
|
||||
def pil_from_array(self, arr):
|
||||
return Image.fromarray((arr * 255).astype("uint8"))
|
||||
|
||||
def array_from_pil(self, img):
|
||||
return numpy.array(img) / 255
|
||||
|
||||
def invoke(self, context: InvocationContext) -> ImageOutput:
|
||||
image = context.services.images.get_pil_image(self.image.image_name)
|
||||
mode = image.mode
|
||||
|
||||
alpha_channel = image.getchannel("A") if mode == "RGBA" else None
|
||||
image = image.convert("RGB")
|
||||
image_blurred = self.array_from_pil(image.filter(ImageFilter.GaussianBlur(radius=self.radius)))
|
||||
|
||||
image = self.array_from_pil(image)
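# Unsharp mask: add the high-frequency detail (original minus Gaussian-blurred copy),
# scaled by strength/100, back onto the image.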
|
||||
image += (image - image_blurred) * (self.strength / 100.0)
|
||||
image = numpy.clip(image, 0, 1)
|
||||
image = self.pil_from_array(image)
|
||||
|
||||
image = image.convert(mode)
|
||||
|
||||
# Make the image RGBA if we had a source alpha channel
|
||||
if alpha_channel is not None:
|
||||
image.putalpha(alpha_channel)
|
||||
|
||||
image_dto = context.services.images.create(
|
||||
image=image,
|
||||
image_origin=ResourceOrigin.INTERNAL,
|
||||
image_category=ImageCategory.GENERAL,
|
||||
node_id=self.id,
|
||||
session_id=context.graph_execution_state_id,
|
||||
is_intermediate=self.is_intermediate,
|
||||
metadata=self.metadata,
|
||||
workflow=context.workflow,
|
||||
)
|
||||
|
||||
return ImageOutput(
|
||||
image=ImageField(image_name=image_dto.image_name),
|
||||
width=image.width,
|
||||
height=image.height,
|
||||
)
|
||||
|
||||
|
||||
PIL_RESAMPLING_MODES = Literal[
|
||||
"nearest",
|
||||
"box",
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
from typing import Literal
|
||||
|
||||
import numpy as np
|
||||
from PIL import Image
|
||||
from pydantic import BaseModel
|
||||
@@ -7,8 +5,6 @@ from pydantic import BaseModel
|
||||
from invokeai.app.invocations.baseinvocation import (
|
||||
BaseInvocation,
|
||||
BaseInvocationOutput,
|
||||
Classification,
|
||||
Input,
|
||||
InputField,
|
||||
InvocationContext,
|
||||
OutputField,
|
||||
@@ -18,13 +14,7 @@ from invokeai.app.invocations.baseinvocation import (
|
||||
)
|
||||
from invokeai.app.invocations.primitives import ImageField, ImageOutput
|
||||
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
|
||||
from invokeai.backend.tiles.tiles import (
|
||||
calc_tiles_even_split,
|
||||
calc_tiles_min_overlap,
|
||||
calc_tiles_with_overlap,
|
||||
merge_tiles_with_linear_blending,
|
||||
merge_tiles_with_seam_blending,
|
||||
)
|
||||
from invokeai.backend.tiles.tiles import calc_tiles_with_overlap, merge_tiles_with_linear_blending
|
||||
from invokeai.backend.tiles.utils import Tile
|
||||
|
||||
|
||||
@@ -38,14 +28,7 @@ class CalculateImageTilesOutput(BaseInvocationOutput):
|
||||
tiles: list[Tile] = OutputField(description="The tiles coordinates that cover a particular image shape.")
|
||||
|
||||
|
||||
@invocation(
|
||||
"calculate_image_tiles",
|
||||
title="Calculate Image Tiles",
|
||||
tags=["tiles"],
|
||||
category="tiles",
|
||||
version="1.0.0",
|
||||
classification=Classification.Beta,
|
||||
)
|
||||
@invocation("calculate_image_tiles", title="Calculate Image Tiles", tags=["tiles"], category="tiles", version="1.0.0")
|
||||
class CalculateImageTilesInvocation(BaseInvocation):
|
||||
"""Calculate the coordinates and overlaps of tiles that cover a target image shape."""
|
||||
|
||||
@@ -72,79 +55,6 @@ class CalculateImageTilesInvocation(BaseInvocation):
|
||||
return CalculateImageTilesOutput(tiles=tiles)
|
||||
|
||||
|
||||
@invocation(
|
||||
"calculate_image_tiles_even_split",
|
||||
title="Calculate Image Tiles Even Split",
|
||||
tags=["tiles"],
|
||||
category="tiles",
|
||||
version="1.1.0",
|
||||
classification=Classification.Beta,
|
||||
)
|
||||
class CalculateImageTilesEvenSplitInvocation(BaseInvocation):
|
||||
"""Calculate the coordinates and overlaps of tiles that cover a target image shape."""
|
||||
|
||||
image_width: int = InputField(ge=1, default=1024, description="The image width, in pixels, to calculate tiles for.")
|
||||
image_height: int = InputField(
|
||||
ge=1, default=1024, description="The image height, in pixels, to calculate tiles for."
|
||||
)
|
||||
num_tiles_x: int = InputField(
|
||||
default=2,
|
||||
ge=1,
|
||||
description="Number of tiles to divide image into on the x axis",
|
||||
)
|
||||
num_tiles_y: int = InputField(
|
||||
default=2,
|
||||
ge=1,
|
||||
description="Number of tiles to divide image into on the y axis",
|
||||
)
|
||||
overlap: int = InputField(
|
||||
default=128,
|
||||
ge=0,
|
||||
multiple_of=8,
|
||||
description="The overlap, in pixels, between adjacent tiles.",
|
||||
)
|
||||
|
||||
def invoke(self, context: InvocationContext) -> CalculateImageTilesOutput:
|
||||
tiles = calc_tiles_even_split(
|
||||
image_height=self.image_height,
|
||||
image_width=self.image_width,
|
||||
num_tiles_x=self.num_tiles_x,
|
||||
num_tiles_y=self.num_tiles_y,
|
||||
overlap=self.overlap,
|
||||
)
|
||||
return CalculateImageTilesOutput(tiles=tiles)
|
||||
|
||||
|
||||
@invocation(
|
||||
"calculate_image_tiles_min_overlap",
|
||||
title="Calculate Image Tiles Minimum Overlap",
|
||||
tags=["tiles"],
|
||||
category="tiles",
|
||||
version="1.0.0",
|
||||
classification=Classification.Beta,
|
||||
)
|
||||
class CalculateImageTilesMinimumOverlapInvocation(BaseInvocation):
|
||||
"""Calculate the coordinates and overlaps of tiles that cover a target image shape."""
|
||||
|
||||
image_width: int = InputField(ge=1, default=1024, description="The image width, in pixels, to calculate tiles for.")
|
||||
image_height: int = InputField(
|
||||
ge=1, default=1024, description="The image height, in pixels, to calculate tiles for."
|
||||
)
|
||||
tile_width: int = InputField(ge=1, default=576, description="The tile width, in pixels.")
|
||||
tile_height: int = InputField(ge=1, default=576, description="The tile height, in pixels.")
|
||||
min_overlap: int = InputField(default=128, ge=0, description="Minimum overlap between adjacent tiles, in pixels.")
|
||||
|
||||
def invoke(self, context: InvocationContext) -> CalculateImageTilesOutput:
|
||||
tiles = calc_tiles_min_overlap(
|
||||
image_height=self.image_height,
|
||||
image_width=self.image_width,
|
||||
tile_height=self.tile_height,
|
||||
tile_width=self.tile_width,
|
||||
min_overlap=self.min_overlap,
|
||||
)
|
||||
return CalculateImageTilesOutput(tiles=tiles)
|
||||
|
||||
|
||||
@invocation_output("tile_to_properties_output")
|
||||
class TileToPropertiesOutput(BaseInvocationOutput):
|
||||
coords_left: int = OutputField(description="Left coordinate of the tile relative to its parent image.")
|
||||
@@ -166,14 +76,7 @@ class TileToPropertiesOutput(BaseInvocationOutput):
|
||||
overlap_right: int = OutputField(description="Overlap between this tile and its right neighbor.")
|
||||
|
||||
|
||||
@invocation(
|
||||
"tile_to_properties",
|
||||
title="Tile to Properties",
|
||||
tags=["tiles"],
|
||||
category="tiles",
|
||||
version="1.0.0",
|
||||
classification=Classification.Beta,
|
||||
)
|
||||
@invocation("tile_to_properties", title="Tile to Properties", tags=["tiles"], category="tiles", version="1.0.0")
|
||||
class TileToPropertiesInvocation(BaseInvocation):
|
||||
"""Split a Tile into its individual properties."""
|
||||
|
||||
@@ -199,14 +102,7 @@ class PairTileImageOutput(BaseInvocationOutput):
|
||||
tile_with_image: TileWithImage = OutputField(description="A tile description with its corresponding image.")
|
||||
|
||||
|
||||
@invocation(
|
||||
"pair_tile_image",
|
||||
title="Pair Tile with Image",
|
||||
tags=["tiles"],
|
||||
category="tiles",
|
||||
version="1.0.0",
|
||||
classification=Classification.Beta,
|
||||
)
|
||||
@invocation("pair_tile_image", title="Pair Tile with Image", tags=["tiles"], category="tiles", version="1.0.0")
|
||||
class PairTileImageInvocation(BaseInvocation):
|
||||
"""Pair an image with its tile properties."""
|
||||
|
||||
@@ -225,29 +121,13 @@ class PairTileImageInvocation(BaseInvocation):
|
||||
)
|
||||
|
||||
|
||||
BLEND_MODES = Literal["Linear", "Seam"]
|
||||
|
||||
|
||||
@invocation(
|
||||
"merge_tiles_to_image",
|
||||
title="Merge Tiles to Image",
|
||||
tags=["tiles"],
|
||||
category="tiles",
|
||||
version="1.1.0",
|
||||
classification=Classification.Beta,
|
||||
)
|
||||
@invocation("merge_tiles_to_image", title="Merge Tiles to Image", tags=["tiles"], category="tiles", version="1.1.0")
|
||||
class MergeTilesToImageInvocation(BaseInvocation, WithMetadata):
|
||||
"""Merge multiple tile images into a single image."""
|
||||
|
||||
# Inputs
|
||||
tiles_with_images: list[TileWithImage] = InputField(description="A list of tile images with tile properties.")
|
||||
blend_mode: BLEND_MODES = InputField(
|
||||
default="Seam",
|
||||
description="blending type Linear or Seam",
|
||||
input=Input.Direct,
|
||||
)
|
||||
blend_amount: int = InputField(
|
||||
default=32,
|
||||
ge=0,
|
||||
description="The amount to blend adjacent tiles in pixels. Must be <= the amount of overlap between adjacent tiles.",
|
||||
)
|
||||
@@ -277,18 +157,10 @@ class MergeTilesToImageInvocation(BaseInvocation, WithMetadata):
|
||||
channels = tile_np_images[0].shape[-1]
|
||||
dtype = tile_np_images[0].dtype
|
||||
np_image = np.zeros(shape=(height, width, channels), dtype=dtype)
|
||||
if self.blend_mode == "Linear":
|
||||
merge_tiles_with_linear_blending(
|
||||
dst_image=np_image, tiles=tiles, tile_images=tile_np_images, blend_amount=self.blend_amount
|
||||
)
|
||||
elif self.blend_mode == "Seam":
|
||||
merge_tiles_with_seam_blending(
|
||||
dst_image=np_image, tiles=tiles, tile_images=tile_np_images, blend_amount=self.blend_amount
|
||||
)
|
||||
else:
|
||||
raise ValueError(f"Unsupported blend mode: '{self.blend_mode}'.")
|
||||
|
||||
# Convert into a PIL image and save
|
||||
merge_tiles_with_linear_blending(
|
||||
dst_image=np_image, tiles=tiles, tile_images=tile_np_images, blend_amount=self.blend_amount
|
||||
)
|
||||
pil_image = Image.fromarray(np_image)
|
||||
|
||||
image_dto = context.services.images.create(
|
||||
|
||||
@@ -20,6 +20,63 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
|
||||
self._conn = db.conn
|
||||
self._cursor = self._conn.cursor()
|
||||
|
||||
try:
|
||||
self._lock.acquire()
|
||||
self._create_tables()
|
||||
self._conn.commit()
|
||||
finally:
|
||||
self._lock.release()
|
||||
|
||||
def _create_tables(self) -> None:
|
||||
"""Creates the `board_images` junction table."""
|
||||
|
||||
# Create the `board_images` junction table.
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
CREATE TABLE IF NOT EXISTS board_images (
|
||||
board_id TEXT NOT NULL,
|
||||
image_name TEXT NOT NULL,
|
||||
created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
-- updated via trigger
|
||||
updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
-- Soft delete, currently unused
|
||||
deleted_at DATETIME,
|
||||
-- enforce one-to-many relationship between boards and images using PK
|
||||
-- (we can extend this to many-to-many later)
|
||||
PRIMARY KEY (image_name),
|
||||
FOREIGN KEY (board_id) REFERENCES boards (board_id) ON DELETE CASCADE,
|
||||
FOREIGN KEY (image_name) REFERENCES images (image_name) ON DELETE CASCADE
|
||||
);
|
||||
"""
|
||||
)
|
||||
|
||||
# Add index for board id
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
CREATE INDEX IF NOT EXISTS idx_board_images_board_id ON board_images (board_id);
|
||||
"""
|
||||
)
|
||||
|
||||
# Add index for board id, sorted by created_at
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
CREATE INDEX IF NOT EXISTS idx_board_images_board_id_created_at ON board_images (board_id, created_at);
|
||||
"""
|
||||
)
|
||||
|
||||
# Add trigger for `updated_at`.
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
CREATE TRIGGER IF NOT EXISTS tg_board_images_updated_at
|
||||
AFTER UPDATE
|
||||
ON board_images FOR EACH ROW
|
||||
BEGIN
|
||||
UPDATE board_images SET updated_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
|
||||
WHERE board_id = old.board_id AND image_name = old.image_name;
|
||||
END;
|
||||
"""
|
||||
)
|
||||
|
||||
def add_image_to_board(
|
||||
self,
|
||||
board_id: str,
|
||||
|
||||
@@ -28,6 +28,52 @@ class SqliteBoardRecordStorage(BoardRecordStorageBase):
|
||||
self._conn = db.conn
|
||||
self._cursor = self._conn.cursor()
|
||||
|
||||
try:
|
||||
self._lock.acquire()
|
||||
self._create_tables()
|
||||
self._conn.commit()
|
||||
finally:
|
||||
self._lock.release()
|
||||
|
||||
def _create_tables(self) -> None:
|
||||
"""Creates the `boards` table and `board_images` junction table."""
|
||||
|
||||
# Create the `boards` table.
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
CREATE TABLE IF NOT EXISTS boards (
|
||||
board_id TEXT NOT NULL PRIMARY KEY,
|
||||
board_name TEXT NOT NULL,
|
||||
cover_image_name TEXT,
|
||||
created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
-- Updated via trigger
|
||||
updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
-- Soft delete, currently unused
|
||||
deleted_at DATETIME,
|
||||
FOREIGN KEY (cover_image_name) REFERENCES images (image_name) ON DELETE SET NULL
|
||||
);
|
||||
"""
|
||||
)
|
||||
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
CREATE INDEX IF NOT EXISTS idx_boards_created_at ON boards (created_at);
|
||||
"""
|
||||
)
|
||||
|
||||
# Add trigger for `updated_at`.
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
CREATE TRIGGER IF NOT EXISTS tg_boards_updated_at
|
||||
AFTER UPDATE
|
||||
ON boards FOR EACH ROW
|
||||
BEGIN
|
||||
UPDATE boards SET updated_at = current_timestamp
|
||||
WHERE board_id = old.board_id;
|
||||
END;
|
||||
"""
|
||||
)
|
||||
|
||||
def delete(self, board_id: str) -> None:
|
||||
try:
|
||||
self._lock.acquire()
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
"""Init file for InvokeAI configure package."""
|
||||
"""
|
||||
Init file for InvokeAI configure package
|
||||
"""
|
||||
|
||||
from .config_default import InvokeAIAppConfig, get_invokeai_config
|
||||
|
||||
__all__ = ["InvokeAIAppConfig", "get_invokeai_config"]
|
||||
from .config_base import PagingArgumentParser # noqa F401
|
||||
from .config_default import InvokeAIAppConfig, get_invokeai_config # noqa F401
|
||||
|
||||
@@ -173,7 +173,7 @@ from __future__ import annotations
|
||||
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import Any, ClassVar, Dict, List, Literal, Optional, Union, get_type_hints
|
||||
from typing import ClassVar, Dict, List, Literal, Optional, Union, get_type_hints
|
||||
|
||||
from omegaconf import DictConfig, OmegaConf
|
||||
from pydantic import Field, TypeAdapter
|
||||
@@ -221,9 +221,6 @@ class InvokeAIAppConfig(InvokeAISettings):
|
||||
allow_credentials : bool = Field(default=True, description="Allow CORS credentials", json_schema_extra=Categories.WebServer)
|
||||
allow_methods : List[str] = Field(default=["*"], description="Methods allowed for CORS", json_schema_extra=Categories.WebServer)
|
||||
allow_headers : List[str] = Field(default=["*"], description="Headers allowed for CORS", json_schema_extra=Categories.WebServer)
|
||||
# SSL options correspond to https://www.uvicorn.org/settings/#https
|
||||
ssl_certfile : Optional[Path] = Field(default=None, description="SSL certificate file (for HTTPS)", json_schema_extra=Categories.WebServer)
|
||||
ssl_keyfile : Optional[Path] = Field(default=None, description="SSL key file", json_schema_extra=Categories.WebServer)
|
||||
|
||||
# FEATURES
|
||||
esrgan : bool = Field(default=True, description="Enable/disable upscaling code", json_schema_extra=Categories.Features)
|
||||
@@ -337,7 +334,7 @@ class InvokeAIAppConfig(InvokeAISettings):
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def get_config(cls, **kwargs: Dict[str, Any]) -> InvokeAIAppConfig:
|
||||
def get_config(cls, **kwargs) -> InvokeAIAppConfig:
|
||||
"""Return a singleton InvokeAIAppConfig configuration object."""
|
||||
if (
|
||||
cls.singleton_config is None
|
||||
@@ -386,17 +383,17 @@ class InvokeAIAppConfig(InvokeAISettings):
|
||||
return db_dir / DB_FILE
|
||||
|
||||
@property
|
||||
def model_conf_path(self) -> Path:
|
||||
def model_conf_path(self) -> Optional[Path]:
|
||||
"""Path to models configuration file."""
|
||||
return self._resolve(self.conf_path)
|
||||
|
||||
@property
|
||||
def legacy_conf_path(self) -> Path:
|
||||
def legacy_conf_path(self) -> Optional[Path]:
|
||||
"""Path to directory of legacy configuration files (e.g. v1-inference.yaml)."""
|
||||
return self._resolve(self.legacy_conf_dir)
|
||||
|
||||
@property
|
||||
def models_path(self) -> Path:
|
||||
def models_path(self) -> Optional[Path]:
|
||||
"""Path to the models directory."""
|
||||
return self._resolve(self.models_dir)
|
||||
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
from .events_base import EventServiceBase # noqa F401
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
|
||||
|
||||
|
||||
from typing import Any, Optional
|
||||
|
||||
from invokeai.app.services.invocation_processor.invocation_processor_common import ProgressImage
|
||||
@@ -17,7 +16,6 @@ from invokeai.backend.model_management.models.base import BaseModelType, ModelTy
|
||||
|
||||
class EventServiceBase:
|
||||
queue_event: str = "queue_event"
|
||||
model_event: str = "model_event"
|
||||
|
||||
"""Basic event bus, to have an empty stand-in when not needed"""
|
||||
|
||||
@@ -32,13 +30,6 @@ class EventServiceBase:
|
||||
payload={"event": event_name, "data": payload},
|
||||
)
|
||||
|
||||
def __emit_model_event(self, event_name: str, payload: dict) -> None:
|
||||
payload["timestamp"] = get_timestamp()
|
||||
self.dispatch(
|
||||
event_name=EventServiceBase.model_event,
|
||||
payload={"event": event_name, "data": payload},
|
||||
)
|
||||
|
||||
# Define events here for every event in the system.
|
||||
# This will make them easier to integrate until we find a schema generator.
|
||||
def emit_generator_progress(
|
||||
@@ -322,73 +313,3 @@ class EventServiceBase:
|
||||
event_name="queue_cleared",
|
||||
payload={"queue_id": queue_id},
|
||||
)
|
||||
|
||||
def emit_model_install_started(self, source: str) -> None:
|
||||
"""
|
||||
Emitted when an install job is started.
|
||||
|
||||
:param source: Source of the model; local path, repo_id or url
|
||||
"""
|
||||
self.__emit_model_event(
|
||||
event_name="model_install_started",
|
||||
payload={"source": source},
|
||||
)
|
||||
|
||||
def emit_model_install_completed(self, source: str, key: str) -> None:
|
||||
"""
|
||||
Emitted when an install job is completed successfully.
|
||||
|
||||
:param source: Source of the model; local path, repo_id or url
|
||||
:param key: Model config record key
|
||||
"""
|
||||
self.__emit_model_event(
|
||||
event_name="model_install_completed",
|
||||
payload={
|
||||
"source": source,
|
||||
"key": key,
|
||||
},
|
||||
)
|
||||
|
||||
def emit_model_install_progress(
|
||||
self,
|
||||
source: str,
|
||||
current_bytes: int,
|
||||
total_bytes: int,
|
||||
) -> None:
|
||||
"""
|
||||
Emitted while the install job is in progress.
|
||||
(Downloaded models only)
|
||||
|
||||
:param source: Source of the model
|
||||
:param current_bytes: Number of bytes downloaded so far
|
||||
:param total_bytes: Total bytes to download
|
||||
"""
|
||||
self.__emit_model_event(
event_name="model_install_progress",
payload={
"source": source,
"current_bytes": current_bytes,
"total_bytes": total_bytes,
},
)
|
||||
|
||||
def emit_model_install_error(
|
||||
self,
|
||||
source: str,
|
||||
error_type: str,
|
||||
error: str,
|
||||
) -> None:
|
||||
"""
|
||||
Emitted when an install job encounters an exception.
|
||||
|
||||
:param source: Source of the model
|
||||
:param exception: The exception that raised the error
|
||||
"""
|
||||
self.__emit_model_event(
|
||||
event_name="model_install_error",
|
||||
payload={
|
||||
"source": source,
|
||||
"error_type": error_type,
|
||||
"error": error,
|
||||
},
|
||||
)
|
||||
|
||||
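# --- Illustration (not part of the diff above) ---
# A hedged sketch of the envelope produced by __emit_model_event: each model
# event is dispatched on the shared "model_event" channel, with the specific
# event name and its data nested inside and a timestamp added to the data.
# All values below are made-up examples.
example_model_event = {
    "event": "model_install_progress",
    "data": {
        "source": "https://example.com/foo.safetensors",
        "current_bytes": 1_048_576,
        "total_bytes": 4_194_304,
        "timestamp": 1700000000,
    },
}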
@@ -32,6 +32,101 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
|
||||
self._conn = db.conn
|
||||
self._cursor = self._conn.cursor()
|
||||
|
||||
try:
|
||||
self._lock.acquire()
|
||||
self._create_tables()
|
||||
self._conn.commit()
|
||||
finally:
|
||||
self._lock.release()
|
||||
|
||||
def _create_tables(self) -> None:
|
||||
"""Creates the `images` table."""
|
||||
|
||||
# Create the `images` table.
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
CREATE TABLE IF NOT EXISTS images (
|
||||
image_name TEXT NOT NULL PRIMARY KEY,
|
||||
-- This is an enum in python, unrestricted string here for flexibility
|
||||
image_origin TEXT NOT NULL,
|
||||
-- This is an enum in python, unrestricted string here for flexibility
|
||||
image_category TEXT NOT NULL,
|
||||
width INTEGER NOT NULL,
|
||||
height INTEGER NOT NULL,
|
||||
session_id TEXT,
|
||||
node_id TEXT,
|
||||
metadata TEXT,
|
||||
is_intermediate BOOLEAN DEFAULT FALSE,
|
||||
created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
-- Updated via trigger
|
||||
updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
-- Soft delete, currently unused
|
||||
deleted_at DATETIME
|
||||
);
|
||||
"""
|
||||
)
|
||||
|
||||
self._cursor.execute("PRAGMA table_info(images)")
|
||||
columns = [column[1] for column in self._cursor.fetchall()]
|
||||
|
||||
if "starred" not in columns:
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
ALTER TABLE images ADD COLUMN starred BOOLEAN DEFAULT FALSE;
|
||||
"""
|
||||
)
|
||||
|
||||
# Create the `images` table indices.
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS idx_images_image_name ON images(image_name);
|
||||
"""
|
||||
)
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
CREATE INDEX IF NOT EXISTS idx_images_image_origin ON images(image_origin);
|
||||
"""
|
||||
)
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
CREATE INDEX IF NOT EXISTS idx_images_image_category ON images(image_category);
|
||||
"""
|
||||
)
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
CREATE INDEX IF NOT EXISTS idx_images_created_at ON images(created_at);
|
||||
"""
|
||||
)
|
||||
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
CREATE INDEX IF NOT EXISTS idx_images_starred ON images(starred);
|
||||
"""
|
||||
)
|
||||
|
||||
# Add trigger for `updated_at`.
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
CREATE TRIGGER IF NOT EXISTS tg_images_updated_at
|
||||
AFTER UPDATE
|
||||
ON images FOR EACH ROW
|
||||
BEGIN
|
||||
UPDATE images SET updated_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
|
||||
WHERE image_name = old.image_name;
|
||||
END;
|
||||
"""
|
||||
)
|
||||
|
||||
self._cursor.execute("PRAGMA table_info(images)")
|
||||
columns = [column[1] for column in self._cursor.fetchall()]
|
||||
if "has_workflow" not in columns:
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
ALTER TABLE images
|
||||
ADD COLUMN has_workflow BOOLEAN DEFAULT FALSE;
|
||||
"""
|
||||
)
|
||||
|
||||
def get(self, image_name: str) -> ImageRecord:
|
||||
try:
|
||||
self._lock.acquire()
|
||||
|
||||
@@ -21,7 +21,6 @@ if TYPE_CHECKING:
|
||||
from .invocation_stats.invocation_stats_base import InvocationStatsServiceBase
|
||||
from .item_storage.item_storage_base import ItemStorageABC
|
||||
from .latents_storage.latents_storage_base import LatentsStorageBase
|
||||
from .model_install import ModelInstallServiceBase
|
||||
from .model_manager.model_manager_base import ModelManagerServiceBase
|
||||
from .model_records import ModelRecordServiceBase
|
||||
from .names.names_base import NameServiceBase
|
||||
@@ -51,7 +50,6 @@ class InvocationServices:
|
||||
logger: "Logger"
|
||||
model_manager: "ModelManagerServiceBase"
|
||||
model_records: "ModelRecordServiceBase"
|
||||
model_install: "ModelInstallServiceBase"
|
||||
processor: "InvocationProcessorABC"
|
||||
performance_statistics: "InvocationStatsServiceBase"
|
||||
queue: "InvocationQueueABC"
|
||||
@@ -79,7 +77,6 @@ class InvocationServices:
|
||||
logger: "Logger",
|
||||
model_manager: "ModelManagerServiceBase",
|
||||
model_records: "ModelRecordServiceBase",
|
||||
model_install: "ModelInstallServiceBase",
|
||||
processor: "InvocationProcessorABC",
|
||||
performance_statistics: "InvocationStatsServiceBase",
|
||||
queue: "InvocationQueueABC",
|
||||
@@ -105,7 +102,6 @@ class InvocationServices:
|
||||
self.logger = logger
|
||||
self.model_manager = model_manager
|
||||
self.model_records = model_records
|
||||
self.model_install = model_install
|
||||
self.processor = processor
|
||||
self.performance_statistics = performance_statistics
|
||||
self.queue = queue
|
||||
|
||||
@@ -1,25 +0,0 @@
|
||||
"""Initialization file for model install service package."""
|
||||
|
||||
from .model_install_base import (
|
||||
HFModelSource,
|
||||
InstallStatus,
|
||||
LocalModelSource,
|
||||
ModelInstallJob,
|
||||
ModelInstallServiceBase,
|
||||
ModelSource,
|
||||
UnknownInstallJobException,
|
||||
URLModelSource,
|
||||
)
|
||||
from .model_install_default import ModelInstallService
|
||||
|
||||
__all__ = [
|
||||
"ModelInstallServiceBase",
|
||||
"ModelInstallService",
|
||||
"InstallStatus",
|
||||
"ModelInstallJob",
|
||||
"UnknownInstallJobException",
|
||||
"ModelSource",
|
||||
"LocalModelSource",
|
||||
"HFModelSource",
|
||||
"URLModelSource",
|
||||
]
|
||||
@@ -1,306 +0,0 @@
|
||||
import re
|
||||
import traceback
|
||||
from abc import ABC, abstractmethod
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Literal, Optional, Union
|
||||
|
||||
from pydantic import BaseModel, Field, field_validator
|
||||
from pydantic.networks import AnyHttpUrl
|
||||
from typing_extensions import Annotated
|
||||
|
||||
from invokeai.app.services.config import InvokeAIAppConfig
|
||||
from invokeai.app.services.events import EventServiceBase
|
||||
from invokeai.app.services.invoker import Invoker
|
||||
from invokeai.app.services.model_records import ModelRecordServiceBase
|
||||
from invokeai.backend.model_manager import AnyModelConfig
|
||||
|
||||
|
||||
class InstallStatus(str, Enum):
"""State of an install job running in the background."""

WAITING = "waiting" # waiting to be dequeued
RUNNING = "running" # being processed
COMPLETED = "completed" # finished running
ERROR = "error" # terminated with an error message


class UnknownInstallJobException(Exception):
"""Raised when the status of an unknown job is requested."""
|
||||
|
||||
|
||||
class StringLikeSource(BaseModel):
|
||||
"""
|
||||
Base class for model sources, implements functions that let the source be sorted and indexed.
|
||||
|
||||
These shenanigans let this stuff work:
|
||||
|
||||
source1 = LocalModelSource(path='C:/users/mort/foo.safetensors')
|
||||
mydict = {source1: 'model 1'}
|
||||
assert mydict['C:/users/mort/foo.safetensors'] == 'model 1'
|
||||
assert mydict[LocalModelSource(path='C:/users/mort/foo.safetensors')] == 'model 1'
|
||||
|
||||
source2 = LocalModelSource(path=Path('C:/users/mort/foo.safetensors'))
|
||||
assert source1 == source2
|
||||
assert source1 == 'C:/users/mort/foo.safetensors'
|
||||
"""
|
||||
|
||||
def __hash__(self) -> int:
|
||||
"""Return hash of the path field, for indexing."""
|
||||
return hash(str(self))
|
||||
|
||||
def __lt__(self, other: object) -> bool:
|
||||
"""Return comparison of the stringified version, for sorting."""
|
||||
return str(self) < str(other)
|
||||
|
||||
def __eq__(self, other: object) -> bool:
|
||||
"""Return equality on the stringified version."""
|
||||
if isinstance(other, Path):
|
||||
return str(self) == other.as_posix()
|
||||
else:
|
||||
return str(self) == str(other)
|
||||
|
||||
|
||||
class LocalModelSource(StringLikeSource):
|
||||
"""A local file or directory path."""
|
||||
|
||||
path: str | Path
|
||||
inplace: Optional[bool] = False
|
||||
type: Literal["local"] = "local"
|
||||
|
||||
# these methods allow the source to be used in a string-like way,
|
||||
# for example as an index into a dict
|
||||
def __str__(self) -> str:
|
||||
"""Return string version of path when string rep needed."""
|
||||
return Path(self.path).as_posix()
|
||||
|
||||
|
||||
class HFModelSource(StringLikeSource):
|
||||
"""A HuggingFace repo_id, with optional variant and sub-folder."""
|
||||
|
||||
repo_id: str
|
||||
variant: Optional[str] = None
|
||||
subfolder: Optional[str | Path] = None
|
||||
access_token: Optional[str] = None
|
||||
type: Literal["hf"] = "hf"
|
||||
|
||||
@field_validator("repo_id")
|
||||
@classmethod
|
||||
def proper_repo_id(cls, v: str) -> str: # noqa D102
|
||||
if not re.match(r"^([.\w-]+/[.\w-]+)$", v):
|
||||
raise ValueError(f"{v}: invalid repo_id format")
|
||||
return v
|
||||
|
||||
def __str__(self) -> str:
|
||||
"""Return string version of repoid when string rep needed."""
|
||||
base: str = self.repo_id
|
||||
base += f":{self.subfolder}" if self.subfolder else ""
|
||||
base += f" ({self.variant})" if self.variant else ""
|
||||
return base
|
||||
|
||||
|
||||
class URLModelSource(StringLikeSource):
|
||||
"""A generic URL point to a checkpoint file."""
|
||||
|
||||
url: AnyHttpUrl
|
||||
access_token: Optional[str] = None
|
||||
type: Literal["generic_url"] = "generic_url"
|
||||
|
||||
def __str__(self) -> str:
|
||||
"""Return string version of the url when string rep needed."""
|
||||
return str(self.url)
|
||||
|
||||
|
||||
ModelSource = Annotated[Union[LocalModelSource, HFModelSource, URLModelSource], Field(discriminator="type")]
|
||||
|
||||
|
||||
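# --- Illustration (not part of the diff above) ---
# Hedged sketch: because ModelSource is a discriminated union on the "type"
# field, a plain dict can be validated into the matching source class with a
# pydantic TypeAdapter. The payloads below are hypothetical examples.
from pydantic import TypeAdapter

_source_adapter = TypeAdapter(ModelSource)
local = _source_adapter.validate_python({"type": "local", "path": "/tmp/foo.safetensors"})
hf = _source_adapter.validate_python({"type": "hf", "repo_id": "stabilityai/sd-vae-ft-mse"})
assert isinstance(local, LocalModelSource)
assert isinstance(hf, HFModelSource)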
class ModelInstallJob(BaseModel):
|
||||
"""Object that tracks the current status of an install request."""
|
||||
|
||||
status: InstallStatus = Field(default=InstallStatus.WAITING, description="Current status of install process")
|
||||
config_in: Dict[str, Any] = Field(
|
||||
default_factory=dict, description="Configuration information (e.g. 'description') to apply to model."
|
||||
)
|
||||
config_out: Optional[AnyModelConfig] = Field(
|
||||
default=None, description="After successful installation, this will hold the configuration object."
|
||||
)
|
||||
inplace: bool = Field(
|
||||
default=False, description="Leave model in its current location; otherwise install under models directory"
|
||||
)
|
||||
source: ModelSource = Field(description="Source (URL, repo_id, or local path) of model")
|
||||
local_path: Path = Field(description="Path to locally-downloaded model; may be the same as the source")
|
||||
error_type: Optional[str] = Field(default=None, description="Class name of the exception that led to status==ERROR")
|
||||
error: Optional[str] = Field(default=None, description="Error traceback") # noqa #501
|
||||
|
||||
def set_error(self, e: Exception) -> None:
|
||||
"""Record the error and traceback from an exception."""
|
||||
self.error_type = e.__class__.__name__
|
||||
self.error = "".join(traceback.format_exception(e))
|
||||
self.status = InstallStatus.ERROR
|
||||
|
||||
|
||||
class ModelInstallServiceBase(ABC):
|
||||
"""Abstract base class for InvokeAI model installation."""
|
||||
|
||||
@abstractmethod
|
||||
def __init__(
|
||||
self,
|
||||
app_config: InvokeAIAppConfig,
|
||||
record_store: ModelRecordServiceBase,
|
||||
event_bus: Optional["EventServiceBase"] = None,
|
||||
):
|
||||
"""
|
||||
Create ModelInstallService object.
|
||||
|
||||
:param config: Systemwide InvokeAIAppConfig.
|
||||
:param store: Systemwide ModelConfigStore
|
||||
:param event_bus: InvokeAI event bus for reporting events to.
|
||||
"""
|
||||
|
||||
def start(self, invoker: Invoker) -> None:
|
||||
"""Call at InvokeAI startup time."""
|
||||
self.sync_to_config()
|
||||
|
||||
@abstractmethod
|
||||
def stop(self) -> None:
|
||||
"""Stop the model install service. After this the objection can be safely deleted."""
|
||||
|
||||
@property
|
||||
@abstractmethod
|
||||
def app_config(self) -> InvokeAIAppConfig:
|
||||
"""Return the appConfig object associated with the installer."""
|
||||
|
||||
@property
|
||||
@abstractmethod
|
||||
def record_store(self) -> ModelRecordServiceBase:
|
||||
"""Return the ModelRecoreService object associated with the installer."""
|
||||
|
||||
@property
|
||||
@abstractmethod
|
||||
def event_bus(self) -> Optional[EventServiceBase]:
|
||||
"""Return the event service base object associated with the installer."""
|
||||
|
||||
@abstractmethod
|
||||
def register_path(
|
||||
self,
|
||||
model_path: Union[Path, str],
|
||||
config: Optional[Dict[str, Any]] = None,
|
||||
) -> str:
|
||||
"""
|
||||
Probe and register the model at model_path.
|
||||
|
||||
This keeps the model in its current location.
|
||||
|
||||
:param model_path: Filesystem Path to the model.
|
||||
:param config: Dict of attributes that will override autoassigned values.
|
||||
:returns id: The string ID of the registered model.
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def unregister(self, key: str) -> None:
|
||||
"""Remove model with indicated key from the database."""
|
||||
|
||||
@abstractmethod
|
||||
def delete(self, key: str) -> None:
|
||||
"""Remove model with indicated key from the database. Delete its files only if they are within our models directory."""
|
||||
|
||||
@abstractmethod
|
||||
def unconditionally_delete(self, key: str) -> None:
|
||||
"""Remove model with indicated key from the database and unconditionally delete weight files from disk."""
|
||||
|
||||
@abstractmethod
|
||||
def install_path(
|
||||
self,
|
||||
model_path: Union[Path, str],
|
||||
config: Optional[Dict[str, Any]] = None,
|
||||
) -> str:
|
||||
"""
|
||||
Probe, register and install the model in the models directory.
|
||||
|
||||
This moves the model from its current location into
|
||||
the models directory handled by InvokeAI.
|
||||
|
||||
:param model_path: Filesystem Path to the model.
|
||||
:param config: Dict of attributes that will override autoassigned values.
|
||||
:returns id: The string ID of the registered model.
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def import_model(
|
||||
self,
|
||||
source: ModelSource,
|
||||
config: Optional[Dict[str, Any]] = None,
|
||||
) -> ModelInstallJob:
|
||||
"""Install the indicated model.
|
||||
|
||||
:param source: ModelSource object
|
||||
|
||||
:param config: Optional dict. Any fields in this dict
|
||||
will override corresponding autoassigned probe fields in the
|
||||
model's config record. Use it to override
|
||||
`name`, `description`, `base_type`, `model_type`, `format`,
|
||||
`prediction_type`, `image_size`, and/or `ztsnr_training`.
|
||||
|
||||
This will download the model located at `source`,
|
||||
probe it, and install it into the models directory.
|
||||
This call is executed asynchronously in a separate
|
||||
thread and will issue the following events on the event bus:
|
||||
|
||||
- model_install_started
|
||||
- model_install_error
|
||||
- model_install_completed
|
||||
|
||||
The `inplace` flag does not affect the behavior of downloaded
|
||||
models, which are always moved into the `models` directory.
|
||||
|
||||
The call returns a ModelInstallJob object which can be
|
||||
polled to learn the current status and/or error message.
|
||||
|
||||
Variants recognized by HuggingFace currently are:
|
||||
1. onnx
|
||||
2. openvino
|
||||
3. fp16
|
||||
4. None (usually returns fp32 model)
|
||||
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def get_job(self, source: ModelSource) -> List[ModelInstallJob]:
|
||||
"""Return the ModelInstallJob(s) corresponding to the provided source."""
|
||||
|
||||
@abstractmethod
|
||||
def list_jobs(self) -> List[ModelInstallJob]: # noqa D102
|
||||
"""
|
||||
List active and complete install jobs.
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def prune_jobs(self) -> None:
|
||||
"""Prune all completed and errored jobs."""
|
||||
|
||||
@abstractmethod
|
||||
def wait_for_installs(self) -> List[ModelInstallJob]:
|
||||
"""
|
||||
Wait for all pending installs to complete.
|
||||
|
||||
This will block until all pending installs have
|
||||
completed, been cancelled, or errored out. It will
|
||||
block indefinitely if one or more jobs are in the
|
||||
paused state.
|
||||
|
||||
It will return the current list of jobs.
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def scan_directory(self, scan_dir: Path, install: bool = False) -> List[str]:
|
||||
"""
|
||||
Recursively scan directory for new models and register or install them.
|
||||
|
||||
:param scan_dir: Path to the directory to scan.
|
||||
:param install: Install if True, otherwise register in place.
|
||||
:returns list of IDs: Returns list of IDs of models registered/installed
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def sync_to_config(self) -> None:
|
||||
"""Synchronize models on disk to those in the model record database."""
|
||||
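# --- Illustration (not part of the diff above) ---
# Hedged usage sketch of the abstract API: `installer` is assumed to be an
# already-constructed ModelInstallService; the path is a made-up example.
job = installer.import_model(LocalModelSource(path="/downloads/foo.safetensors"))
installer.wait_for_installs()  # blocks until the install queue drains
if job.status == InstallStatus.COMPLETED and job.config_out is not None:
    print(f"installed with key {job.config_out.key}")
elif job.status == InstallStatus.ERROR:
    print(f"install failed: {job.error_type}: {job.error}")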
@@ -1,395 +0,0 @@
|
||||
"""Model installation class."""
|
||||
|
||||
import threading
|
||||
from hashlib import sha256
|
||||
from logging import Logger
|
||||
from pathlib import Path
|
||||
from queue import Queue
|
||||
from random import randbytes
|
||||
from shutil import copyfile, copytree, move, rmtree
|
||||
from typing import Any, Dict, List, Optional, Set, Union
|
||||
|
||||
from invokeai.app.services.config import InvokeAIAppConfig
|
||||
from invokeai.app.services.events import EventServiceBase
|
||||
from invokeai.app.services.model_records import DuplicateModelException, ModelRecordServiceBase, UnknownModelException
|
||||
from invokeai.backend.model_manager.config import (
|
||||
AnyModelConfig,
|
||||
BaseModelType,
|
||||
InvalidModelConfigException,
|
||||
ModelType,
|
||||
)
|
||||
from invokeai.backend.model_manager.hash import FastModelHash
|
||||
from invokeai.backend.model_manager.probe import ModelProbe
|
||||
from invokeai.backend.model_manager.search import ModelSearch
|
||||
from invokeai.backend.util import Chdir, InvokeAILogger
|
||||
|
||||
from .model_install_base import (
|
||||
InstallStatus,
|
||||
LocalModelSource,
|
||||
ModelInstallJob,
|
||||
ModelInstallServiceBase,
|
||||
ModelSource,
|
||||
)
|
||||
|
||||
# marker that the queue is done and that thread should exit
|
||||
STOP_JOB = ModelInstallJob(
|
||||
source=LocalModelSource(path="stop"),
|
||||
local_path=Path("/dev/null"),
|
||||
)
|
||||
|
||||
|
||||
class ModelInstallService(ModelInstallServiceBase):
|
||||
"""class for InvokeAI model installation."""
|
||||
|
||||
_app_config: InvokeAIAppConfig
|
||||
_record_store: ModelRecordServiceBase
|
||||
_event_bus: Optional[EventServiceBase] = None
|
||||
_install_queue: Queue[ModelInstallJob]
|
||||
_install_jobs: List[ModelInstallJob]
|
||||
_logger: Logger
|
||||
_cached_model_paths: Set[Path]
|
||||
_models_installed: Set[str]
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
app_config: InvokeAIAppConfig,
|
||||
record_store: ModelRecordServiceBase,
|
||||
event_bus: Optional[EventServiceBase] = None,
|
||||
):
|
||||
"""
|
||||
Initialize the installer object.
|
||||
|
||||
:param app_config: InvokeAIAppConfig object
|
||||
:param record_store: Previously-opened ModelRecordService database
|
||||
:param event_bus: Optional EventService object
|
||||
"""
|
||||
self._app_config = app_config
|
||||
self._record_store = record_store
|
||||
self._event_bus = event_bus
|
||||
self._logger = InvokeAILogger.get_logger(name=self.__class__.__name__)
|
||||
self._install_jobs = []
|
||||
self._install_queue = Queue()
|
||||
self._cached_model_paths = set()
|
||||
self._models_installed = set()
|
||||
self._start_installer_thread()
|
||||
|
||||
@property
|
||||
def app_config(self) -> InvokeAIAppConfig: # noqa D102
|
||||
return self._app_config
|
||||
|
||||
@property
|
||||
def record_store(self) -> ModelRecordServiceBase: # noqa D102
|
||||
return self._record_store
|
||||
|
||||
@property
|
||||
def event_bus(self) -> Optional[EventServiceBase]: # noqa D102
|
||||
return self._event_bus
|
||||
|
||||
def stop(self, *args, **kwargs) -> None:
|
||||
"""Stop the install thread; after this the object can be deleted and garbage collected."""
|
||||
self._install_queue.put(STOP_JOB)
|
||||
|
||||
def _start_installer_thread(self) -> None:
|
||||
threading.Thread(target=self._install_next_item, daemon=True).start()
|
||||
|
||||
def _install_next_item(self) -> None:
|
||||
done = False
|
||||
while not done:
|
||||
job = self._install_queue.get()
|
||||
if job == STOP_JOB:
|
||||
done = True
|
||||
continue
|
||||
|
||||
assert job.local_path is not None
|
||||
try:
|
||||
self._signal_job_running(job)
|
||||
if job.inplace:
|
||||
key = self.register_path(job.local_path, job.config_in)
|
||||
else:
|
||||
key = self.install_path(job.local_path, job.config_in)
|
||||
job.config_out = self.record_store.get_model(key)
|
||||
self._signal_job_completed(job)
|
||||
|
||||
except (OSError, DuplicateModelException, InvalidModelConfigException) as excp:
|
||||
self._signal_job_errored(job, excp)
|
||||
finally:
|
||||
self._install_queue.task_done()
|
||||
self._logger.info("Install thread exiting")
|
||||
|
||||
def _signal_job_running(self, job: ModelInstallJob) -> None:
|
||||
job.status = InstallStatus.RUNNING
|
||||
self._logger.info(f"{job.source}: model installation started")
|
||||
if self._event_bus:
|
||||
self._event_bus.emit_model_install_started(str(job.source))
|
||||
|
||||
def _signal_job_completed(self, job: ModelInstallJob) -> None:
|
||||
job.status = InstallStatus.COMPLETED
|
||||
assert job.config_out
|
||||
self._logger.info(
|
||||
f"{job.source}: model installation completed. {job.local_path} registered key {job.config_out.key}"
|
||||
)
|
||||
if self._event_bus:
|
||||
assert job.local_path is not None
|
||||
assert job.config_out is not None
|
||||
key = job.config_out.key
|
||||
self._event_bus.emit_model_install_completed(str(job.source), key)
|
||||
|
||||
def _signal_job_errored(self, job: ModelInstallJob, excp: Exception) -> None:
|
||||
job.set_error(excp)
|
||||
self._logger.info(f"{job.source}: model installation encountered an exception: {job.error_type}")
|
||||
if self._event_bus:
|
||||
error_type = job.error_type
|
||||
error = job.error
|
||||
assert error_type is not None
|
||||
assert error is not None
|
||||
self._event_bus.emit_model_install_error(str(job.source), error_type, error)
|
||||
|
||||
def register_path(
|
||||
self,
|
||||
model_path: Union[Path, str],
|
||||
config: Optional[Dict[str, Any]] = None,
|
||||
) -> str: # noqa D102
|
||||
model_path = Path(model_path)
|
||||
config = config or {}
|
||||
if config.get("source") is None:
|
||||
config["source"] = model_path.resolve().as_posix()
|
||||
return self._register(model_path, config)
|
||||
|
||||
def install_path(
|
||||
self,
|
||||
model_path: Union[Path, str],
|
||||
config: Optional[Dict[str, Any]] = None,
|
||||
) -> str: # noqa D102
|
||||
model_path = Path(model_path)
|
||||
config = config or {}
|
||||
if config.get("source") is None:
|
||||
config["source"] = model_path.resolve().as_posix()
|
||||
|
||||
info: AnyModelConfig = self._probe_model(Path(model_path), config)
|
||||
old_hash = info.original_hash
|
||||
dest_path = self.app_config.models_path / info.base.value / info.type.value / model_path.name
|
||||
new_path = self._copy_model(model_path, dest_path)
|
||||
new_hash = FastModelHash.hash(new_path)
|
||||
assert new_hash == old_hash, f"{model_path}: Model hash changed during installation, possibly corrupted."
|
||||
|
||||
return self._register(
|
||||
new_path,
|
||||
config,
|
||||
info,
|
||||
)
|
||||
|
||||
def import_model(
|
||||
self,
|
||||
source: ModelSource,
|
||||
config: Optional[Dict[str, Any]] = None,
|
||||
) -> ModelInstallJob: # noqa D102
|
||||
if not config:
|
||||
config = {}
|
||||
|
||||
# Installing a local path
|
||||
if isinstance(source, LocalModelSource) and Path(source.path).exists(): # a path that is already on disk
|
||||
job = ModelInstallJob(
|
||||
source=source,
|
||||
config_in=config,
|
||||
local_path=Path(source.path),
|
||||
)
|
||||
self._install_jobs.append(job)
|
||||
self._install_queue.put(job)
|
||||
return job
|
||||
|
||||
else: # here is where we'd download a URL or repo_id. Implementation pending download queue.
|
||||
raise UnknownModelException("File or directory not found")
|
||||
|
||||
def list_jobs(self) -> List[ModelInstallJob]: # noqa D102
|
||||
return self._install_jobs
|
||||
|
||||
def get_job(self, source: ModelSource) -> List[ModelInstallJob]: # noqa D102
|
||||
return [x for x in self._install_jobs if x.source == source]
|
||||
|
||||
def wait_for_installs(self) -> List[ModelInstallJob]: # noqa D102
|
||||
self._install_queue.join()
|
||||
return self._install_jobs
|
||||
|
||||
def prune_jobs(self) -> None:
|
||||
"""Prune all completed and errored jobs."""
|
||||
unfinished_jobs = [
|
||||
x for x in self._install_jobs if x.status not in [InstallStatus.COMPLETED, InstallStatus.ERROR]
|
||||
]
|
||||
self._install_jobs = unfinished_jobs
|
||||
|
||||
def sync_to_config(self) -> None:
|
||||
"""Synchronize models on disk to those in the config record store database."""
|
||||
self._scan_models_directory()
|
||||
if autoimport := self._app_config.autoimport_dir:
|
||||
self._logger.info("Scanning autoimport directory for new models")
|
||||
installed = self.scan_directory(self._app_config.root_path / autoimport)
|
||||
self._logger.info(f"{len(installed)} new models registered")
|
||||
self._logger.info("Model installer (re)initialized")
|
||||
|
||||
def scan_directory(self, scan_dir: Path, install: bool = False) -> List[str]: # noqa D102
|
||||
self._cached_model_paths = {Path(x.path) for x in self.record_store.all_models()}
|
||||
callback = self._scan_install if install else self._scan_register
|
||||
search = ModelSearch(on_model_found=callback)
|
||||
self._models_installed: Set[str] = set()
|
||||
search.search(scan_dir)
|
||||
return list(self._models_installed)
|
||||
|
||||
def _scan_models_directory(self) -> None:
|
||||
"""
|
||||
Scan the models directory for new and missing models.
|
||||
|
||||
New models will be added to the storage backend. Missing models
|
||||
will be deleted.
|
||||
"""
|
||||
defunct_models = set()
|
||||
installed = set()
|
||||
|
||||
with Chdir(self._app_config.models_path):
|
||||
self._logger.info("Checking for models that have been moved or deleted from disk")
|
||||
for model_config in self.record_store.all_models():
|
||||
path = Path(model_config.path)
|
||||
if not path.exists():
|
||||
self._logger.info(f"{model_config.name}: path {path.as_posix()} no longer exists. Unregistering")
|
||||
defunct_models.add(model_config.key)
|
||||
for key in defunct_models:
|
||||
self.unregister(key)
|
||||
|
||||
self._logger.info(f"Scanning {self._app_config.models_path} for new and orphaned models")
|
||||
for cur_base_model in BaseModelType:
|
||||
for cur_model_type in ModelType:
|
||||
models_dir = Path(cur_base_model.value, cur_model_type.value)
|
||||
installed.update(self.scan_directory(models_dir))
|
||||
self._logger.info(f"{len(installed)} new models registered; {len(defunct_models)} unregistered")
|
||||
|
||||
def _sync_model_path(self, key: str, ignore_hash_change: bool = False) -> AnyModelConfig:
|
||||
"""
|
||||
Move model into the location indicated by its basetype, type and name.
|
||||
|
||||
Call this after updating a model's attributes in order to move
|
||||
the model's path into the location indicated by its basetype, type and
|
||||
name. Applies only to models whose paths are within the root `models_dir`
|
||||
directory.
|
||||
|
||||
May raise an UnknownModelException.
|
||||
"""
|
||||
model = self.record_store.get_model(key)
|
||||
old_path = Path(model.path)
|
||||
models_dir = self.app_config.models_path
|
||||
|
||||
if not old_path.is_relative_to(models_dir):
|
||||
return model
|
||||
|
||||
new_path = models_dir / model.base.value / model.type.value / model.name
|
||||
self._logger.info(f"Moving {model.name} to {new_path}.")
|
||||
new_path = self._move_model(old_path, new_path)
|
||||
new_hash = FastModelHash.hash(new_path)
|
||||
model.path = new_path.relative_to(models_dir).as_posix()
|
||||
if model.current_hash != new_hash:
|
||||
assert (
|
||||
ignore_hash_change
|
||||
), f"{model.name}: Model hash changed during installation, model is possibly corrupted"
|
||||
model.current_hash = new_hash
|
||||
self._logger.info(f"Model has new hash {model.current_hash}, but will continue to be identified by {key}")
|
||||
self.record_store.update_model(key, model)
|
||||
return model
|
||||
|
||||
def _scan_register(self, model: Path) -> bool:
|
||||
if model in self._cached_model_paths:
|
||||
return True
|
||||
try:
|
||||
id = self.register_path(model)
|
||||
self._sync_model_path(id) # possibly move it to right place in `models`
|
||||
self._logger.info(f"Registered {model.name} with id {id}")
|
||||
self._models_installed.add(id)
|
||||
except DuplicateModelException:
|
||||
pass
|
||||
return True
|
||||
|
||||
def _scan_install(self, model: Path) -> bool:
|
||||
if model in self._cached_model_paths:
|
||||
return True
|
||||
try:
|
||||
id = self.install_path(model)
|
||||
self._logger.info(f"Installed {model} with id {id}")
|
||||
self._models_installed.add(id)
|
||||
except DuplicateModelException:
|
||||
pass
|
||||
return True
|
||||
|
||||
def unregister(self, key: str) -> None: # noqa D102
|
||||
self.record_store.del_model(key)
|
||||
|
||||
def delete(self, key: str) -> None: # noqa D102
|
||||
"""Unregister the model. Delete its files only if they are within our models directory."""
|
||||
model = self.record_store.get_model(key)
|
||||
models_dir = self.app_config.models_path
|
||||
model_path = models_dir / model.path
|
||||
if model_path.is_relative_to(models_dir):
|
||||
self.unconditionally_delete(key)
|
||||
else:
|
||||
self.unregister(key)
|
||||
|
||||
def unconditionally_delete(self, key: str) -> None: # noqa D102
|
||||
model = self.record_store.get_model(key)
|
||||
path = self.app_config.models_path / model.path
|
||||
if path.is_dir():
|
||||
rmtree(path)
|
||||
else:
|
||||
path.unlink()
|
||||
self.unregister(key)
|
||||
|
||||
def _copy_model(self, old_path: Path, new_path: Path) -> Path:
|
||||
if old_path == new_path:
|
||||
return old_path
|
||||
new_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
if old_path.is_dir():
|
||||
copytree(old_path, new_path)
|
||||
else:
|
||||
copyfile(old_path, new_path)
|
||||
return new_path
|
||||
|
||||
def _move_model(self, old_path: Path, new_path: Path) -> Path:
|
||||
if old_path == new_path:
|
||||
return old_path
|
||||
|
||||
new_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# if path already exists then we jigger the name to make it unique
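# e.g. "foo.safetensors" becomes "foo_01.safetensors", then "foo_02.safetensors", and so on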
|
||||
counter: int = 1
|
||||
while new_path.exists():
|
||||
path = new_path.with_stem(new_path.stem + f"_{counter:02d}")
|
||||
if not path.exists():
|
||||
new_path = path
|
||||
counter += 1
|
||||
move(old_path, new_path)
|
||||
return new_path
|
||||
|
||||
def _probe_model(self, model_path: Path, config: Optional[Dict[str, Any]] = None) -> AnyModelConfig:
|
||||
info: AnyModelConfig = ModelProbe.probe(Path(model_path))
|
||||
if config: # used to override probe fields
|
||||
for key, value in config.items():
|
||||
setattr(info, key, value)
|
||||
return info
|
||||
|
||||
def _create_key(self) -> str:
|
||||
return sha256(randbytes(100)).hexdigest()[0:32]
|
||||
|
||||
def _register(
|
||||
self, model_path: Path, config: Optional[Dict[str, Any]] = None, info: Optional[AnyModelConfig] = None
|
||||
) -> str:
|
||||
info = info or ModelProbe.probe(model_path, config)
|
||||
key = self._create_key()
|
||||
|
||||
model_path = model_path.absolute()
|
||||
if model_path.is_relative_to(self.app_config.models_path):
|
||||
model_path = model_path.relative_to(self.app_config.models_path)
|
||||
|
||||
info.path = model_path.as_posix()
|
||||
|
||||
# add 'main' specific fields
|
||||
if hasattr(info, "config"):
|
||||
# make config relative to our root
|
||||
legacy_conf = (self.app_config.root_dir / self.app_config.legacy_conf_dir / info.config).resolve()
|
||||
info.config = legacy_conf.relative_to(self.app_config.root_dir).as_posix()
|
||||
self.record_store.add_model(key, info)
|
||||
return key
|
||||
@@ -6,11 +6,3 @@ from .model_records_base import ( # noqa F401
|
||||
UnknownModelException,
|
||||
)
|
||||
from .model_records_sql import ModelRecordServiceSQL # noqa F401
|
||||
|
||||
__all__ = [
|
||||
"ModelRecordServiceBase",
|
||||
"ModelRecordServiceSQL",
|
||||
"DuplicateModelException",
|
||||
"InvalidModelException",
|
||||
"UnknownModelException",
|
||||
]
|
||||
|
||||
@@ -7,7 +7,10 @@ from abc import ABC, abstractmethod
|
||||
from pathlib import Path
|
||||
from typing import List, Optional, Union
|
||||
|
||||
from invokeai.backend.model_manager.config import AnyModelConfig, BaseModelType, ModelFormat, ModelType
|
||||
from invokeai.backend.model_manager.config import AnyModelConfig, BaseModelType, ModelType
|
||||
|
||||
# should match the InvokeAI version when this is first released.
|
||||
CONFIG_FILE_VERSION = "3.2.0"
|
||||
|
||||
|
||||
class DuplicateModelException(Exception):
|
||||
@@ -29,6 +32,12 @@ class ConfigFileVersionMismatchException(Exception):
|
||||
class ModelRecordServiceBase(ABC):
|
||||
"""Abstract base class for storage and retrieval of model configs."""
|
||||
|
||||
@property
|
||||
@abstractmethod
|
||||
def version(self) -> str:
|
||||
"""Return the config file/database schema version."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def add_model(self, key: str, config: Union[dict, AnyModelConfig]) -> AnyModelConfig:
|
||||
"""
|
||||
@@ -106,7 +115,6 @@ class ModelRecordServiceBase(ABC):
|
||||
model_name: Optional[str] = None,
|
||||
base_model: Optional[BaseModelType] = None,
|
||||
model_type: Optional[ModelType] = None,
|
||||
model_format: Optional[ModelFormat] = None,
|
||||
) -> List[AnyModelConfig]:
|
||||
"""
|
||||
Return models matching name, base and/or type.
|
||||
@@ -114,7 +122,6 @@ class ModelRecordServiceBase(ABC):
|
||||
:param model_name: Filter by name of model (optional)
|
||||
:param base_model: Filter by base model (optional)
|
||||
:param model_type: Filter by type of model (optional)
|
||||
:param model_format: Filter by model format (e.g. "diffusers") (optional)
|
||||
|
||||
If none of the optional filters are passed, will return all
|
||||
models in the database.
|
||||
|
||||
@@ -49,12 +49,12 @@ from invokeai.backend.model_manager.config import (
|
||||
AnyModelConfig,
|
||||
BaseModelType,
|
||||
ModelConfigFactory,
|
||||
ModelFormat,
|
||||
ModelType,
|
||||
)
|
||||
|
||||
from ..shared.sqlite.sqlite_database import SqliteDatabase
|
||||
from .model_records_base import (
|
||||
CONFIG_FILE_VERSION,
|
||||
DuplicateModelException,
|
||||
ModelRecordServiceBase,
|
||||
UnknownModelException,
|
||||
@@ -78,6 +78,85 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
|
||||
self._db = db
|
||||
self._cursor = self._db.conn.cursor()
|
||||
|
||||
with self._db.lock:
|
||||
# Enable foreign keys
|
||||
self._db.conn.execute("PRAGMA foreign_keys = ON;")
|
||||
self._create_tables()
|
||||
self._db.conn.commit()
|
||||
assert (
|
||||
str(self.version) == CONFIG_FILE_VERSION
|
||||
), f"Model config version {self.version} does not match expected version {CONFIG_FILE_VERSION}"
|
||||
|
||||
def _create_tables(self) -> None:
|
||||
"""Create sqlite3 tables."""
|
||||
# model_config table breaks out the fields that are common to all config objects
|
||||
# and puts class-specific ones in a serialized json object
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
CREATE TABLE IF NOT EXISTS model_config (
|
||||
id TEXT NOT NULL PRIMARY KEY,
|
||||
-- The next 3 fields are enums in python, unrestricted string here
|
||||
base TEXT NOT NULL,
|
||||
type TEXT NOT NULL,
|
||||
name TEXT NOT NULL,
|
||||
path TEXT NOT NULL,
|
||||
original_hash TEXT, -- could be null
|
||||
-- Serialized JSON representation of the whole config object,
|
||||
-- which will contain additional fields from subclasses
|
||||
config TEXT NOT NULL,
|
||||
created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
-- Updated via trigger
|
||||
updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
-- unique constraint on combo of name, base and type
|
||||
UNIQUE(name, base, type)
|
||||
);
|
||||
"""
|
||||
)
|
||||
|
||||
# metadata table
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
CREATE TABLE IF NOT EXISTS model_manager_metadata (
|
||||
metadata_key TEXT NOT NULL PRIMARY KEY,
|
||||
metadata_value TEXT NOT NULL
|
||||
);
|
||||
"""
|
||||
)
|
||||
|
||||
# Add trigger for `updated_at`.
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
CREATE TRIGGER IF NOT EXISTS model_config_updated_at
|
||||
AFTER UPDATE
|
||||
ON model_config FOR EACH ROW
|
||||
BEGIN
|
||||
UPDATE model_config SET updated_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
|
||||
WHERE id = old.id;
|
||||
END;
|
||||
"""
|
||||
)
|
||||
|
||||
# Add indexes for searchable fields
|
||||
for stmt in [
|
||||
"CREATE INDEX IF NOT EXISTS base_index ON model_config(base);",
|
||||
"CREATE INDEX IF NOT EXISTS type_index ON model_config(type);",
|
||||
"CREATE INDEX IF NOT EXISTS name_index ON model_config(name);",
|
||||
"CREATE UNIQUE INDEX IF NOT EXISTS path_index ON model_config(path);",
|
||||
]:
|
||||
self._cursor.execute(stmt)
|
||||
|
||||
# Add our version to the metadata table
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
INSERT OR IGNORE into model_manager_metadata (
|
||||
metadata_key,
|
||||
metadata_value
|
||||
)
|
||||
VALUES (?,?);
|
||||
""",
|
||||
("version", CONFIG_FILE_VERSION),
|
||||
)
|
||||
|
||||
def add_model(self, key: str, config: Union[dict, AnyModelConfig]) -> AnyModelConfig:
|
||||
"""
|
||||
Add a model to the database.
|
||||
@@ -96,13 +175,21 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
|
||||
"""--sql
|
||||
INSERT INTO model_config (
|
||||
id,
|
||||
base,
|
||||
type,
|
||||
name,
|
||||
path,
|
||||
original_hash,
|
||||
config
|
||||
)
|
||||
VALUES (?,?,?);
|
||||
VALUES (?,?,?,?,?,?,?);
|
||||
""",
|
||||
(
|
||||
key,
|
||||
record.base,
|
||||
record.type,
|
||||
record.name,
|
||||
record.path,
|
||||
record.original_hash,
|
||||
json_serialized,
|
||||
),
|
||||
@@ -127,6 +214,22 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
|
||||
|
||||
return self.get_model(key)
|
||||
|
||||
@property
|
||||
def version(self) -> str:
|
||||
"""Return the version of the database schema."""
|
||||
with self._db.lock:
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
SELECT metadata_value FROM model_manager_metadata
|
||||
WHERE metadata_key=?;
|
||||
""",
|
||||
("version",),
|
||||
)
|
||||
rows = self._cursor.fetchone()
|
||||
if not rows:
|
||||
raise KeyError("Models database does not have metadata key 'version'")
|
||||
return rows[0]
|
||||
|
||||
def del_model(self, key: str) -> None:
|
||||
"""
|
||||
Delete a model.
|
||||
@@ -166,11 +269,14 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
UPDATE model_config
|
||||
SET
|
||||
SET base=?,
|
||||
type=?,
|
||||
name=?,
|
||||
path=?,
|
||||
config=?
|
||||
WHERE id=?;
|
||||
""",
|
||||
(json_serialized, key),
|
||||
(record.base, record.type, record.name, record.path, json_serialized, key),
|
||||
)
|
||||
if self._cursor.rowcount == 0:
|
||||
raise UnknownModelException("model not found")
|
||||
@@ -226,7 +332,6 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
|
||||
model_name: Optional[str] = None,
|
||||
base_model: Optional[BaseModelType] = None,
|
||||
model_type: Optional[ModelType] = None,
|
||||
model_format: Optional[ModelFormat] = None,
|
||||
) -> List[AnyModelConfig]:
|
||||
"""
|
||||
Return models matching name, base and/or type.
|
||||
@@ -234,7 +339,6 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
|
||||
:param model_name: Filter by name of model (optional)
|
||||
:param base_model: Filter by base model (optional)
|
||||
:param model_type: Filter by type of model (optional)
|
||||
:param model_format: Filter by model format (e.g. "diffusers") (optional)
|
||||
|
||||
If none of the optional filters are passed, will return all
|
||||
models in the database.
|
||||
@@ -251,9 +355,6 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
|
||||
if model_type:
|
||||
where_clause.append("type=?")
|
||||
bindings.append(model_type)
|
||||
if model_format:
|
||||
where_clause.append("format=?")
|
||||
bindings.append(model_format)
|
||||
where = f"WHERE {' AND '.join(where_clause)}" if where_clause else ""
|
||||
with self._db.lock:
|
||||
self._cursor.execute(
|
||||
@@ -273,7 +374,7 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
SELECT config FROM model_config
|
||||
WHERE path=?;
|
||||
WHERE model_path=?;
|
||||
""",
|
||||
(str(path),),
|
||||
)
|
||||
|
||||
@@ -50,6 +50,7 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
self.__lock = db.lock
|
||||
self.__conn = db.conn
|
||||
self.__cursor = self.__conn.cursor()
|
||||
self._create_tables()
|
||||
|
||||
def _match_event_name(self, event: FastAPIEvent, match_in: list[str]) -> bool:
|
||||
return event[1]["event"] in match_in
|
||||
@@ -97,6 +98,123 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
except SessionQueueItemNotFoundError:
|
||||
return
|
||||
|
||||
def _create_tables(self) -> None:
|
||||
"""Creates the session queue tables, indicies, and triggers"""
|
||||
try:
|
||||
self.__lock.acquire()
|
||||
self.__cursor.execute(
|
||||
"""--sql
|
||||
CREATE TABLE IF NOT EXISTS session_queue (
|
||||
item_id INTEGER PRIMARY KEY AUTOINCREMENT, -- used for ordering, cursor pagination
|
||||
batch_id TEXT NOT NULL, -- identifier of the batch this queue item belongs to
|
||||
queue_id TEXT NOT NULL, -- identifier of the queue this queue item belongs to
|
||||
session_id TEXT NOT NULL UNIQUE, -- duplicated data from the session column, for ease of access
|
||||
field_values TEXT, -- NULL if no values are associated with this queue item
|
||||
session TEXT NOT NULL, -- the session to be executed
|
||||
status TEXT NOT NULL DEFAULT 'pending', -- the status of the queue item, one of 'pending', 'in_progress', 'completed', 'failed', 'canceled'
|
||||
priority INTEGER NOT NULL DEFAULT 0, -- the priority, higher is more important
|
||||
error TEXT, -- any errors associated with this queue item
|
||||
created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')), -- updated via trigger
|
||||
started_at DATETIME, -- updated via trigger
|
||||
completed_at DATETIME -- updated via trigger, completed items are cleaned up on application startup
|
||||
-- Ideally this is a FK, but graph_executions uses INSERT OR REPLACE, and REPLACE triggers the ON DELETE CASCADE...
|
||||
-- FOREIGN KEY (session_id) REFERENCES graph_executions (id) ON DELETE CASCADE
|
||||
);
|
||||
"""
|
||||
)
|
||||
|
||||
self.__cursor.execute(
|
||||
"""--sql
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS idx_session_queue_item_id ON session_queue(item_id);
|
||||
"""
|
||||
)
|
||||
|
||||
self.__cursor.execute(
|
||||
"""--sql
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS idx_session_queue_session_id ON session_queue(session_id);
|
||||
"""
|
||||
)
|
||||
|
||||
self.__cursor.execute(
|
||||
"""--sql
|
||||
CREATE INDEX IF NOT EXISTS idx_session_queue_batch_id ON session_queue(batch_id);
|
||||
"""
|
||||
)
|
||||
|
||||
self.__cursor.execute(
|
||||
"""--sql
|
||||
CREATE INDEX IF NOT EXISTS idx_session_queue_created_priority ON session_queue(priority);
|
||||
"""
|
||||
)
|
||||
|
||||
self.__cursor.execute(
|
||||
"""--sql
|
||||
CREATE INDEX IF NOT EXISTS idx_session_queue_created_status ON session_queue(status);
|
||||
"""
|
||||
)
|
||||
|
||||
self.__cursor.execute(
|
||||
"""--sql
|
||||
CREATE TRIGGER IF NOT EXISTS tg_session_queue_completed_at
|
||||
AFTER UPDATE OF status ON session_queue
|
||||
FOR EACH ROW
|
||||
WHEN
|
||||
NEW.status = 'completed'
|
||||
OR NEW.status = 'failed'
|
||||
OR NEW.status = 'canceled'
|
||||
BEGIN
|
||||
UPDATE session_queue
|
||||
SET completed_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
|
||||
WHERE item_id = NEW.item_id;
|
||||
END;
|
||||
"""
|
||||
)
|
||||
|
||||
self.__cursor.execute(
|
||||
"""--sql
|
||||
CREATE TRIGGER IF NOT EXISTS tg_session_queue_started_at
|
||||
AFTER UPDATE OF status ON session_queue
|
||||
FOR EACH ROW
|
||||
WHEN
|
||||
NEW.status = 'in_progress'
|
||||
BEGIN
|
||||
UPDATE session_queue
|
||||
SET started_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
|
||||
WHERE item_id = NEW.item_id;
|
||||
END;
|
||||
"""
|
||||
)
|
||||
|
||||
self.__cursor.execute(
|
||||
"""--sql
|
||||
CREATE TRIGGER IF NOT EXISTS tg_session_queue_updated_at
|
||||
AFTER UPDATE
|
||||
ON session_queue FOR EACH ROW
|
||||
BEGIN
|
||||
UPDATE session_queue
|
||||
SET updated_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
|
||||
WHERE item_id = old.item_id;
|
||||
END;
|
||||
"""
|
||||
)
|
||||
|
||||
self.__cursor.execute("PRAGMA table_info(session_queue)")
|
||||
columns = [column[1] for column in self.__cursor.fetchall()]
|
||||
if "workflow" not in columns:
|
||||
self.__cursor.execute(
|
||||
"""--sql
|
||||
ALTER TABLE session_queue ADD COLUMN workflow TEXT;
|
||||
"""
|
||||
)
|
||||
|
||||
self.__conn.commit()
|
||||
except Exception:
|
||||
self.__conn.rollback()
|
||||
raise
|
||||
finally:
|
||||
self.__lock.release()
|
||||
|
||||
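# --- Illustration (not part of the diff above) ---
# Hedged helper sketch: the "PRAGMA table_info(...)" check followed by
# "ALTER TABLE ... ADD COLUMN" above is the same idiom used for
# images.starred, images.has_workflow and session_queue.workflow.
# This helper is an assumption for illustration, not repository code.
import sqlite3


def add_column_if_missing(cursor: sqlite3.Cursor, table: str, column: str, ddl: str) -> None:
    """Add `column` to `table` only if it is not already present."""
    cursor.execute(f"PRAGMA table_info({table})")
    existing = [row[1] for row in cursor.fetchall()]
    if column not in existing:
        cursor.execute(f"ALTER TABLE {table} ADD COLUMN {column} {ddl};")


# e.g. add_column_if_missing(cursor, "session_queue", "workflow", "TEXT")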
def _set_in_progress_to_canceled(self) -> None:
|
||||
"""
|
||||
Sets all in_progress queue items to canceled. Run on app startup, not associated with any queue.
|
||||
|
||||
@@ -3,65 +3,45 @@ import threading
|
||||
from logging import Logger
|
||||
from pathlib import Path
|
||||
|
||||
from invokeai.app.services.config import InvokeAIAppConfig
|
||||
from invokeai.app.services.shared.sqlite.sqlite_common import sqlite_memory
|
||||
|
||||
|
||||
class SqliteDatabase:
|
||||
"""
|
||||
Manages a connection to an SQLite database.
|
||||
def __init__(self, config: InvokeAIAppConfig, logger: Logger):
|
||||
self._logger = logger
|
||||
self._config = config
|
||||
|
||||
:param db_path: Path to the database file. If None, an in-memory database is used.
|
||||
:param logger: Logger to use for logging.
|
||||
:param verbose: Whether to log SQL statements. Provides `logger.debug` as the SQLite trace callback.
|
||||
|
||||
This is a light wrapper around the `sqlite3` module, providing a few conveniences:
|
||||
- The database file is written to disk if it does not exist.
|
||||
- Foreign key constraints are enabled by default.
|
||||
- The connection is configured to use the `sqlite3.Row` row factory.
|
||||
|
||||
In addition to the constructor args, the instance provides the following attributes and methods:
|
||||
- `conn`: A `sqlite3.Connection` object. Note that the connection must never be closed if the database is in-memory.
|
||||
- `lock`: A shared re-entrant lock, used to approximate thread safety.
|
||||
- `clean()`: Runs the SQL `VACUUM;` command and reports on the freed space.
|
||||
"""
|
||||
|
||||
def __init__(self, db_path: Path | None, logger: Logger, verbose: bool = False) -> None:
|
||||
"""Initializes the database. This is used internally by the class constructor."""
|
||||
self.logger = logger
|
||||
self.db_path = db_path
|
||||
self.verbose = verbose
|
||||
|
||||
if not self.db_path:
|
||||
logger.info("Initializing in-memory database")
|
||||
if self._config.use_memory_db:
|
||||
self.db_path = sqlite_memory
|
||||
logger.info("Using in-memory database")
|
||||
else:
|
||||
self.db_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
self.logger.info(f"Initializing database at {self.db_path}")
|
||||
db_path = self._config.db_path
|
||||
db_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
self.db_path = str(db_path)
|
||||
self._logger.info(f"Using database at {self.db_path}")
|
||||
|
||||
self.conn = sqlite3.connect(database=self.db_path or sqlite_memory, check_same_thread=False)
|
||||
self.conn = sqlite3.connect(self.db_path, check_same_thread=False)
|
||||
self.lock = threading.RLock()
|
||||
self.conn.row_factory = sqlite3.Row
|
||||
|
||||
if self.verbose:
|
||||
self.conn.set_trace_callback(self.logger.debug)
|
||||
if self._config.log_sql:
|
||||
self.conn.set_trace_callback(self._logger.debug)
|
||||
|
||||
self.conn.execute("PRAGMA foreign_keys = ON;")
|
||||
|
||||
def clean(self) -> None:
|
||||
"""
|
||||
Cleans the database by running the VACUUM command, reporting on the freed space.
|
||||
"""
|
||||
# No need to clean in-memory database
|
||||
if not self.db_path:
|
||||
return
|
||||
with self.lock:
|
||||
try:
|
||||
if self.db_path == sqlite_memory:
|
||||
return
|
||||
initial_db_size = Path(self.db_path).stat().st_size
|
||||
self.conn.execute("VACUUM;")
|
||||
self.conn.commit()
|
||||
final_db_size = Path(self.db_path).stat().st_size
|
||||
freed_space_in_mb = round((initial_db_size - final_db_size) / 1024 / 1024, 2)
|
||||
if freed_space_in_mb > 0:
|
||||
self.logger.info(f"Cleaned database (freed {freed_space_in_mb}MB)")
|
||||
self._logger.info(f"Cleaned database (freed {freed_space_in_mb}MB)")
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error cleaning database: {e}")
|
||||
self._logger.error(f"Error cleaning database: {e}")
|
||||
raise
|
||||
|
||||
@@ -1,34 +0,0 @@
|
||||
from logging import Logger
|
||||
|
||||
from invokeai.app.services.config.config_default import InvokeAIAppConfig
|
||||
from invokeai.app.services.image_files.image_files_base import ImageFileStorageBase
|
||||
from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
|
||||
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_1 import build_migration_1
|
||||
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_2 import build_migration_2
|
||||
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_3 import build_migration_3
|
||||
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_impl import SqliteMigrator
|
||||
|
||||
|
||||
def init_db(config: InvokeAIAppConfig, logger: Logger, image_files: ImageFileStorageBase) -> SqliteDatabase:
|
||||
"""
|
||||
Initializes the SQLite database.
|
||||
|
||||
:param config: The app config
|
||||
:param logger: The logger
|
||||
:param image_files: The image files service (used by migration 2)
|
||||
|
||||
This function:
|
||||
- Instantiates a :class:`SqliteDatabase`
|
||||
- Instantiates a :class:`SqliteMigrator` and registers all migrations
|
||||
- Runs all migrations
|
||||
"""
|
||||
db_path = None if config.use_memory_db else config.db_path
|
||||
db = SqliteDatabase(db_path=db_path, logger=logger, verbose=config.log_sql)
|
||||
|
||||
migrator = SqliteMigrator(db=db)
|
||||
migrator.register_migration(build_migration_1())
|
||||
migrator.register_migration(build_migration_2(image_files=image_files, logger=logger))
|
||||
migrator.register_migration(build_migration_3())
|
||||
migrator.run_migrations()
|
||||
|
||||
return db
|
||||
@@ -1,372 +0,0 @@
|
||||
import sqlite3
|
||||
|
||||
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration
|
||||
|
||||
|
||||
class Migration1Callback:
|
||||
def __call__(self, cursor: sqlite3.Cursor) -> None:
|
||||
"""Migration callback for database version 1."""
|
||||
|
||||
self._create_board_images(cursor)
|
||||
self._create_boards(cursor)
|
||||
self._create_images(cursor)
|
||||
self._create_model_config(cursor)
|
||||
self._create_session_queue(cursor)
|
||||
self._create_workflow_images(cursor)
|
||||
self._create_workflows(cursor)
|
||||
|
||||
def _create_board_images(self, cursor: sqlite3.Cursor) -> None:
|
||||
"""Creates the `board_images` table, indices and triggers."""
|
||||
tables = [
|
||||
"""--sql
|
||||
CREATE TABLE IF NOT EXISTS board_images (
|
||||
board_id TEXT NOT NULL,
|
||||
image_name TEXT NOT NULL,
|
||||
created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
-- updated via trigger
|
||||
updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
-- Soft delete, currently unused
|
||||
deleted_at DATETIME,
|
||||
-- enforce one-to-many relationship between boards and images using PK
|
||||
-- (we can extend this to many-to-many later)
|
||||
PRIMARY KEY (image_name),
|
||||
FOREIGN KEY (board_id) REFERENCES boards (board_id) ON DELETE CASCADE,
|
||||
FOREIGN KEY (image_name) REFERENCES images (image_name) ON DELETE CASCADE
|
||||
);
|
||||
"""
|
||||
]
|
||||
|
||||
indices = [
|
||||
"CREATE INDEX IF NOT EXISTS idx_board_images_board_id ON board_images (board_id);",
|
||||
"CREATE INDEX IF NOT EXISTS idx_board_images_board_id_created_at ON board_images (board_id, created_at);",
|
||||
]
|
||||
|
||||
triggers = [
|
||||
"""--sql
|
||||
CREATE TRIGGER IF NOT EXISTS tg_board_images_updated_at
|
||||
AFTER UPDATE
|
||||
ON board_images FOR EACH ROW
|
||||
BEGIN
|
||||
UPDATE board_images SET updated_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
|
||||
WHERE board_id = old.board_id AND image_name = old.image_name;
|
||||
END;
|
||||
"""
|
||||
]
|
||||
|
||||
for stmt in tables + indices + triggers:
|
||||
cursor.execute(stmt)
|
||||
|
||||
def _create_boards(self, cursor: sqlite3.Cursor) -> None:
|
||||
"""Creates the `boards` table, indices and triggers."""
|
||||
tables = [
|
||||
"""--sql
|
||||
CREATE TABLE IF NOT EXISTS boards (
|
||||
board_id TEXT NOT NULL PRIMARY KEY,
|
||||
board_name TEXT NOT NULL,
|
||||
cover_image_name TEXT,
|
||||
created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
-- Updated via trigger
|
||||
updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
-- Soft delete, currently unused
|
||||
deleted_at DATETIME,
|
||||
FOREIGN KEY (cover_image_name) REFERENCES images (image_name) ON DELETE SET NULL
|
||||
);
|
||||
"""
|
||||
]
|
||||
|
||||
indices = ["CREATE INDEX IF NOT EXISTS idx_boards_created_at ON boards (created_at);"]
|
||||
|
||||
triggers = [
|
||||
"""--sql
|
||||
CREATE TRIGGER IF NOT EXISTS tg_boards_updated_at
|
||||
AFTER UPDATE
|
||||
ON boards FOR EACH ROW
|
||||
BEGIN
|
||||
UPDATE boards SET updated_at = current_timestamp
|
||||
WHERE board_id = old.board_id;
|
||||
END;
|
||||
"""
|
||||
]
|
||||
|
||||
for stmt in tables + indices + triggers:
|
||||
cursor.execute(stmt)
|
||||
|
||||
def _create_images(self, cursor: sqlite3.Cursor) -> None:
|
||||
"""Creates the `images` table, indices and triggers. Adds the `starred` column."""
|
||||
|
||||
tables = [
|
||||
"""--sql
|
||||
CREATE TABLE IF NOT EXISTS images (
|
||||
image_name TEXT NOT NULL PRIMARY KEY,
|
||||
-- This is an enum in python, unrestricted string here for flexibility
|
||||
image_origin TEXT NOT NULL,
|
||||
-- This is an enum in python, unrestricted string here for flexibility
|
||||
image_category TEXT NOT NULL,
|
||||
width INTEGER NOT NULL,
|
||||
height INTEGER NOT NULL,
|
||||
session_id TEXT,
|
||||
node_id TEXT,
|
||||
metadata TEXT,
|
||||
is_intermediate BOOLEAN DEFAULT FALSE,
|
||||
created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
-- Updated via trigger
|
||||
updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
-- Soft delete, currently unused
|
||||
deleted_at DATETIME
|
||||
);
|
||||
"""
|
||||
]
|
||||
|
||||
indices = [
|
||||
"CREATE UNIQUE INDEX IF NOT EXISTS idx_images_image_name ON images(image_name);",
|
||||
"CREATE INDEX IF NOT EXISTS idx_images_image_origin ON images(image_origin);",
|
||||
"CREATE INDEX IF NOT EXISTS idx_images_image_category ON images(image_category);",
|
||||
"CREATE INDEX IF NOT EXISTS idx_images_created_at ON images(created_at);",
|
||||
]
|
||||
|
||||
triggers = [
|
||||
"""--sql
|
||||
CREATE TRIGGER IF NOT EXISTS tg_images_updated_at
|
||||
AFTER UPDATE
|
||||
ON images FOR EACH ROW
|
||||
BEGIN
|
||||
UPDATE images SET updated_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
|
||||
WHERE image_name = old.image_name;
|
||||
END;
|
||||
"""
|
||||
]
|
||||
|
||||
# Add the 'starred' column to `images` if it doesn't exist
|
||||
cursor.execute("PRAGMA table_info(images)")
|
||||
columns = [column[1] for column in cursor.fetchall()]
|
||||
|
||||
if "starred" not in columns:
|
||||
tables.append("ALTER TABLE images ADD COLUMN starred BOOLEAN DEFAULT FALSE;")
|
||||
indices.append("CREATE INDEX IF NOT EXISTS idx_images_starred ON images(starred);")
|
||||
|
||||
for stmt in tables + indices + triggers:
|
||||
cursor.execute(stmt)
|
||||
|
||||
def _create_model_config(self, cursor: sqlite3.Cursor) -> None:
|
||||
"""Creates the `model_config` table, `model_manager_metadata` table, indices and triggers."""
|
||||
|
||||
tables = [
|
||||
"""--sql
|
||||
CREATE TABLE IF NOT EXISTS model_config (
|
||||
id TEXT NOT NULL PRIMARY KEY,
|
||||
-- The next 3 fields are enums in python, unrestricted string here
|
||||
base TEXT NOT NULL,
|
||||
type TEXT NOT NULL,
|
||||
name TEXT NOT NULL,
|
||||
path TEXT NOT NULL,
|
||||
original_hash TEXT, -- could be null
|
||||
-- Serialized JSON representation of the whole config object,
|
||||
-- which will contain additional fields from subclasses
|
||||
config TEXT NOT NULL,
|
||||
created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
-- Updated via trigger
|
||||
updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
-- unique constraint on combo of name, base and type
|
||||
UNIQUE(name, base, type)
|
||||
);
|
||||
""",
|
||||
"""--sql
|
||||
CREATE TABLE IF NOT EXISTS model_manager_metadata (
|
||||
metadata_key TEXT NOT NULL PRIMARY KEY,
|
||||
metadata_value TEXT NOT NULL
|
||||
);
|
||||
""",
|
||||
]
|
||||
|
||||
# Add trigger for `updated_at`.
|
||||
triggers = [
|
||||
"""--sql
|
||||
CREATE TRIGGER IF NOT EXISTS model_config_updated_at
|
||||
AFTER UPDATE
|
||||
ON model_config FOR EACH ROW
|
||||
BEGIN
|
||||
UPDATE model_config SET updated_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
|
||||
WHERE id = old.id;
|
||||
END;
|
||||
"""
|
||||
]
|
||||
|
||||
# Add indexes for searchable fields
|
||||
indices = [
|
||||
"CREATE INDEX IF NOT EXISTS base_index ON model_config(base);",
|
||||
"CREATE INDEX IF NOT EXISTS type_index ON model_config(type);",
|
||||
"CREATE INDEX IF NOT EXISTS name_index ON model_config(name);",
|
||||
"CREATE UNIQUE INDEX IF NOT EXISTS path_index ON model_config(path);",
|
||||
]
|
||||
|
||||
for stmt in tables + indices + triggers:
|
||||
cursor.execute(stmt)
|
||||
|
||||
def _create_session_queue(self, cursor: sqlite3.Cursor) -> None:
|
||||
tables = [
|
||||
"""--sql
|
||||
CREATE TABLE IF NOT EXISTS session_queue (
|
||||
item_id INTEGER PRIMARY KEY AUTOINCREMENT, -- used for ordering, cursor pagination
|
||||
batch_id TEXT NOT NULL, -- identifier of the batch this queue item belongs to
|
||||
queue_id TEXT NOT NULL, -- identifier of the queue this queue item belongs to
|
||||
session_id TEXT NOT NULL UNIQUE, -- duplicated data from the session column, for ease of access
|
||||
field_values TEXT, -- NULL if no values are associated with this queue item
|
||||
session TEXT NOT NULL, -- the session to be executed
|
||||
status TEXT NOT NULL DEFAULT 'pending', -- the status of the queue item, one of 'pending', 'in_progress', 'completed', 'failed', 'canceled'
|
||||
priority INTEGER NOT NULL DEFAULT 0, -- the priority, higher is more important
|
||||
error TEXT, -- any errors associated with this queue item
|
||||
created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')), -- updated via trigger
|
||||
started_at DATETIME, -- updated via trigger
|
||||
completed_at DATETIME -- updated via trigger, completed items are cleaned up on application startup
|
||||
-- Ideally this is a FK, but graph_executions uses INSERT OR REPLACE, and REPLACE triggers the ON DELETE CASCADE...
|
||||
-- FOREIGN KEY (session_id) REFERENCES graph_executions (id) ON DELETE CASCADE
|
||||
);
|
||||
"""
|
||||
]
|
||||
|
||||
indices = [
|
||||
"CREATE UNIQUE INDEX IF NOT EXISTS idx_session_queue_item_id ON session_queue(item_id);",
|
||||
"CREATE UNIQUE INDEX IF NOT EXISTS idx_session_queue_session_id ON session_queue(session_id);",
|
||||
"CREATE INDEX IF NOT EXISTS idx_session_queue_batch_id ON session_queue(batch_id);",
|
||||
"CREATE INDEX IF NOT EXISTS idx_session_queue_created_priority ON session_queue(priority);",
|
||||
"CREATE INDEX IF NOT EXISTS idx_session_queue_created_status ON session_queue(status);",
|
||||
]
|
||||
|
||||
triggers = [
|
||||
"""--sql
|
||||
CREATE TRIGGER IF NOT EXISTS tg_session_queue_completed_at
|
||||
AFTER UPDATE OF status ON session_queue
|
||||
FOR EACH ROW
|
||||
WHEN
|
||||
NEW.status = 'completed'
|
||||
OR NEW.status = 'failed'
|
||||
OR NEW.status = 'canceled'
|
||||
BEGIN
|
||||
UPDATE session_queue
|
||||
SET completed_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
|
||||
WHERE item_id = NEW.item_id;
|
||||
END;
|
||||
""",
|
||||
"""--sql
|
||||
CREATE TRIGGER IF NOT EXISTS tg_session_queue_started_at
|
||||
AFTER UPDATE OF status ON session_queue
|
||||
FOR EACH ROW
|
||||
WHEN
|
||||
NEW.status = 'in_progress'
|
||||
BEGIN
|
||||
UPDATE session_queue
|
||||
SET started_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
|
||||
WHERE item_id = NEW.item_id;
|
||||
END;
|
||||
""",
|
||||
"""--sql
|
||||
CREATE TRIGGER IF NOT EXISTS tg_session_queue_updated_at
|
||||
AFTER UPDATE
|
||||
ON session_queue FOR EACH ROW
|
||||
BEGIN
|
||||
UPDATE session_queue
|
||||
SET updated_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
|
||||
WHERE item_id = old.item_id;
|
||||
END;
|
||||
""",
|
||||
]
|
||||
|
||||
for stmt in tables + indices + triggers:
|
||||
cursor.execute(stmt)
|
||||
|
||||
def _create_workflow_images(self, cursor: sqlite3.Cursor) -> None:
|
||||
tables = [
|
||||
"""--sql
|
||||
CREATE TABLE IF NOT EXISTS workflow_images (
|
||||
workflow_id TEXT NOT NULL,
|
||||
image_name TEXT NOT NULL,
|
||||
created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
-- updated via trigger
|
||||
updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
-- Soft delete, currently unused
|
||||
deleted_at DATETIME,
|
||||
-- enforce one-to-many relationship between workflows and images using PK
|
||||
-- (we can extend this to many-to-many later)
|
||||
PRIMARY KEY (image_name),
|
||||
FOREIGN KEY (workflow_id) REFERENCES workflows (workflow_id) ON DELETE CASCADE,
|
||||
FOREIGN KEY (image_name) REFERENCES images (image_name) ON DELETE CASCADE
|
||||
);
|
||||
"""
|
||||
]
|
||||
|
||||
indices = [
|
||||
"CREATE INDEX IF NOT EXISTS idx_workflow_images_workflow_id ON workflow_images (workflow_id);",
|
||||
"CREATE INDEX IF NOT EXISTS idx_workflow_images_workflow_id_created_at ON workflow_images (workflow_id, created_at);",
|
||||
]
|
||||
|
||||
triggers = [
|
||||
"""--sql
|
||||
CREATE TRIGGER IF NOT EXISTS tg_workflow_images_updated_at
|
||||
AFTER UPDATE
|
||||
ON workflow_images FOR EACH ROW
|
||||
BEGIN
|
||||
UPDATE workflow_images SET updated_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
|
||||
WHERE workflow_id = old.workflow_id AND image_name = old.image_name;
|
||||
END;
|
||||
"""
|
||||
]
|
||||
|
||||
for stmt in tables + indices + triggers:
|
||||
cursor.execute(stmt)
|
||||
|
||||
def _create_workflows(self, cursor: sqlite3.Cursor) -> None:
|
||||
tables = [
|
||||
"""--sql
|
||||
CREATE TABLE IF NOT EXISTS workflows (
|
||||
workflow TEXT NOT NULL,
|
||||
workflow_id TEXT GENERATED ALWAYS AS (json_extract(workflow, '$.id')) VIRTUAL NOT NULL UNIQUE, -- gets implicit index
|
||||
created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')) -- updated via trigger
|
||||
);
|
||||
"""
|
||||
]
|
||||
|
||||
triggers = [
|
||||
"""--sql
|
||||
CREATE TRIGGER IF NOT EXISTS tg_workflows_updated_at
|
||||
AFTER UPDATE
|
||||
ON workflows FOR EACH ROW
|
||||
BEGIN
|
||||
UPDATE workflows
|
||||
SET updated_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
|
||||
WHERE workflow_id = old.workflow_id;
|
||||
END;
|
||||
"""
|
||||
]
|
||||
|
||||
for stmt in tables + triggers:
|
||||
cursor.execute(stmt)
|
||||
|
||||
|
||||
def build_migration_1() -> Migration:
|
||||
"""
|
||||
Builds the migration from database version 0 (init) to 1.
|
||||
|
||||
This migration represents the state of the database circa InvokeAI v3.4.0, which was the last
|
||||
version to not use migrations to manage the database.
|
||||
|
||||
As such, this migration does include some ALTER statements, and the SQL statements are written
|
||||
to be idempotent.
|
||||
|
||||
- Create `board_images` junction table
|
||||
- Create `boards` table
|
||||
- Create `images` table, add `starred` column
|
||||
- Create `model_config` table
|
||||
- Create `session_queue` table
|
||||
- Create `workflow_images` junction table
|
||||
- Create `workflows` table
|
||||
"""
|
||||
|
||||
migration_1 = Migration(
|
||||
from_version=0,
|
||||
to_version=1,
|
||||
callback=Migration1Callback(),
|
||||
)
|
||||
|
||||
return migration_1
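
# A minimal usage sketch, not part of this diff and with imports omitted, showing how this builder is
# expected to plug into the migrator defined further down in this changeset; the constructor arguments
# follow the SqliteDatabase and SqliteMigrator docstrings and are otherwise assumptions:
db = SqliteDatabase(db_path="invokeai.db", logger=logger)
migrator = SqliteMigrator(db=db)
migrator.register_migration(build_migration_1())  # migration 1 needs no external services
migrator.run_migrations()  # no-op if the database is already at the latest registered version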
|
||||
@@ -1,206 +0,0 @@
|
||||
import sqlite3
|
||||
from logging import Logger
|
||||
|
||||
from pydantic import ValidationError
|
||||
from tqdm import tqdm
|
||||
|
||||
from invokeai.app.services.image_files.image_files_base import ImageFileStorageBase
|
||||
from invokeai.app.services.image_files.image_files_common import ImageFileNotFoundException
|
||||
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration
|
||||
from invokeai.app.services.workflow_records.workflow_records_common import (
|
||||
UnsafeWorkflowWithVersionValidator,
|
||||
)
|
||||
|
||||
from .util.migrate_yaml_config_1 import MigrateModelYamlToDb1
|
||||
|
||||
|
||||
class Migration2Callback:
|
||||
def __init__(self, image_files: ImageFileStorageBase, logger: Logger):
|
||||
self._image_files = image_files
|
||||
self._logger = logger
|
||||
|
||||
def __call__(self, cursor: sqlite3.Cursor):
|
||||
self._add_images_has_workflow(cursor)
|
||||
self._add_session_queue_workflow(cursor)
|
||||
self._drop_old_workflow_tables(cursor)
|
||||
self._add_workflow_library(cursor)
|
||||
self._drop_model_manager_metadata(cursor)
|
||||
self._recreate_model_config(cursor)
|
||||
self._migrate_model_config_records(cursor)
|
||||
self._migrate_embedded_workflows(cursor)
|
||||
|
||||
def _add_images_has_workflow(self, cursor: sqlite3.Cursor) -> None:
|
||||
"""Add the `has_workflow` column to `images` table."""
|
||||
cursor.execute("PRAGMA table_info(images)")
|
||||
columns = [column[1] for column in cursor.fetchall()]
|
||||
|
||||
if "has_workflow" not in columns:
|
||||
cursor.execute("ALTER TABLE images ADD COLUMN has_workflow BOOLEAN DEFAULT FALSE;")
|
||||
|
||||
def _add_session_queue_workflow(self, cursor: sqlite3.Cursor) -> None:
|
||||
"""Add the `workflow` column to `session_queue` table."""
|
||||
|
||||
cursor.execute("PRAGMA table_info(session_queue)")
|
||||
columns = [column[1] for column in cursor.fetchall()]
|
||||
|
||||
if "workflow" not in columns:
|
||||
cursor.execute("ALTER TABLE session_queue ADD COLUMN workflow TEXT;")
|
||||
|
||||
def _drop_old_workflow_tables(self, cursor: sqlite3.Cursor) -> None:
|
||||
"""Drops the `workflows` and `workflow_images` tables."""
|
||||
cursor.execute("DROP TABLE IF EXISTS workflow_images;")
|
||||
cursor.execute("DROP TABLE IF EXISTS workflows;")
|
||||
|
||||
def _add_workflow_library(self, cursor: sqlite3.Cursor) -> None:
|
||||
"""Adds the `workflow_library` table and drops the `workflows` and `workflow_images` tables."""
|
||||
tables = [
|
||||
"""--sql
|
||||
CREATE TABLE IF NOT EXISTS workflow_library (
|
||||
workflow_id TEXT NOT NULL PRIMARY KEY,
|
||||
workflow TEXT NOT NULL,
|
||||
created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
-- updated via trigger
|
||||
updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
-- updated manually when retrieving workflow
|
||||
opened_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
-- Generated columns, needed for indexing and searching
|
||||
category TEXT GENERATED ALWAYS as (json_extract(workflow, '$.meta.category')) VIRTUAL NOT NULL,
|
||||
name TEXT GENERATED ALWAYS as (json_extract(workflow, '$.name')) VIRTUAL NOT NULL,
|
||||
description TEXT GENERATED ALWAYS as (json_extract(workflow, '$.description')) VIRTUAL NOT NULL
|
||||
);
|
||||
""",
|
||||
]
|
||||
|
||||
indices = [
|
||||
"CREATE INDEX IF NOT EXISTS idx_workflow_library_created_at ON workflow_library(created_at);",
|
||||
"CREATE INDEX IF NOT EXISTS idx_workflow_library_updated_at ON workflow_library(updated_at);",
|
||||
"CREATE INDEX IF NOT EXISTS idx_workflow_library_opened_at ON workflow_library(opened_at);",
|
||||
"CREATE INDEX IF NOT EXISTS idx_workflow_library_category ON workflow_library(category);",
|
||||
"CREATE INDEX IF NOT EXISTS idx_workflow_library_name ON workflow_library(name);",
|
||||
"CREATE INDEX IF NOT EXISTS idx_workflow_library_description ON workflow_library(description);",
|
||||
]
|
||||
|
||||
triggers = [
|
||||
"""--sql
|
||||
CREATE TRIGGER IF NOT EXISTS tg_workflow_library_updated_at
|
||||
AFTER UPDATE
|
||||
ON workflow_library FOR EACH ROW
|
||||
BEGIN
|
||||
UPDATE workflow_library
|
||||
SET updated_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
|
||||
WHERE workflow_id = old.workflow_id;
|
||||
END;
|
||||
"""
|
||||
]
|
||||
|
||||
for stmt in tables + indices + triggers:
|
||||
cursor.execute(stmt)
|
||||
|
||||
def _drop_model_manager_metadata(self, cursor: sqlite3.Cursor) -> None:
|
||||
"""Drops the `model_manager_metadata` table."""
|
||||
cursor.execute("DROP TABLE IF EXISTS model_manager_metadata;")
|
||||
|
||||
def _recreate_model_config(self, cursor: sqlite3.Cursor) -> None:
|
||||
"""
|
||||
Drops the `model_config` table, recreating it.
|
||||
|
||||
In 3.4.0, this table used explicit columns, but it was changed to use json_extract in 3.5.0.
|
||||
|
||||
Because this table is not used in production, we are able to simply drop it and recreate it.
|
||||
"""
|
||||
|
||||
cursor.execute("DROP TABLE IF EXISTS model_config;")
|
||||
|
||||
cursor.execute(
|
||||
"""--sql
|
||||
CREATE TABLE IF NOT EXISTS model_config (
|
||||
id TEXT NOT NULL PRIMARY KEY,
|
||||
-- The next 3 fields are enums in python, unrestricted string here
|
||||
base TEXT GENERATED ALWAYS as (json_extract(config, '$.base')) VIRTUAL NOT NULL,
|
||||
type TEXT GENERATED ALWAYS as (json_extract(config, '$.type')) VIRTUAL NOT NULL,
|
||||
name TEXT GENERATED ALWAYS as (json_extract(config, '$.name')) VIRTUAL NOT NULL,
|
||||
path TEXT GENERATED ALWAYS as (json_extract(config, '$.path')) VIRTUAL NOT NULL,
|
||||
format TEXT GENERATED ALWAYS as (json_extract(config, '$.format')) VIRTUAL NOT NULL,
|
||||
original_hash TEXT, -- could be null
|
||||
-- Serialized JSON representation of the whole config object,
|
||||
-- which will contain additional fields from subclasses
|
||||
config TEXT NOT NULL,
|
||||
created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
-- Updated via trigger
|
||||
updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
-- unique constraint on combo of name, base and type
|
||||
UNIQUE(name, base, type)
|
||||
);
|
||||
"""
|
||||
)
|
||||
|
||||
def _migrate_model_config_records(self, cursor: sqlite3.Cursor) -> None:
|
||||
"""After updating the model config table, we repopulate it."""
|
||||
model_record_migrator = MigrateModelYamlToDb1(cursor)
|
||||
model_record_migrator.migrate()
|
||||
|
||||
def _migrate_embedded_workflows(self, cursor: sqlite3.Cursor) -> None:
|
||||
"""
|
||||
In the v3.5.0 release, InvokeAI changed how it handles embedded workflows. The `images` table in
|
||||
the database now has a `has_workflow` column, indicating if an image has a workflow embedded.
|
||||
|
||||
This migrate callback checks each image for the presence of an embedded workflow, then updates its entry
|
||||
in the database accordingly.
|
||||
"""
|
||||
# Get all image names
|
||||
cursor.execute("SELECT image_name FROM images")
|
||||
image_names: list[str] = [image[0] for image in cursor.fetchall()]
|
||||
total_image_names = len(image_names)
|
||||
|
||||
if not total_image_names:
|
||||
return
|
||||
|
||||
self._logger.info(f"Migrating workflows for {total_image_names} images")
|
||||
|
||||
# Migrate the images
|
||||
to_migrate: list[tuple[bool, str]] = []
|
||||
pbar = tqdm(image_names)
|
||||
for idx, image_name in enumerate(pbar):
|
||||
pbar.set_description(f"Checking image {idx + 1}/{total_image_names} for workflow")
|
||||
try:
|
||||
pil_image = self._image_files.get(image_name)
|
||||
except ImageFileNotFoundException:
|
||||
self._logger.warning(f"Image {image_name} not found, skipping")
|
||||
continue
|
||||
if "invokeai_workflow" in pil_image.info:
|
||||
try:
|
||||
UnsafeWorkflowWithVersionValidator.validate_json(pil_image.info.get("invokeai_workflow", ""))
|
||||
except ValidationError:
|
||||
self._logger.warning(f"Image {image_name} has invalid embedded workflow, skipping")
|
||||
continue
|
||||
to_migrate.append((True, image_name))
|
||||
|
||||
self._logger.info(f"Adding {len(to_migrate)} embedded workflows to database")
|
||||
cursor.executemany("UPDATE images SET has_workflow = ? WHERE image_name = ?", to_migrate)
|
||||
|
||||
|
||||
def build_migration_2(image_files: ImageFileStorageBase, logger: Logger) -> Migration:
|
||||
"""
|
||||
Builds the migration from database version 1 to 2.
|
||||
|
||||
Introduced in v3.5.0 for the new workflow library.
|
||||
|
||||
:param image_files: The image files service, used to check for embedded workflows
|
||||
:param logger: The logger, used to log progress during embedded workflows handling
|
||||
|
||||
This migration does the following:
|
||||
- Add `has_workflow` column to `images` table
|
||||
- Add `workflow` column to `session_queue` table
|
||||
- Drop `workflows` and `workflow_images` tables
|
||||
- Add `workflow_library` table
|
||||
- Drop the `model_manager_metadata` table
- Drop the `model_config` table, recreating it (at this point, there is no user data in this table)
- Populate the `has_workflow` column in the `images` table (requires `image_files` & `logger` dependencies)
|
||||
"""
|
||||
migration_2 = Migration(
|
||||
from_version=1,
|
||||
to_version=2,
|
||||
callback=Migration2Callback(image_files=image_files, logger=logger),
|
||||
)
|
||||
|
||||
return migration_2
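
# A hedged sketch, not part of this diff, of how migrations 1 and 2 are registered together; it assumes
# the application has already constructed the image files service and a logger, which build_migration_2
# needs as described above:
db = SqliteDatabase(db_path="invokeai.db", logger=logger)
migrator = SqliteMigrator(db=db)
migrator.register_migration(build_migration_1())
migrator.register_migration(build_migration_2(image_files=image_files, logger=logger))
migrator.run_migrations()  # each migration runs inside its own transaction and is rolled back on error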
|
||||
@@ -1,75 +0,0 @@
|
||||
import sqlite3
|
||||
|
||||
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration
|
||||
|
||||
from .util.migrate_yaml_config_1 import MigrateModelYamlToDb1
|
||||
|
||||
|
||||
class Migration3Callback:
|
||||
def __init__(self) -> None:
|
||||
pass
|
||||
|
||||
def __call__(self, cursor: sqlite3.Cursor) -> None:
|
||||
self._drop_model_manager_metadata(cursor)
|
||||
self._recreate_model_config(cursor)
|
||||
self._migrate_model_config_records(cursor)
|
||||
|
||||
def _drop_model_manager_metadata(self, cursor: sqlite3.Cursor) -> None:
|
||||
"""Drops the `model_manager_metadata` table."""
|
||||
cursor.execute("DROP TABLE IF EXISTS model_manager_metadata;")
|
||||
|
||||
def _recreate_model_config(self, cursor: sqlite3.Cursor) -> None:
|
||||
"""
|
||||
Drops the `model_config` table, recreating it.
|
||||
|
||||
In 3.4.0, this table used explicit columns, but it was changed to use json_extract in 3.5.0.
|
||||
|
||||
Because this table is not used in production, we are able to simply drop it and recreate it.
|
||||
"""
|
||||
|
||||
cursor.execute("DROP TABLE IF EXISTS model_config;")
|
||||
|
||||
cursor.execute(
|
||||
"""--sql
|
||||
CREATE TABLE IF NOT EXISTS model_config (
|
||||
id TEXT NOT NULL PRIMARY KEY,
|
||||
-- The next 3 fields are enums in python, unrestricted string here
|
||||
base TEXT GENERATED ALWAYS as (json_extract(config, '$.base')) VIRTUAL NOT NULL,
|
||||
type TEXT GENERATED ALWAYS as (json_extract(config, '$.type')) VIRTUAL NOT NULL,
|
||||
name TEXT GENERATED ALWAYS as (json_extract(config, '$.name')) VIRTUAL NOT NULL,
|
||||
path TEXT GENERATED ALWAYS as (json_extract(config, '$.path')) VIRTUAL NOT NULL,
|
||||
format TEXT GENERATED ALWAYS as (json_extract(config, '$.format')) VIRTUAL NOT NULL,
|
||||
original_hash TEXT, -- could be null
|
||||
-- Serialized JSON representation of the whole config object,
|
||||
-- which will contain additional fields from subclasses
|
||||
config TEXT NOT NULL,
|
||||
created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
-- Updated via trigger
|
||||
updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
-- unique constraint on combo of name, base and type
|
||||
UNIQUE(name, base, type)
|
||||
);
|
||||
"""
|
||||
)
|
||||
|
||||
def _migrate_model_config_records(self, cursor: sqlite3.Cursor) -> None:
|
||||
"""After updating the model config table, we repopulate it."""
|
||||
model_record_migrator = MigrateModelYamlToDb1(cursor)
|
||||
model_record_migrator.migrate()
|
||||
|
||||
|
||||
def build_migration_3() -> Migration:
|
||||
"""
|
||||
Build the migration from database version 2 to 3.
|
||||
|
||||
This migration does the following:
|
||||
- Drops the `model_config` table, recreating it
|
||||
- Migrates data from `models.yaml` into the `model_config` table
|
||||
"""
|
||||
migration_3 = Migration(
|
||||
from_version=2,
|
||||
to_version=3,
|
||||
callback=Migration3Callback(),
|
||||
)
|
||||
|
||||
return migration_3
|
||||
@@ -1,148 +0,0 @@
|
||||
# Copyright (c) 2023 Lincoln D. Stein
|
||||
"""Migrate from the InvokeAI v2 models.yaml format to the v3 sqlite format."""
|
||||
|
||||
import json
|
||||
import sqlite3
|
||||
from hashlib import sha1
|
||||
from logging import Logger
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
from omegaconf import DictConfig, OmegaConf
|
||||
from pydantic import TypeAdapter
|
||||
|
||||
from invokeai.app.services.config import InvokeAIAppConfig
|
||||
from invokeai.app.services.model_records import (
|
||||
DuplicateModelException,
|
||||
UnknownModelException,
|
||||
)
|
||||
from invokeai.backend.model_manager.config import (
|
||||
AnyModelConfig,
|
||||
BaseModelType,
|
||||
ModelConfigFactory,
|
||||
ModelType,
|
||||
)
|
||||
from invokeai.backend.model_manager.hash import FastModelHash
|
||||
from invokeai.backend.util.logging import InvokeAILogger
|
||||
|
||||
ModelsValidator = TypeAdapter(AnyModelConfig)
|
||||
|
||||
|
||||
class MigrateModelYamlToDb1:
|
||||
"""
|
||||
Migrate the InvokeAI models.yaml format (VERSION 3.0.0) to SQL3 database format (VERSION 3.5.0).
|
||||
|
||||
The class has one externally useful method, migrate(), which scans the
|
||||
current models.yaml file and imports all of its entries into invokeai.db.
|
||||
|
||||
Use this way:
|
||||
|
||||
from .util.migrate_yaml_config_1 import MigrateModelYamlToDb1
MigrateModelYamlToDb1(cursor).migrate()
|
||||
|
||||
"""
|
||||
|
||||
config: InvokeAIAppConfig
|
||||
logger: Logger
|
||||
cursor: sqlite3.Cursor
|
||||
|
||||
def __init__(self, cursor: Optional[sqlite3.Cursor] = None) -> None:
|
||||
self.config = InvokeAIAppConfig.get_config()
|
||||
self.config.parse_args()
|
||||
self.logger = InvokeAILogger.get_logger()
|
||||
self.cursor = cursor
|
||||
|
||||
def get_yaml(self) -> DictConfig:
|
||||
"""Fetch the models.yaml DictConfig for this installation."""
|
||||
yaml_path = self.config.model_conf_path
|
||||
omegaconf = OmegaConf.load(yaml_path)
|
||||
assert isinstance(omegaconf, DictConfig)
|
||||
return omegaconf
|
||||
|
||||
def migrate(self) -> None:
|
||||
"""Do the migration from models.yaml to invokeai.db."""
|
||||
try:
|
||||
yaml = self.get_yaml()
|
||||
except OSError:
|
||||
return
|
||||
|
||||
for model_key, stanza in yaml.items():
|
||||
if model_key == "__metadata__":
|
||||
assert (
|
||||
stanza["version"] == "3.0.0"
|
||||
), f"This script works on version 3.0.0 yaml files, but your configuration points to a {stanza['version']} version"
|
||||
continue
|
||||
|
||||
base_type, model_type, model_name = str(model_key).split("/")
|
||||
hash = FastModelHash.hash(self.config.models_path / stanza.path)
|
||||
assert isinstance(model_key, str)
|
||||
new_key = sha1(model_key.encode("utf-8")).hexdigest()
|
||||
|
||||
stanza["base"] = BaseModelType(base_type)
|
||||
stanza["type"] = ModelType(model_type)
|
||||
stanza["name"] = model_name
|
||||
stanza["original_hash"] = hash
|
||||
stanza["current_hash"] = hash
|
||||
|
||||
new_config: AnyModelConfig = ModelsValidator.validate_python(stanza) # type: ignore # see https://github.com/pydantic/pydantic/discussions/7094
|
||||
|
||||
try:
|
||||
if original_record := self._search_by_path(stanza.path):
|
||||
key = original_record.key
|
||||
self.logger.info(f"Updating model {model_name} with information from models.yaml using key {key}")
|
||||
self._update_model(key, new_config)
|
||||
else:
|
||||
self.logger.info(f"Adding model {model_name} with key {model_key}")
|
||||
self._add_model(new_key, new_config)
|
||||
except DuplicateModelException:
|
||||
self.logger.warning(f"Model {model_name} is already in the database")
|
||||
except UnknownModelException:
|
||||
self.logger.warning(f"Model at {stanza.path} could not be found in database")
|
||||
|
||||
def _search_by_path(self, path: Path) -> Optional[AnyModelConfig]:
|
||||
self.cursor.execute(
|
||||
"""--sql
|
||||
SELECT config FROM model_config
|
||||
WHERE path=?;
|
||||
""",
|
||||
(str(path),),
|
||||
)
|
||||
results = [ModelConfigFactory.make_config(json.loads(x[0])) for x in self.cursor.fetchall()]
|
||||
return results[0] if results else None
|
||||
|
||||
def _update_model(self, key: str, config: AnyModelConfig) -> None:
|
||||
record = ModelConfigFactory.make_config(config, key=key) # ensure it is a valid config object
|
||||
json_serialized = record.model_dump_json() # and turn it into a json string.
|
||||
self.cursor.execute(
|
||||
"""--sql
|
||||
UPDATE model_config
|
||||
SET
|
||||
config=?
|
||||
WHERE id=?;
|
||||
""",
|
||||
(json_serialized, key),
|
||||
)
|
||||
if self.cursor.rowcount == 0:
|
||||
raise UnknownModelException("model not found")
|
||||
|
||||
def _add_model(self, key: str, config: AnyModelConfig) -> None:
|
||||
record = ModelConfigFactory.make_config(config, key=key) # ensure it is a valid config object.
|
||||
json_serialized = record.model_dump_json() # and turn it into a json string.
|
||||
try:
|
||||
self.cursor.execute(
|
||||
"""--sql
|
||||
INSERT INTO model_config (
|
||||
id,
|
||||
original_hash,
|
||||
config
|
||||
)
|
||||
VALUES (?,?,?);
|
||||
""",
|
||||
(
|
||||
key,
|
||||
record.original_hash,
|
||||
json_serialized,
|
||||
),
|
||||
)
|
||||
except sqlite3.IntegrityError as exc:
|
||||
raise DuplicateModelException(f"{record.name}: model is already in database") from exc
|
||||
@@ -1,164 +0,0 @@
|
||||
import sqlite3
|
||||
from typing import Optional, Protocol, runtime_checkable
|
||||
|
||||
from pydantic import BaseModel, ConfigDict, Field, model_validator
|
||||
|
||||
|
||||
@runtime_checkable
|
||||
class MigrateCallback(Protocol):
|
||||
"""
|
||||
A callback that performs a migration.
|
||||
|
||||
Migrate callbacks are provided an open cursor to the database. They should not commit their
|
||||
transaction; this is handled by the migrator.
|
||||
|
||||
If the callback needs to access additional dependencies, they can be provided when the callback is constructed.
|
||||
|
||||
See :class:`Migration` for an example.
|
||||
"""
|
||||
|
||||
def __call__(self, cursor: sqlite3.Cursor) -> None:
|
||||
...
|
||||
|
||||
|
||||
class MigrationError(RuntimeError):
|
||||
"""Raised when a migration fails."""
|
||||
|
||||
|
||||
class MigrationVersionError(ValueError):
|
||||
"""Raised when a migration version is invalid."""
|
||||
|
||||
|
||||
class Migration(BaseModel):
|
||||
"""
|
||||
Represents a migration for a SQLite database.
|
||||
|
||||
:param from_version: The database version on which this migration may be run
|
||||
:param to_version: The database version that results from this migration
|
||||
:param callback: The callback to run to perform the migration
|
||||
|
||||
Migration callbacks will be provided an open cursor to the database. They should not commit their
|
||||
transaction; this is handled by the migrator.
|
||||
|
||||
It is suggested to use a class to define the migration callback and a builder function to create
|
||||
the :class:`Migration`. This allows the callback to be provided with additional dependencies and
|
||||
keeps things tidy, as all migration logic is self-contained.
|
||||
|
||||
Example:
|
||||
```py
|
||||
# Define the migration callback class
|
||||
class Migration1Callback:
|
||||
# This migration needs the image files service, so we define a class that accepts it in its constructor.
|
||||
def __init__(self, image_files: ImageFileStorageBase) -> None:
|
||||
self._image_files = image_files
|
||||
|
||||
# This dunder method allows the instance of the class to be called like a function.
|
||||
def __call__(self, cursor: sqlite3.Cursor) -> None:
|
||||
self._add_with_banana_column(cursor)
|
||||
self._do_something_with_images(cursor)
|
||||
|
||||
def _add_with_banana_column(self, cursor: sqlite3.Cursor) -> None:
|
||||
\"""Adds the with_banana column to the sushi table.\"""
|
||||
# Execute SQL using the cursor, taking care to *not commit* a transaction
|
||||
cursor.execute('ALTER TABLE sushi ADD COLUMN with_banana BOOLEAN DEFAULT TRUE;')
|
||||
|
||||
def _do_something_with_images(self, cursor: sqlite3.Cursor) -> None:
|
||||
\"""Does something with the image files service.\"""
|
||||
self._image_files.get(...)
|
||||
|
||||
# Define the migration builder function. This function creates an instance of the migration callback
|
||||
# class and returns a Migration.
|
||||
def build_migration_1(image_files: ImageFileStorageBase) -> Migration:
|
||||
\"""Builds the migration from database version 0 to 1.
|
||||
Requires the image files service to...
|
||||
\"""
|
||||
|
||||
migration_1 = Migration(
|
||||
from_version=0,
|
||||
to_version=1,
|
||||
callback=Migration1Callback(image_files=image_files),
|
||||
)
|
||||
|
||||
return migration_1
|
||||
|
||||
# Register the migration after all dependencies have been initialized
|
||||
db = SqliteDatabase(db_path, logger)
|
||||
migrator = SqliteMigrator(db)
|
||||
migrator.register_migration(build_migration_1(image_files))
|
||||
migrator.run_migrations()
|
||||
```
|
||||
"""
|
||||
|
||||
from_version: int = Field(ge=0, strict=True, description="The database version on which this migration may be run")
|
||||
to_version: int = Field(ge=1, strict=True, description="The database version that results from this migration")
|
||||
callback: MigrateCallback = Field(description="The callback to run to perform the migration")
|
||||
|
||||
@model_validator(mode="after")
|
||||
def validate_to_version(self) -> "Migration":
|
||||
"""Validates that to_version is one greater than from_version."""
|
||||
if self.to_version != self.from_version + 1:
|
||||
raise MigrationVersionError("to_version must be one greater than from_version")
|
||||
return self
|
||||
|
||||
def __hash__(self) -> int:
|
||||
# Callables are not hashable, so we need to implement our own __hash__ function to use this class in a set.
|
||||
return hash((self.from_version, self.to_version))
|
||||
|
||||
model_config = ConfigDict(arbitrary_types_allowed=True)
|
||||
|
||||
|
||||
class MigrationSet:
|
||||
"""
|
||||
A set of Migrations. Performs validation during migration registration and provides utility methods.
|
||||
|
||||
Migrations should be registered with `register()`. Once all are registered, `validate_migration_chain()`
|
||||
should be called to ensure that the migrations form a single chain of migrations from version 0 to the latest version.
|
||||
"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self._migrations: set[Migration] = set()
|
||||
|
||||
def register(self, migration: Migration) -> None:
|
||||
"""Registers a migration."""
|
||||
migration_from_already_registered = any(m.from_version == migration.from_version for m in self._migrations)
|
||||
migration_to_already_registered = any(m.to_version == migration.to_version for m in self._migrations)
|
||||
if migration_from_already_registered or migration_to_already_registered:
|
||||
raise MigrationVersionError("Migration with from_version or to_version already registered")
|
||||
self._migrations.add(migration)
|
||||
|
||||
def get(self, from_version: int) -> Optional[Migration]:
|
||||
"""Gets the migration that may be run on the given database version."""
|
||||
# register() ensures that there is only one migration with a given from_version, so this is safe.
|
||||
return next((m for m in self._migrations if m.from_version == from_version), None)
|
||||
|
||||
def validate_migration_chain(self) -> None:
|
||||
"""
|
||||
Validates that the migrations form a single chain of migrations from version 0 to the latest version,
|
||||
Raises a MigrationError if there is a problem.
|
||||
"""
|
||||
if self.count == 0:
|
||||
return
|
||||
if self.latest_version == 0:
|
||||
return
|
||||
next_migration = self.get(from_version=0)
|
||||
if next_migration is None:
|
||||
raise MigrationError("Migration chain is fragmented")
|
||||
touched_count = 1
|
||||
while next_migration is not None:
|
||||
next_migration = self.get(next_migration.to_version)
|
||||
if next_migration is not None:
|
||||
touched_count += 1
|
||||
if touched_count != self.count:
|
||||
raise MigrationError("Migration chain is fragmented")
|
||||
|
||||
@property
|
||||
def count(self) -> int:
|
||||
"""The count of registered migrations."""
|
||||
return len(self._migrations)
|
||||
|
||||
@property
|
||||
def latest_version(self) -> int:
|
||||
"""Gets latest to_version among registered migrations. Returns 0 if there are no migrations registered."""
|
||||
if self.count == 0:
|
||||
return 0
|
||||
return sorted(self._migrations, key=lambda m: m.to_version)[-1].to_version
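
# A short sketch, not part of this diff, of MigrationSet used on its own, per the docstring above; the
# builder functions are the ones defined earlier in this changeset and the service instances are assumed
# to already exist:
migrations = MigrationSet()
migrations.register(build_migration_1())
migrations.register(build_migration_2(image_files=image_files, logger=logger))
migrations.validate_migration_chain()  # raises MigrationError if the 0 -> 1 -> 2 chain is fragmented
assert migrations.latest_version == 2
first = migrations.get(from_version=0)  # the migration to run against a fresh (version 0) database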
|
||||
@@ -1,130 +0,0 @@
|
||||
import sqlite3
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
|
||||
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration, MigrationError, MigrationSet
|
||||
|
||||
|
||||
class SqliteMigrator:
|
||||
"""
|
||||
Manages migrations for a SQLite database.
|
||||
|
||||
:param db: The instance of :class:`SqliteDatabase` to migrate.
|
||||
|
||||
Migrations should be registered with :meth:`register_migration`.
|
||||
|
||||
Each migration is run in a transaction. If a migration fails, the transaction is rolled back.
|
||||
|
||||
Example Usage:
|
||||
```py
|
||||
db = SqliteDatabase(db_path="my_db.db", logger=logger)
|
||||
migrator = SqliteMigrator(db=db)
|
||||
migrator.register_migration(build_migration_1())
|
||||
migrator.register_migration(build_migration_2())
|
||||
migrator.run_migrations()
|
||||
```
|
||||
"""
|
||||
|
||||
backup_path: Optional[Path] = None
|
||||
|
||||
def __init__(self, db: SqliteDatabase) -> None:
|
||||
self._db = db
|
||||
self._logger = db.logger
|
||||
self._migration_set = MigrationSet()
|
||||
|
||||
def register_migration(self, migration: Migration) -> None:
|
||||
"""Registers a migration."""
|
||||
self._migration_set.register(migration)
|
||||
self._logger.debug(f"Registered migration {migration.from_version} -> {migration.to_version}")
|
||||
|
||||
def run_migrations(self) -> bool:
|
||||
"""Migrates the database to the latest version."""
|
||||
with self._db.lock:
|
||||
# This throws if there is a problem.
|
||||
self._migration_set.validate_migration_chain()
|
||||
cursor = self._db.conn.cursor()
|
||||
self._create_migrations_table(cursor=cursor)
|
||||
|
||||
if self._migration_set.count == 0:
|
||||
self._logger.debug("No migrations registered")
|
||||
return False
|
||||
|
||||
if self._get_current_version(cursor=cursor) == self._migration_set.latest_version:
|
||||
self._logger.debug("Database is up to date, no migrations to run")
|
||||
return False
|
||||
|
||||
self._logger.info("Database update needed")
|
||||
next_migration = self._migration_set.get(from_version=self._get_current_version(cursor))
|
||||
while next_migration is not None:
|
||||
self._run_migration(next_migration)
|
||||
next_migration = self._migration_set.get(self._get_current_version(cursor))
|
||||
self._logger.info("Database updated successfully")
|
||||
return True
|
||||
|
||||
def _run_migration(self, migration: Migration) -> None:
|
||||
"""Runs a single migration."""
|
||||
try:
|
||||
# Using sqlite3.Connection as a context manager commits the transaction on exit, or rolls it back if an
|
||||
# exception is raised.
|
||||
with self._db.lock, self._db.conn as conn:
|
||||
cursor = conn.cursor()
|
||||
if self._get_current_version(cursor) != migration.from_version:
|
||||
raise MigrationError(
|
||||
f"Database is at version {self._get_current_version(cursor)}, expected {migration.from_version}"
|
||||
)
|
||||
self._logger.debug(f"Running migration from {migration.from_version} to {migration.to_version}")
|
||||
|
||||
# Run the actual migration
|
||||
migration.callback(cursor)
|
||||
|
||||
# Update the version
|
||||
cursor.execute("INSERT INTO migrations (version) VALUES (?);", (migration.to_version,))
|
||||
|
||||
self._logger.debug(
|
||||
f"Successfully migrated database from {migration.from_version} to {migration.to_version}"
|
||||
)
|
||||
# We want to catch *any* error, mirroring the behaviour of the sqlite3 module.
|
||||
except Exception as e:
|
||||
# The connection context manager has already rolled back the migration, so we don't need to do anything.
|
||||
msg = f"Error migrating database from {migration.from_version} to {migration.to_version}: {e}"
|
||||
self._logger.error(msg)
|
||||
raise MigrationError(msg) from e
|
||||
|
||||
def _create_migrations_table(self, cursor: sqlite3.Cursor) -> None:
|
||||
"""Creates the migrations table for the database, if one does not already exist."""
|
||||
with self._db.lock:
|
||||
try:
|
||||
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='migrations';")
|
||||
if cursor.fetchone() is not None:
|
||||
return
|
||||
cursor.execute(
|
||||
"""--sql
|
||||
CREATE TABLE migrations (
|
||||
version INTEGER PRIMARY KEY,
|
||||
migrated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW'))
|
||||
);
|
||||
"""
|
||||
)
|
||||
cursor.execute("INSERT INTO migrations (version) VALUES (0);")
|
||||
cursor.connection.commit()
|
||||
self._logger.debug("Created migrations table")
|
||||
except sqlite3.Error as e:
|
||||
msg = f"Problem creating migrations table: {e}"
|
||||
self._logger.error(msg)
|
||||
cursor.connection.rollback()
|
||||
raise MigrationError(msg) from e
|
||||
|
||||
@classmethod
|
||||
def _get_current_version(cls, cursor: sqlite3.Cursor) -> int:
|
||||
"""Gets the current version of the database, or 0 if the migrations table does not exist."""
|
||||
try:
|
||||
cursor.execute("SELECT MAX(version) FROM migrations;")
|
||||
version: int = cursor.fetchone()[0]
|
||||
if version is None:
|
||||
return 0
|
||||
return version
|
||||
except sqlite3.OperationalError as e:
|
||||
if "no such table" in str(e):
|
||||
return 0
|
||||
raise
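
# For reference, a sketch, not part of this diff, of the bookkeeping the migrator relies on:
# _create_migrations_table seeds a `migrations` table at version 0 and each successful migration inserts
# its to_version, so the current version is simply the largest recorded value:
cursor = db.conn.cursor()
cursor.execute("SELECT version FROM migrations ORDER BY version;")
print(cursor.fetchall())  # e.g. [(0,), (1,), (2,)] after migrations 1 and 2 have run
cursor.execute("SELECT MAX(version) FROM migrations;")
print(cursor.fetchone()[0])  # 2 -- the same value _get_current_version() computes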
|
||||
@@ -3,9 +3,10 @@ from enum import Enum
|
||||
from typing import Any, Union
|
||||
|
||||
import semver
|
||||
from pydantic import BaseModel, ConfigDict, Field, JsonValue, TypeAdapter, field_validator
|
||||
from pydantic import BaseModel, Field, JsonValue, TypeAdapter, field_validator
|
||||
|
||||
from invokeai.app.util.metaenum import MetaEnum
|
||||
from invokeai.app.util.misc import uuid_string
|
||||
|
||||
__workflow_meta_version__ = semver.Version.parse("1.0.0")
|
||||
|
||||
@@ -31,13 +32,12 @@ class WorkflowRecordOrderBy(str, Enum, metaclass=MetaEnum):
|
||||
class WorkflowCategory(str, Enum, metaclass=MetaEnum):
|
||||
User = "user"
|
||||
Default = "default"
|
||||
Project = "project"
|
||||
|
||||
|
||||
class WorkflowMeta(BaseModel):
|
||||
version: str = Field(description="The version of the workflow schema.")
|
||||
category: WorkflowCategory = Field(
|
||||
default=WorkflowCategory.User, description="The category of the workflow (user or default)."
|
||||
)
|
||||
category: WorkflowCategory = Field(description="The category of the workflow (user or default).")
|
||||
|
||||
@field_validator("version")
|
||||
def validate_version(cls, version: str):
|
||||
@@ -65,26 +65,12 @@ class WorkflowWithoutID(BaseModel):
|
||||
nodes: list[dict[str, JsonValue]] = Field(description="The nodes of the workflow.")
|
||||
edges: list[dict[str, JsonValue]] = Field(description="The edges of the workflow.")
|
||||
|
||||
model_config = ConfigDict(extra="ignore")
|
||||
|
||||
|
||||
WorkflowWithoutIDValidator = TypeAdapter(WorkflowWithoutID)
|
||||
|
||||
|
||||
class UnsafeWorkflowWithVersion(BaseModel):
|
||||
"""
|
||||
This utility model only requires a workflow to have a valid version string.
|
||||
It is used to validate a workflow version without having to validate the entire workflow.
|
||||
"""
|
||||
|
||||
meta: WorkflowMeta = Field(description="The meta of the workflow.")
|
||||
|
||||
|
||||
UnsafeWorkflowWithVersionValidator = TypeAdapter(UnsafeWorkflowWithVersion)
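
# A brief sketch, not part of this diff, of how this validator is used by migration 2 above: only the
# WorkflowMeta fields under `meta` must parse, so arbitrary embedded workflow JSON can be checked cheaply
# without validating the whole graph; the sample JSON below is made up:
workflow_json = '{"meta": {"version": "1.0.0", "category": "user"}, "nodes": [], "edges": []}'
UnsafeWorkflowWithVersionValidator.validate_json(workflow_json)  # OK: meta validates, other keys are ignored
# A missing or malformed meta raises pydantic.ValidationError, which migration 2 treats as
# "invalid embedded workflow, skipping".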
|
||||
|
||||
|
||||
class Workflow(WorkflowWithoutID):
|
||||
id: str = Field(description="The id of the workflow.")
|
||||
id: str = Field(default_factory=uuid_string, description="The id of the workflow.")
|
||||
|
||||
|
||||
WorkflowValidator = TypeAdapter(Workflow)
|
||||
|
||||
@@ -14,10 +14,9 @@ from invokeai.app.services.workflow_records.workflow_records_common import (
|
||||
WorkflowRecordListItemDTO,
|
||||
WorkflowRecordListItemDTOValidator,
|
||||
WorkflowRecordOrderBy,
|
||||
WorkflowValidator,
|
||||
WorkflowWithoutID,
|
||||
WorkflowWithoutIDValidator,
|
||||
)
|
||||
from invokeai.app.util.misc import uuid_string
|
||||
|
||||
|
||||
class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
|
||||
@@ -26,6 +25,7 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
|
||||
self._lock = db.lock
|
||||
self._conn = db.conn
|
||||
self._cursor = self._conn.cursor()
|
||||
self._create_tables()
|
||||
|
||||
def start(self, invoker: Invoker) -> None:
|
||||
self._invoker = invoker
|
||||
@@ -66,7 +66,7 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
|
||||
try:
|
||||
# Only user workflows may be created by this method
|
||||
assert workflow.meta.category is WorkflowCategory.User
|
||||
workflow_with_id = Workflow(**workflow.model_dump(), id=uuid_string())
|
||||
workflow_with_id = WorkflowValidator.validate_python(workflow.model_dump())
|
||||
self._lock.acquire()
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
@@ -204,8 +204,7 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
|
||||
workflow_paths = workflows_dir.glob("*.json")
|
||||
for path in workflow_paths:
|
||||
bytes_ = path.read_bytes()
|
||||
workflow_without_id = WorkflowWithoutIDValidator.validate_json(bytes_)
|
||||
workflow = Workflow(**workflow_without_id.model_dump(), id=uuid_string())
|
||||
workflow = WorkflowValidator.validate_json(bytes_)
|
||||
workflows.append(workflow)
|
||||
# Only default workflows may be managed by this method
|
||||
assert all(w.meta.category is WorkflowCategory.Default for w in workflows)
|
||||
@@ -232,3 +231,87 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
|
||||
raise
|
||||
finally:
|
||||
self._lock.release()
|
||||
|
||||
def _create_tables(self) -> None:
|
||||
try:
|
||||
self._lock.acquire()
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
CREATE TABLE IF NOT EXISTS workflow_library (
|
||||
workflow_id TEXT NOT NULL PRIMARY KEY,
|
||||
workflow TEXT NOT NULL,
|
||||
created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
-- updated via trigger
|
||||
updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
-- updated manually when retrieving workflow
|
||||
opened_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||
-- Generated columns, needed for indexing and searching
|
||||
category TEXT GENERATED ALWAYS as (json_extract(workflow, '$.meta.category')) VIRTUAL NOT NULL,
|
||||
name TEXT GENERATED ALWAYS as (json_extract(workflow, '$.name')) VIRTUAL NOT NULL,
|
||||
description TEXT GENERATED ALWAYS as (json_extract(workflow, '$.description')) VIRTUAL NOT NULL
|
||||
);
|
||||
"""
|
||||
)
|
||||
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
CREATE TRIGGER IF NOT EXISTS tg_workflow_library_updated_at
|
||||
AFTER UPDATE
|
||||
ON workflow_library FOR EACH ROW
|
||||
BEGIN
|
||||
UPDATE workflow_library
|
||||
SET updated_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
|
||||
WHERE workflow_id = old.workflow_id;
|
||||
END;
|
||||
"""
|
||||
)
|
||||
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
CREATE INDEX IF NOT EXISTS idx_workflow_library_created_at ON workflow_library(created_at);
|
||||
"""
|
||||
)
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
CREATE INDEX IF NOT EXISTS idx_workflow_library_updated_at ON workflow_library(updated_at);
|
||||
"""
|
||||
)
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
CREATE INDEX IF NOT EXISTS idx_workflow_library_opened_at ON workflow_library(opened_at);
|
||||
"""
|
||||
)
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
CREATE INDEX IF NOT EXISTS idx_workflow_library_category ON workflow_library(category);
|
||||
"""
|
||||
)
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
CREATE INDEX IF NOT EXISTS idx_workflow_library_name ON workflow_library(name);
|
||||
"""
|
||||
)
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
CREATE INDEX IF NOT EXISTS idx_workflow_library_description ON workflow_library(description);
|
||||
"""
|
||||
)
|
||||
|
||||
# We do not need the original `workflows` table or `workflow_images` junction table.
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
DROP TABLE IF EXISTS workflow_images;
|
||||
"""
|
||||
)
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
DROP TABLE IF EXISTS workflows;
|
||||
"""
|
||||
)
|
||||
|
||||
self._conn.commit()
|
||||
except Exception:
|
||||
self._conn.rollback()
|
||||
raise
|
||||
finally:
|
||||
self._lock.release()
|
||||
|
||||
@@ -32,8 +32,6 @@ class ModelProbeInfo(object):
|
||||
upcast_attention: bool
|
||||
format: Literal["diffusers", "checkpoint", "lycoris", "olive", "onnx"]
|
||||
image_size: int
|
||||
name: Optional[str] = None
|
||||
description: Optional[str] = None
|
||||
|
||||
|
||||
class ProbeBase(object):
|
||||
@@ -115,16 +113,12 @@ class ModelProbe(object):
|
||||
base_type = probe.get_base_type()
|
||||
variant_type = probe.get_variant_type()
|
||||
prediction_type = probe.get_scheduler_prediction_type()
|
||||
name = cls.get_model_name(model_path)
|
||||
description = f"{base_type.value} {model_type.value} model {name}"
|
||||
format = probe.get_format()
|
||||
model_info = ModelProbeInfo(
|
||||
model_type=model_type,
|
||||
base_type=base_type,
|
||||
variant_type=variant_type,
|
||||
prediction_type=prediction_type,
|
||||
name=name,
|
||||
description=description,
|
||||
upcast_attention=(
|
||||
base_type == BaseModelType.StableDiffusion2
|
||||
and prediction_type == SchedulerPredictionType.VPrediction
|
||||
@@ -148,13 +142,6 @@ class ModelProbe(object):
|
||||
|
||||
return model_info
|
||||
|
||||
@classmethod
|
||||
def get_model_name(cls, model_path: Path) -> str:
|
||||
if model_path.suffix in {".safetensors", ".bin", ".pt", ".ckpt"}:
|
||||
return model_path.stem
|
||||
else:
|
||||
return model_path.name
|
||||
|
||||
@classmethod
|
||||
def get_model_type_from_checkpoint(cls, model_path: Path, checkpoint: dict) -> ModelType:
|
||||
if model_path.suffix not in (".bin", ".pt", ".ckpt", ".safetensors", ".pth"):
|
||||
@@ -389,7 +376,7 @@ class TextualInversionCheckpointProbe(CheckpointProbeBase):
|
||||
elif "clip_g" in checkpoint:
|
||||
token_dim = checkpoint["clip_g"].shape[-1]
|
||||
else:
|
||||
token_dim = list(checkpoint.values())[0].shape[-1]
|
||||
token_dim = list(checkpoint.values())[0].shape[0]
|
||||
if token_dim == 768:
|
||||
return BaseModelType.StableDiffusion1
|
||||
elif token_dim == 1024:
|
||||
|
||||
@@ -9,7 +9,7 @@ def lora_token_vector_length(checkpoint: dict) -> int:
|
||||
:param checkpoint: The checkpoint
|
||||
"""
|
||||
|
||||
def _get_shape_1(key: str, tensor, checkpoint) -> int:
|
||||
def _get_shape_1(key, tensor, checkpoint):
|
||||
lora_token_vector_length = None
|
||||
|
||||
if "." not in key:
|
||||
@@ -57,10 +57,6 @@ def lora_token_vector_length(checkpoint: dict) -> int:
|
||||
for key, tensor in checkpoint.items():
|
||||
if key.startswith("lora_unet_") and ("_attn2_to_k." in key or "_attn2_to_v." in key):
|
||||
lora_token_vector_length = _get_shape_1(key, tensor, checkpoint)
|
||||
elif key.startswith("lora_unet_") and (
|
||||
"time_emb_proj.lora_down" in key
|
||||
): # recognizes format at https://civitai.com/models/224641
|
||||
lora_token_vector_length = _get_shape_1(key, tensor, checkpoint)
|
||||
elif key.startswith("lora_te") and "_self_attn_" in key:
|
||||
tmp_length = _get_shape_1(key, tensor, checkpoint)
|
||||
if key.startswith("lora_te_"):
|
||||
|
||||
@@ -1,29 +0,0 @@
|
||||
"""Re-export frequently-used symbols from the Model Manager backend."""
|
||||
|
||||
from .config import (
|
||||
AnyModelConfig,
|
||||
BaseModelType,
|
||||
InvalidModelConfigException,
|
||||
ModelConfigFactory,
|
||||
ModelFormat,
|
||||
ModelType,
|
||||
ModelVariantType,
|
||||
SchedulerPredictionType,
|
||||
SubModelType,
|
||||
)
|
||||
from .probe import ModelProbe
|
||||
from .search import ModelSearch
|
||||
|
||||
__all__ = [
|
||||
"ModelProbe",
|
||||
"ModelSearch",
|
||||
"InvalidModelConfigException",
|
||||
"ModelConfigFactory",
|
||||
"BaseModelType",
|
||||
"ModelType",
|
||||
"SubModelType",
|
||||
"ModelVariantType",
|
||||
"ModelFormat",
|
||||
"SchedulerPredictionType",
|
||||
"AnyModelConfig",
|
||||
]
|
||||
@@ -23,7 +23,7 @@ from enum import Enum
|
||||
from typing import Literal, Optional, Type, Union
|
||||
|
||||
from pydantic import BaseModel, ConfigDict, Field, TypeAdapter
|
||||
from typing_extensions import Annotated, Any, Dict
|
||||
from typing_extensions import Annotated
|
||||
|
||||
|
||||
class InvalidModelConfigException(Exception):
|
||||
@@ -122,7 +122,7 @@ class ModelConfigBase(BaseModel):
|
||||
validate_assignment=True,
|
||||
)
|
||||
|
||||
def update(self, attributes: Dict[str, Any]) -> None:
|
||||
def update(self, attributes: dict):
|
||||
"""Update the object with fields in dict."""
|
||||
for key, value in attributes.items():
|
||||
setattr(self, key, value) # may raise a validation error
|
||||
@@ -195,6 +195,8 @@ class MainCheckpointConfig(_CheckpointConfig, _MainConfig):
|
||||
"""Model config for main checkpoint models."""
|
||||
|
||||
type: Literal[ModelType.Main] = ModelType.Main
|
||||
# Note that we do not need prediction_type or upcast_attention here
|
||||
# because they are provided in the checkpoint's own config file.
|
||||
|
||||
|
||||
class MainDiffusersConfig(_DiffusersConfig, _MainConfig):
|
||||
|
||||
93
invokeai/backend/model_manager/migrate_to_db.py
Normal file
@@ -0,0 +1,93 @@
|
||||
# Copyright (c) 2023 Lincoln D. Stein
|
||||
"""Migrate from the InvokeAI v2 models.yaml format to the v3 sqlite format."""
|
||||
|
||||
from hashlib import sha1
|
||||
|
||||
from omegaconf import DictConfig, OmegaConf
|
||||
from pydantic import TypeAdapter
|
||||
|
||||
from invokeai.app.services.config import InvokeAIAppConfig
|
||||
from invokeai.app.services.model_records import (
|
||||
DuplicateModelException,
|
||||
ModelRecordServiceSQL,
|
||||
)
|
||||
from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
|
||||
from invokeai.backend.model_manager.config import (
|
||||
AnyModelConfig,
|
||||
BaseModelType,
|
||||
ModelType,
|
||||
)
|
||||
from invokeai.backend.model_manager.hash import FastModelHash
|
||||
from invokeai.backend.util.logging import InvokeAILogger
|
||||
|
||||
ModelsValidator = TypeAdapter(AnyModelConfig)
|
||||
|
||||
|
||||
class MigrateModelYamlToDb:
|
||||
"""
|
||||
Migrate the InvokeAI models.yaml format (VERSION 3.0.0) to SQL3 database format (VERSION 3.2.0)
|
||||
|
||||
The class has one externally useful method, migrate(), which scans the
|
||||
current models.yaml file and imports all of its entries into invokeai.db.
|
||||
|
||||
Use this way:
|
||||
|
||||
from invokeai.backend.model_manager.migrate_to_db import MigrateModelYamlToDb
|
||||
MigrateModelYamlToDb().migrate()
|
||||
|
||||
"""
|
||||
|
||||
config: InvokeAIAppConfig
|
||||
logger: InvokeAILogger
|
||||
|
||||
def __init__(self):
|
||||
self.config = InvokeAIAppConfig.get_config()
|
||||
self.config.parse_args()
|
||||
self.logger = InvokeAILogger.get_logger()
|
||||
|
||||
def get_db(self) -> ModelRecordServiceSQL:
|
||||
"""Fetch the sqlite3 database for this installation."""
|
||||
db = SqliteDatabase(self.config, self.logger)
|
||||
return ModelRecordServiceSQL(db)
|
||||
|
||||
def get_yaml(self) -> DictConfig:
|
||||
"""Fetch the models.yaml DictConfig for this installation."""
|
||||
yaml_path = self.config.model_conf_path
|
||||
return OmegaConf.load(yaml_path)
|
||||
|
||||
def migrate(self):
|
||||
"""Do the migration from models.yaml to invokeai.db."""
|
||||
db = self.get_db()
|
||||
yaml = self.get_yaml()
|
||||
|
||||
for model_key, stanza in yaml.items():
|
||||
if model_key == "__metadata__":
|
||||
assert (
|
||||
stanza["version"] == "3.0.0"
|
||||
), f"This script works on version 3.0.0 yaml files, but your configuration points to a {stanza['version']} version"
|
||||
continue
|
||||
|
||||
base_type, model_type, model_name = str(model_key).split("/")
|
||||
hash = FastModelHash.hash(self.config.models_path / stanza.path)
|
||||
new_key = sha1(model_key.encode("utf-8")).hexdigest()
|
||||
|
||||
stanza["base"] = BaseModelType(base_type)
|
||||
stanza["type"] = ModelType(model_type)
|
||||
stanza["name"] = model_name
|
||||
stanza["original_hash"] = hash
|
||||
stanza["current_hash"] = hash
|
||||
|
||||
new_config = ModelsValidator.validate_python(stanza)
|
||||
self.logger.info(f"Adding model {model_name} with key {model_key}")
|
||||
try:
|
||||
db.add_model(new_key, new_config)
|
||||
except DuplicateModelException:
|
||||
self.logger.warning(f"Model {model_name} is already in the database")
|
||||
|
||||
|
||||
def main():
|
||||
MigrateModelYamlToDb().migrate()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,686 +0,0 @@
|
||||
import json
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Literal, Optional, Union
|
||||
|
||||
import safetensors.torch
|
||||
import torch
|
||||
from picklescan.scanner import scan_file_path
|
||||
|
||||
from invokeai.backend.model_management.models.base import read_checkpoint_meta
|
||||
from invokeai.backend.model_management.models.ip_adapter import IPAdapterModelFormat
|
||||
from invokeai.backend.model_management.util import lora_token_vector_length
|
||||
from invokeai.backend.util.util import SilenceWarnings
|
||||
|
||||
from .config import (
|
||||
AnyModelConfig,
|
||||
BaseModelType,
|
||||
InvalidModelConfigException,
|
||||
ModelConfigFactory,
|
||||
ModelFormat,
|
||||
ModelType,
|
||||
ModelVariantType,
|
||||
SchedulerPredictionType,
|
||||
)
|
||||
from .hash import FastModelHash
|
||||
|
||||
CkptType = Dict[str, Any]
|
||||
|
||||
LEGACY_CONFIGS: Dict[BaseModelType, Dict[ModelVariantType, Union[str, Dict[SchedulerPredictionType, str]]]] = {
|
||||
BaseModelType.StableDiffusion1: {
|
||||
ModelVariantType.Normal: "v1-inference.yaml",
|
||||
ModelVariantType.Inpaint: "v1-inpainting-inference.yaml",
|
||||
},
|
||||
BaseModelType.StableDiffusion2: {
|
||||
ModelVariantType.Normal: {
|
||||
SchedulerPredictionType.Epsilon: "v2-inference.yaml",
|
||||
SchedulerPredictionType.VPrediction: "v2-inference-v.yaml",
|
||||
},
|
||||
ModelVariantType.Inpaint: {
|
||||
SchedulerPredictionType.Epsilon: "v2-inpainting-inference.yaml",
|
||||
SchedulerPredictionType.VPrediction: "v2-inpainting-inference-v.yaml",
|
||||
},
|
||||
},
|
||||
BaseModelType.StableDiffusionXL: {
|
||||
ModelVariantType.Normal: "sd_xl_base.yaml",
|
||||
},
|
||||
BaseModelType.StableDiffusionXLRefiner: {
|
||||
ModelVariantType.Normal: "sd_xl_refiner.yaml",
|
||||
},
|
||||
}
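# The table above is keyed first by base model, then by variant; the SD-2.x entries add a
# third tier keyed by scheduler prediction type. For example, an SD-2 "normal" model using
# v-prediction resolves to:
#
#     LEGACY_CONFIGS[BaseModelType.StableDiffusion2][ModelVariantType.Normal][
#         SchedulerPredictionType.VPrediction
#     ]  # -> "v2-inference-v.yaml"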
|
||||
|
||||
|
||||
class ProbeBase(object):
|
||||
"""Base class for probes."""
|
||||
|
||||
def __init__(self, model_path: Path):
|
||||
self.model_path = model_path
|
||||
|
||||
def get_base_type(self) -> BaseModelType:
|
||||
"""Get model base type."""
|
||||
raise NotImplementedError
|
||||
|
||||
def get_format(self) -> ModelFormat:
|
||||
"""Get model file format."""
|
||||
raise NotImplementedError
|
||||
|
||||
def get_variant_type(self) -> Optional[ModelVariantType]:
|
||||
"""Get model variant type."""
|
||||
return None
|
||||
|
||||
def get_scheduler_prediction_type(self) -> Optional[SchedulerPredictionType]:
|
||||
"""Get model scheduler prediction type."""
|
||||
return None
|
||||
|
||||
|
||||
class ModelProbe(object):
|
||||
PROBES: Dict[str, Dict[ModelType, type[ProbeBase]]] = {
|
||||
"diffusers": {},
|
||||
"checkpoint": {},
|
||||
"onnx": {},
|
||||
}
|
||||
|
||||
CLASS2TYPE = {
|
||||
"StableDiffusionPipeline": ModelType.Main,
|
||||
"StableDiffusionInpaintPipeline": ModelType.Main,
|
||||
"StableDiffusionXLPipeline": ModelType.Main,
|
||||
"StableDiffusionXLImg2ImgPipeline": ModelType.Main,
|
||||
"StableDiffusionXLInpaintPipeline": ModelType.Main,
|
||||
"LatentConsistencyModelPipeline": ModelType.Main,
|
||||
"AutoencoderKL": ModelType.Vae,
|
||||
"AutoencoderTiny": ModelType.Vae,
|
||||
"ControlNetModel": ModelType.ControlNet,
|
||||
"CLIPVisionModelWithProjection": ModelType.CLIPVision,
|
||||
"T2IAdapter": ModelType.T2IAdapter,
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def register_probe(
|
||||
cls, format: Literal["diffusers", "checkpoint", "onnx"], model_type: ModelType, probe_class: type[ProbeBase]
|
||||
) -> None:
|
||||
cls.PROBES[format][model_type] = probe_class
|
||||
|
||||
@classmethod
|
||||
def heuristic_probe(
|
||||
cls,
|
||||
model_path: Path,
|
||||
fields: Optional[Dict[str, Any]] = None,
|
||||
) -> AnyModelConfig:
|
||||
return cls.probe(model_path, fields)
|
||||
|
||||
@classmethod
|
||||
def probe(
|
||||
cls,
|
||||
model_path: Path,
|
||||
fields: Optional[Dict[str, Any]] = None,
|
||||
) -> AnyModelConfig:
|
||||
"""
|
||||
Probe the model at model_path and return its configuration record.
|
||||
|
||||
:param model_path: Path to the model file (checkpoint) or directory (diffusers).
|
||||
:param fields: An optional dictionary that can be used to override probed
|
||||
fields. Typically used for fields that don't probe well, such as prediction_type.
|
||||
|
||||
Returns: The appropriate model configuration derived from ModelConfigBase.
|
||||
"""
|
||||
if fields is None:
|
||||
fields = {}
|
||||
|
||||
format_type = ModelFormat.Diffusers if model_path.is_dir() else ModelFormat.Checkpoint
|
||||
model_info = None
|
||||
model_type = None
|
||||
if format_type == "diffusers":
|
||||
model_type = cls.get_model_type_from_folder(model_path)
|
||||
else:
|
||||
model_type = cls.get_model_type_from_checkpoint(model_path)
|
||||
format_type = ModelFormat.Onnx if model_type == ModelType.ONNX else format_type
|
||||
|
||||
probe_class = cls.PROBES[format_type].get(model_type)
|
||||
if not probe_class:
|
||||
raise InvalidModelConfigException(f"Unhandled combination of {format_type} and {model_type}")
|
||||
|
||||
hash = FastModelHash.hash(model_path)
|
||||
probe = probe_class(model_path)
|
||||
|
||||
fields["path"] = model_path.as_posix()
|
||||
fields["type"] = fields.get("type") or model_type
|
||||
fields["base"] = fields.get("base") or probe.get_base_type()
|
||||
fields["variant"] = fields.get("variant") or probe.get_variant_type()
|
||||
fields["prediction_type"] = fields.get("prediction_type") or probe.get_scheduler_prediction_type()
|
||||
fields["name"] = fields.get("name") or cls.get_model_name(model_path)
|
||||
fields["description"] = (
|
||||
fields.get("description") or f"{fields['base'].value} {fields['type'].value} model {fields['name']}"
|
||||
)
|
||||
fields["format"] = fields.get("format") or probe.get_format()
|
||||
fields["original_hash"] = fields.get("original_hash") or hash
|
||||
fields["current_hash"] = fields.get("current_hash") or hash
|
||||
|
||||
# additional fields needed for main and controlnet models
|
||||
if fields["type"] in [ModelType.Main, ModelType.ControlNet] and fields["format"] == ModelFormat.Checkpoint:
|
||||
fields["config"] = cls._get_checkpoint_config_path(
|
||||
model_path,
|
||||
model_type=fields["type"],
|
||||
base_type=fields["base"],
|
||||
variant_type=fields["variant"],
|
||||
prediction_type=fields["prediction_type"],
|
||||
).as_posix()
|
||||
|
||||
# additional fields needed for main non-checkpoint models
|
||||
elif fields["type"] == ModelType.Main and fields["format"] in [
|
||||
ModelFormat.Onnx,
|
||||
ModelFormat.Olive,
|
||||
ModelFormat.Diffusers,
|
||||
]:
|
||||
fields["upcast_attention"] = fields.get("upcast_attention") or (
|
||||
fields["base"] == BaseModelType.StableDiffusion2
|
||||
and fields["prediction_type"] == SchedulerPredictionType.VPrediction
|
||||
)
|
||||
|
||||
model_info = ModelConfigFactory.make_config(fields)
|
||||
return model_info
|
||||
|
||||
@classmethod
|
||||
def get_model_name(cls, model_path: Path) -> str:
|
||||
if model_path.suffix in {".safetensors", ".bin", ".pt", ".ckpt"}:
|
||||
return model_path.stem
|
||||
else:
|
||||
return model_path.name
|
||||
|
||||
@classmethod
|
||||
def get_model_type_from_checkpoint(cls, model_path: Path, checkpoint: Optional[CkptType] = None) -> ModelType:
|
||||
if model_path.suffix not in (".bin", ".pt", ".ckpt", ".safetensors", ".pth"):
|
||||
raise InvalidModelConfigException(f"{model_path}: unrecognized suffix")
|
||||
|
||||
if model_path.name == "learned_embeds.bin":
|
||||
return ModelType.TextualInversion
|
||||
|
||||
ckpt = checkpoint if checkpoint else read_checkpoint_meta(model_path, scan=True)
|
||||
ckpt = ckpt.get("state_dict", ckpt)
|
||||
|
||||
for key in ckpt.keys():
|
||||
if any(key.startswith(v) for v in {"cond_stage_model.", "first_stage_model.", "model.diffusion_model."}):
|
||||
return ModelType.Main
|
||||
elif any(key.startswith(v) for v in {"encoder.conv_in", "decoder.conv_in"}):
|
||||
return ModelType.Vae
|
||||
elif any(key.startswith(v) for v in {"lora_te_", "lora_unet_"}):
|
||||
return ModelType.Lora
|
||||
elif any(key.endswith(v) for v in {"to_k_lora.up.weight", "to_q_lora.down.weight"}):
|
||||
return ModelType.Lora
|
||||
elif any(key.startswith(v) for v in {"control_model", "input_blocks"}):
|
||||
return ModelType.ControlNet
|
||||
elif key in {"emb_params", "string_to_param"}:
|
||||
return ModelType.TextualInversion
|
||||
|
||||
else:
|
||||
# diffusers-ti
|
||||
if len(ckpt) < 10 and all(isinstance(v, torch.Tensor) for v in ckpt.values()):
|
||||
return ModelType.TextualInversion
|
||||
|
||||
raise InvalidModelConfigException(f"Unable to determine model type for {model_path}")
|
||||
|
||||
@classmethod
|
||||
def get_model_type_from_folder(cls, folder_path: Path) -> ModelType:
|
||||
"""Get the model type of a hugging-face style folder."""
|
||||
class_name = None
|
||||
error_hint = None
|
||||
for suffix in ["bin", "safetensors"]:
|
||||
if (folder_path / f"learned_embeds.{suffix}").exists():
|
||||
return ModelType.TextualInversion
|
||||
if (folder_path / f"pytorch_lora_weights.{suffix}").exists():
|
||||
return ModelType.Lora
|
||||
if (folder_path / "unet/model.onnx").exists():
|
||||
return ModelType.ONNX
|
||||
if (folder_path / "image_encoder.txt").exists():
|
||||
return ModelType.IPAdapter
|
||||
|
||||
i = folder_path / "model_index.json"
|
||||
c = folder_path / "config.json"
|
||||
config_path = i if i.exists() else c if c.exists() else None
|
||||
|
||||
if config_path:
|
||||
with open(config_path, "r") as file:
|
||||
conf = json.load(file)
|
||||
if "_class_name" in conf:
|
||||
class_name = conf["_class_name"]
|
||||
elif "architectures" in conf:
|
||||
class_name = conf["architectures"][0]
|
||||
else:
|
||||
class_name = None
|
||||
else:
|
||||
error_hint = f"No model_index.json or config.json found in {folder_path}."
|
||||
|
||||
if class_name and (type := cls.CLASS2TYPE.get(class_name)):
|
||||
return type
|
||||
else:
|
||||
error_hint = f"class {class_name} is not one of the supported classes [{', '.join(cls.CLASS2TYPE.keys())}]"
|
||||
|
||||
# give up
|
||||
raise InvalidModelConfigException(
|
||||
f"Unable to determine model type for {folder_path}" + (f"; {error_hint}" if error_hint else "")
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def _get_checkpoint_config_path(
|
||||
cls,
|
||||
model_path: Path,
|
||||
model_type: ModelType,
|
||||
base_type: BaseModelType,
|
||||
variant_type: ModelVariantType,
|
||||
prediction_type: SchedulerPredictionType,
|
||||
) -> Path:
|
||||
# look for a YAML file adjacent to the model file first
|
||||
possible_conf = model_path.with_suffix(".yaml")
|
||||
if possible_conf.exists():
|
||||
return possible_conf.absolute()
|
||||
|
||||
if model_type == ModelType.Main:
|
||||
config_file = LEGACY_CONFIGS[base_type][variant_type]
|
||||
if isinstance(config_file, dict): # need another tier for sd-2.x models
|
||||
config_file = config_file[prediction_type]
|
||||
elif model_type == ModelType.ControlNet:
|
||||
config_file = (
|
||||
"../controlnet/cldm_v15.yaml" if base_type == BaseModelType("sd-1") else "../controlnet/cldm_v21.yaml"
|
||||
)
|
||||
else:
|
||||
raise InvalidModelConfigException(
|
||||
f"{model_path}: Unrecognized combination of model_type={model_type}, base_type={base_type}"
|
||||
)
|
||||
assert isinstance(config_file, str)
|
||||
return Path(config_file)
|
||||
|
||||
@classmethod
|
||||
def _scan_and_load_checkpoint(cls, model_path: Path) -> CkptType:
|
||||
with SilenceWarnings():
|
||||
if model_path.suffix.endswith((".ckpt", ".pt", ".bin")):
|
||||
cls._scan_model(model_path.name, model_path)
|
||||
model = torch.load(model_path)
|
||||
assert isinstance(model, dict)
|
||||
return model
|
||||
else:
|
||||
return safetensors.torch.load_file(model_path)
|
||||
|
||||
@classmethod
|
||||
def _scan_model(cls, model_name: str, checkpoint: Path) -> None:
|
||||
"""
|
||||
Apply picklescanner to the indicated checkpoint and issue a warning
|
||||
and option to exit if an infected file is identified.
|
||||
"""
|
||||
# scan model
|
||||
scan_result = scan_file_path(checkpoint)
|
||||
if scan_result.infected_files != 0:
|
||||
raise Exception("The model {model_name} is potentially infected by malware. Aborting import.")
|
||||
|
||||
|
||||
# ##################################################3
|
||||
# Checkpoint probing
|
||||
# ##################################################3
|
||||
|
||||
|
||||
class CheckpointProbeBase(ProbeBase):
|
||||
def __init__(self, model_path: Path):
|
||||
super().__init__(model_path)
|
||||
self.checkpoint = ModelProbe._scan_and_load_checkpoint(model_path)
|
||||
|
||||
def get_format(self) -> ModelFormat:
|
||||
return ModelFormat("checkpoint")
|
||||
|
||||
def get_variant_type(self) -> ModelVariantType:
|
||||
model_type = ModelProbe.get_model_type_from_checkpoint(self.model_path, self.checkpoint)
|
||||
if model_type != ModelType.Main:
|
||||
return ModelVariantType.Normal
|
||||
state_dict = self.checkpoint.get("state_dict") or self.checkpoint
|
||||
in_channels = state_dict["model.diffusion_model.input_blocks.0.0.weight"].shape[1]
|
||||
if in_channels == 9:
|
||||
return ModelVariantType.Inpaint
|
||||
elif in_channels == 5:
|
||||
return ModelVariantType.Depth
|
||||
elif in_channels == 4:
|
||||
return ModelVariantType.Normal
|
||||
else:
|
||||
raise InvalidModelConfigException(
|
||||
f"Cannot determine variant type (in_channels={in_channels}) at {self.model_path}"
|
||||
)
|
||||
|
||||
|
||||
class PipelineCheckpointProbe(CheckpointProbeBase):
|
||||
def get_base_type(self) -> BaseModelType:
|
||||
checkpoint = self.checkpoint
|
||||
state_dict = self.checkpoint.get("state_dict") or checkpoint
|
||||
key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight"
|
||||
if key_name in state_dict and state_dict[key_name].shape[-1] == 768:
|
||||
return BaseModelType.StableDiffusion1
|
||||
if key_name in state_dict and state_dict[key_name].shape[-1] == 1024:
|
||||
return BaseModelType.StableDiffusion2
|
||||
key_name = "model.diffusion_model.input_blocks.4.1.transformer_blocks.0.attn2.to_k.weight"
|
||||
if key_name in state_dict and state_dict[key_name].shape[-1] == 2048:
|
||||
return BaseModelType.StableDiffusionXL
|
||||
elif key_name in state_dict and state_dict[key_name].shape[-1] == 1280:
|
||||
return BaseModelType.StableDiffusionXLRefiner
|
||||
else:
|
||||
raise InvalidModelConfigException("Cannot determine base type")
|
||||
|
||||
def get_scheduler_prediction_type(self) -> SchedulerPredictionType:
|
||||
"""Return model prediction type."""
|
||||
type = self.get_base_type()
|
||||
if type == BaseModelType.StableDiffusion2:
|
||||
checkpoint = self.checkpoint
|
||||
state_dict = self.checkpoint.get("state_dict") or checkpoint
|
||||
key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight"
|
||||
if key_name in state_dict and state_dict[key_name].shape[-1] == 1024:
|
||||
if "global_step" in checkpoint:
|
||||
if checkpoint["global_step"] == 220000:
|
||||
return SchedulerPredictionType.Epsilon
|
||||
elif checkpoint["global_step"] == 110000:
|
||||
return SchedulerPredictionType.VPrediction
|
||||
return SchedulerPredictionType.VPrediction # a guess for sd2 ckpts
|
||||
|
||||
elif type == BaseModelType.StableDiffusion1:
|
||||
return SchedulerPredictionType.Epsilon # a reasonable guess for sd1 ckpts
|
||||
else:
|
||||
return SchedulerPredictionType.Epsilon
|
||||
|
||||
|
||||
class VaeCheckpointProbe(CheckpointProbeBase):
|
||||
def get_base_type(self) -> BaseModelType:
|
||||
# I can't find any standalone 2.X VAEs to test with!
|
||||
return BaseModelType.StableDiffusion1
|
||||
|
||||
|
||||
class LoRACheckpointProbe(CheckpointProbeBase):
|
||||
"""Class for LoRA checkpoints."""
|
||||
|
||||
def get_format(self) -> ModelFormat:
|
||||
return ModelFormat("lycoris")
|
||||
|
||||
def get_base_type(self) -> BaseModelType:
|
||||
checkpoint = self.checkpoint
|
||||
token_vector_length = lora_token_vector_length(checkpoint)
|
||||
|
||||
if token_vector_length == 768:
|
||||
return BaseModelType.StableDiffusion1
|
||||
elif token_vector_length == 1024:
|
||||
return BaseModelType.StableDiffusion2
|
||||
elif token_vector_length == 1280:
|
||||
return BaseModelType.StableDiffusionXL # recognizes format at https://civitai.com/models/224641
|
||||
elif token_vector_length == 2048:
|
||||
return BaseModelType.StableDiffusionXL
|
||||
else:
|
||||
raise InvalidModelConfigException(f"Unknown LoRA type: {self.model_path}")
|
||||
|
||||
|
||||
class TextualInversionCheckpointProbe(CheckpointProbeBase):
|
||||
"""Class for probing embeddings."""
|
||||
|
||||
def get_format(self) -> ModelFormat:
|
||||
return ModelFormat.EmbeddingFile
|
||||
|
||||
def get_base_type(self) -> BaseModelType:
|
||||
checkpoint = self.checkpoint
|
||||
if "string_to_token" in checkpoint:
|
||||
token_dim = list(checkpoint["string_to_param"].values())[0].shape[-1]
|
||||
elif "emb_params" in checkpoint:
|
||||
token_dim = checkpoint["emb_params"].shape[-1]
|
||||
elif "clip_g" in checkpoint:
|
||||
token_dim = checkpoint["clip_g"].shape[-1]
|
||||
else:
|
||||
token_dim = list(checkpoint.values())[0].shape[0]
|
||||
if token_dim == 768:
|
||||
return BaseModelType.StableDiffusion1
|
||||
elif token_dim == 1024:
|
||||
return BaseModelType.StableDiffusion2
|
||||
elif token_dim == 1280:
|
||||
return BaseModelType.StableDiffusionXL
|
||||
else:
|
||||
raise InvalidModelConfigException(f"{self.model_path}: Could not determine base type")
|
||||
|
||||
|
||||
class ControlNetCheckpointProbe(CheckpointProbeBase):
|
||||
"""Class for probing controlnets."""
|
||||
|
||||
def get_base_type(self) -> BaseModelType:
|
||||
checkpoint = self.checkpoint
|
||||
for key_name in (
|
||||
"control_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight",
|
||||
"input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight",
|
||||
):
|
||||
if key_name not in checkpoint:
|
||||
continue
|
||||
if checkpoint[key_name].shape[-1] == 768:
|
||||
return BaseModelType.StableDiffusion1
|
||||
elif checkpoint[key_name].shape[-1] == 1024:
|
||||
return BaseModelType.StableDiffusion2
|
||||
raise InvalidModelConfigException("{self.model_path}: Unable to determine base type")
|
||||
|
||||
|
||||
class IPAdapterCheckpointProbe(CheckpointProbeBase):
|
||||
def get_base_type(self) -> BaseModelType:
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class CLIPVisionCheckpointProbe(CheckpointProbeBase):
|
||||
def get_base_type(self) -> BaseModelType:
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class T2IAdapterCheckpointProbe(CheckpointProbeBase):
|
||||
def get_base_type(self) -> BaseModelType:
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
########################################################
|
||||
# classes for probing folders
|
||||
#######################################################
|
||||
class FolderProbeBase(ProbeBase):
|
||||
def get_variant_type(self) -> ModelVariantType:
|
||||
return ModelVariantType.Normal
|
||||
|
||||
def get_format(self) -> ModelFormat:
|
||||
return ModelFormat("diffusers")
|
||||
|
||||
|
||||
class PipelineFolderProbe(FolderProbeBase):
|
||||
def get_base_type(self) -> BaseModelType:
|
||||
with open(self.model_path / "unet" / "config.json", "r") as file:
|
||||
unet_conf = json.load(file)
|
||||
if unet_conf["cross_attention_dim"] == 768:
|
||||
return BaseModelType.StableDiffusion1
|
||||
elif unet_conf["cross_attention_dim"] == 1024:
|
||||
return BaseModelType.StableDiffusion2
|
||||
elif unet_conf["cross_attention_dim"] == 1280:
|
||||
return BaseModelType.StableDiffusionXLRefiner
|
||||
elif unet_conf["cross_attention_dim"] == 2048:
|
||||
return BaseModelType.StableDiffusionXL
|
||||
else:
|
||||
raise InvalidModelConfigException(f"Unknown base model for {self.model_path}")
|
||||
|
||||
def get_scheduler_prediction_type(self) -> SchedulerPredictionType:
|
||||
with open(self.model_path / "scheduler" / "scheduler_config.json", "r") as file:
|
||||
scheduler_conf = json.load(file)
|
||||
if scheduler_conf["prediction_type"] == "v_prediction":
|
||||
return SchedulerPredictionType.VPrediction
|
||||
elif scheduler_conf["prediction_type"] == "epsilon":
|
||||
return SchedulerPredictionType.Epsilon
|
||||
else:
|
||||
raise InvalidModelConfigException("Unknown scheduler prediction type: {scheduler_conf['prediction_type']}")
|
||||
|
||||
def get_variant_type(self) -> ModelVariantType:
|
||||
# This only works for pipelines! Any kind of
|
||||
# exception results in our returning the
|
||||
# "normal" variant type
|
||||
try:
|
||||
config_file = self.model_path / "unet" / "config.json"
|
||||
with open(config_file, "r") as file:
|
||||
conf = json.load(file)
|
||||
|
||||
in_channels = conf["in_channels"]
|
||||
if in_channels == 9:
|
||||
return ModelVariantType.Inpaint
|
||||
elif in_channels == 5:
|
||||
return ModelVariantType.Depth
|
||||
elif in_channels == 4:
|
||||
return ModelVariantType.Normal
|
||||
except Exception:
|
||||
pass
|
||||
return ModelVariantType.Normal
|
||||
|
||||
|
||||
class VaeFolderProbe(FolderProbeBase):
|
||||
def get_base_type(self) -> BaseModelType:
|
||||
if self._config_looks_like_sdxl():
|
||||
return BaseModelType.StableDiffusionXL
|
||||
elif self._name_looks_like_sdxl():
|
||||
# SD and SDXL VAEs have the same shape (3-channel RGB to 4-channel float scaled down
# by a factor of 8), so we can't necessarily tell them apart by config hyperparameters;
# fall back to checking the name.
|
||||
return BaseModelType.StableDiffusionXL
|
||||
else:
|
||||
return BaseModelType.StableDiffusion1
|
||||
|
||||
def _config_looks_like_sdxl(self) -> bool:
|
||||
# config values that distinguish Stability's SD 1.x VAE from their SDXL VAE.
|
||||
config_file = self.model_path / "config.json"
|
||||
if not config_file.exists():
|
||||
raise InvalidModelConfigException(f"Cannot determine base type for {self.model_path}")
|
||||
with open(config_file, "r") as file:
|
||||
config = json.load(file)
|
||||
return config.get("scaling_factor", 0) == 0.13025 and config.get("sample_size") in [512, 1024]
|
||||
|
||||
def _name_looks_like_sdxl(self) -> bool:
|
||||
return bool(re.search(r"xl\b", self._guess_name(), re.IGNORECASE))
|
||||
|
||||
def _guess_name(self) -> str:
|
||||
name = self.model_path.name
|
||||
if name == "vae":
|
||||
name = self.model_path.parent.name
|
||||
return name
|
||||
|
||||
|
||||
class TextualInversionFolderProbe(FolderProbeBase):
|
||||
def get_format(self) -> ModelFormat:
|
||||
return ModelFormat.EmbeddingFolder
|
||||
|
||||
def get_base_type(self) -> BaseModelType:
|
||||
path = self.model_path / "learned_embeds.bin"
|
||||
if not path.exists():
|
||||
raise InvalidModelConfigException(
|
||||
f"{self.model_path.as_posix()} does not contain expected 'learned_embeds.bin' file"
|
||||
)
|
||||
return TextualInversionCheckpointProbe(path).get_base_type()
|
||||
|
||||
|
||||
class ONNXFolderProbe(FolderProbeBase):
|
||||
def get_format(self) -> ModelFormat:
|
||||
return ModelFormat("onnx")
|
||||
|
||||
def get_base_type(self) -> BaseModelType:
|
||||
return BaseModelType.StableDiffusion1
|
||||
|
||||
def get_variant_type(self) -> ModelVariantType:
|
||||
return ModelVariantType.Normal
|
||||
|
||||
|
||||
class ControlNetFolderProbe(FolderProbeBase):
|
||||
def get_base_type(self) -> BaseModelType:
|
||||
config_file = self.model_path / "config.json"
|
||||
if not config_file.exists():
|
||||
raise InvalidModelConfigException(f"Cannot determine base type for {self.model_path}")
|
||||
with open(config_file, "r") as file:
|
||||
config = json.load(file)
|
||||
# no obvious way to distinguish between sd2-base and sd2-768
|
||||
dimension = config["cross_attention_dim"]
|
||||
base_model = (
|
||||
BaseModelType.StableDiffusion1
|
||||
if dimension == 768
|
||||
else (
|
||||
BaseModelType.StableDiffusion2
|
||||
if dimension == 1024
|
||||
else BaseModelType.StableDiffusionXL
|
||||
if dimension == 2048
|
||||
else None
|
||||
)
|
||||
)
|
||||
if not base_model:
|
||||
raise InvalidModelConfigException(f"Unable to determine model base for {self.model_path}")
|
||||
return base_model
|
||||
|
||||
|
||||
class LoRAFolderProbe(FolderProbeBase):
|
||||
def get_base_type(self) -> BaseModelType:
|
||||
model_file = None
|
||||
for suffix in ["safetensors", "bin"]:
|
||||
base_file = self.model_path / f"pytorch_lora_weights.{suffix}"
|
||||
if base_file.exists():
|
||||
model_file = base_file
|
||||
break
|
||||
if not model_file:
|
||||
raise InvalidModelConfigException("Unknown LoRA format encountered")
|
||||
return LoRACheckpointProbe(model_file).get_base_type()
|
||||
|
||||
|
||||
class IPAdapterFolderProbe(FolderProbeBase):
|
||||
def get_format(self) -> IPAdapterModelFormat:
|
||||
return IPAdapterModelFormat.InvokeAI.value
|
||||
|
||||
def get_base_type(self) -> BaseModelType:
|
||||
model_file = self.model_path / "ip_adapter.bin"
|
||||
if not model_file.exists():
|
||||
raise InvalidModelConfigException("Unknown IP-Adapter model format.")
|
||||
|
||||
state_dict = torch.load(model_file, map_location="cpu")
|
||||
cross_attention_dim = state_dict["ip_adapter"]["1.to_k_ip.weight"].shape[-1]
|
||||
if cross_attention_dim == 768:
|
||||
return BaseModelType.StableDiffusion1
|
||||
elif cross_attention_dim == 1024:
|
||||
return BaseModelType.StableDiffusion2
|
||||
elif cross_attention_dim == 2048:
|
||||
return BaseModelType.StableDiffusionXL
|
||||
else:
|
||||
raise InvalidModelConfigException(
|
||||
f"IP-Adapter had unexpected cross-attention dimension: {cross_attention_dim}."
|
||||
)
|
||||
|
||||
|
||||
class CLIPVisionFolderProbe(FolderProbeBase):
|
||||
def get_base_type(self) -> BaseModelType:
|
||||
return BaseModelType.Any
|
||||
|
||||
|
||||
class T2IAdapterFolderProbe(FolderProbeBase):
|
||||
def get_base_type(self) -> BaseModelType:
|
||||
config_file = self.model_path / "config.json"
|
||||
if not config_file.exists():
|
||||
raise InvalidModelConfigException(f"Cannot determine base type for {self.model_path}")
|
||||
with open(config_file, "r") as file:
|
||||
config = json.load(file)
|
||||
|
||||
adapter_type = config.get("adapter_type", None)
|
||||
if adapter_type == "full_adapter_xl":
|
||||
return BaseModelType.StableDiffusionXL
|
||||
elif adapter_type == "full_adapter" or "light_adapter":
|
||||
# I haven't seen any T2I adapter models for SD2, so assume that this is an SD1 adapter.
|
||||
return BaseModelType.StableDiffusion1
|
||||
else:
|
||||
raise InvalidModelConfigException(
|
||||
f"Unable to determine base model for '{self.model_path}' (adapter_type = {adapter_type})."
|
||||
)
|
||||
|
||||
|
||||
############## register probe classes ######
|
||||
ModelProbe.register_probe("diffusers", ModelType.Main, PipelineFolderProbe)
|
||||
ModelProbe.register_probe("diffusers", ModelType.Vae, VaeFolderProbe)
|
||||
ModelProbe.register_probe("diffusers", ModelType.Lora, LoRAFolderProbe)
|
||||
ModelProbe.register_probe("diffusers", ModelType.TextualInversion, TextualInversionFolderProbe)
|
||||
ModelProbe.register_probe("diffusers", ModelType.ControlNet, ControlNetFolderProbe)
|
||||
ModelProbe.register_probe("diffusers", ModelType.IPAdapter, IPAdapterFolderProbe)
|
||||
ModelProbe.register_probe("diffusers", ModelType.CLIPVision, CLIPVisionFolderProbe)
|
||||
ModelProbe.register_probe("diffusers", ModelType.T2IAdapter, T2IAdapterFolderProbe)
|
||||
|
||||
ModelProbe.register_probe("checkpoint", ModelType.Main, PipelineCheckpointProbe)
|
||||
ModelProbe.register_probe("checkpoint", ModelType.Vae, VaeCheckpointProbe)
|
||||
ModelProbe.register_probe("checkpoint", ModelType.Lora, LoRACheckpointProbe)
|
||||
ModelProbe.register_probe("checkpoint", ModelType.TextualInversion, TextualInversionCheckpointProbe)
|
||||
ModelProbe.register_probe("checkpoint", ModelType.ControlNet, ControlNetCheckpointProbe)
|
||||
ModelProbe.register_probe("checkpoint", ModelType.IPAdapter, IPAdapterCheckpointProbe)
|
||||
ModelProbe.register_probe("checkpoint", ModelType.CLIPVision, CLIPVisionCheckpointProbe)
|
||||
ModelProbe.register_probe("checkpoint", ModelType.T2IAdapter, T2IAdapterCheckpointProbe)
|
||||
|
||||
ModelProbe.register_probe("onnx", ModelType.ONNX, ONNXFolderProbe)
|
||||
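# A minimal usage sketch of the registry defined above, assuming a hypothetical checkpoint
# path; prediction_type is passed in `fields` because it does not probe reliably.
if __name__ == "__main__":
    probed = ModelProbe.probe(
        Path("/tmp/models/my-model.safetensors"),  # hypothetical path
        fields={"prediction_type": SchedulerPredictionType.Epsilon},
    )
    print(probed.name, probed.base, probed.type, probed.format)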
@@ -1,190 +0,0 @@
|
||||
# Copyright 2023, Lincoln D. Stein and the InvokeAI Team
|
||||
"""
|
||||
Abstract base class and implementation for recursive directory search for models.
|
||||
|
||||
Example usage:
|
||||
```
|
||||
from invokeai.backend.model_manager import ModelSearch, ModelProbe
|
||||
|
||||
def find_main_models(model: Path) -> bool:
|
||||
info = ModelProbe.probe(model)
|
||||
if info.model_type == 'main' and info.base_type == 'sd-1':
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
search = ModelSearch(on_model_found=find_main_models)
|
||||
found = search.search('/tmp/models')
|
||||
print(found) # list of matching model paths
|
||||
print(search.stats) # search stats
|
||||
```
|
||||
"""
|
||||
|
||||
import os
|
||||
from abc import ABC, abstractmethod
|
||||
from pathlib import Path
|
||||
from typing import Callable, Optional, Set, Union
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from invokeai.backend.util.logging import InvokeAILogger
|
||||
|
||||
default_logger = InvokeAILogger.get_logger()
|
||||
|
||||
|
||||
class SearchStats(BaseModel):
|
||||
items_scanned: int = 0
|
||||
models_found: int = 0
|
||||
models_filtered: int = 0
|
||||
|
||||
|
||||
class ModelSearchBase(ABC, BaseModel):
|
||||
"""
|
||||
Abstract directory traversal model search class
|
||||
|
||||
Usage:
|
||||
search = ModelSearchBase(
|
||||
on_search_started = search_started_callback,
|
||||
on_search_completed = search_completed_callback,
|
||||
on_model_found = model_found_callback,
|
||||
)
|
||||
models_found = search.search('/path/to/directory')
|
||||
"""
|
||||
|
||||
# fmt: off
|
||||
on_search_started : Optional[Callable[[Path], None]] = Field(default=None, description="Called just before the search starts.") # noqa E221
|
||||
on_model_found : Optional[Callable[[Path], bool]] = Field(default=None, description="Called when a model is found.") # noqa E221
|
||||
on_search_completed : Optional[Callable[[Set[Path]], None]] = Field(default=None, description="Called when search is complete.") # noqa E221
|
||||
stats : SearchStats = Field(default_factory=SearchStats, description="Summary statistics after search") # noqa E221
|
||||
logger : InvokeAILogger = Field(default=default_logger, description="Logger instance.") # noqa E221
|
||||
# fmt: on
|
||||
|
||||
class Config:
|
||||
arbitrary_types_allowed = True
|
||||
|
||||
@abstractmethod
|
||||
def search_started(self) -> None:
|
||||
"""
|
||||
Called before the scan starts.
|
||||
|
||||
Passes the root search directory to the Callable `on_search_started`.
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def model_found(self, model: Path) -> None:
|
||||
"""
|
||||
Called when a model is found during search.
|
||||
|
||||
:param model: Model to process - could be a directory or checkpoint.
|
||||
|
||||
Passes the model's Path to the Callable `on_model_found`.
|
||||
This Callable receives the path to the model and returns a boolean
|
||||
to indicate whether the model should be returned in the search
|
||||
results.
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def search_completed(self) -> None:
|
||||
"""
|
||||
Called when the scan is complete.
|
||||
|
||||
Passes the Set of found model Paths to the Callable `on_search_completed`.
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def search(self, directory: Union[Path, str]) -> Set[Path]:
|
||||
"""
|
||||
Recursively search for models in `directory` and return a set of model paths.
|
||||
|
||||
If provided, the `on_search_started`, `on_model_found` and `on_search_completed`
|
||||
Callables will be invoked during the search.
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
class ModelSearch(ModelSearchBase):
|
||||
"""
|
||||
Implementation of ModelSearch with callbacks.
|
||||
Usage:
|
||||
search = ModelSearch()
|
||||
search.on_model_found = lambda path: 'anime' in path.as_posix()
found = search.search('/tmp/models')
|
||||
# returns all models that have 'anime' in the path
|
||||
"""
|
||||
|
||||
models_found: Set[Path] = Field(default=None)
|
||||
scanned_dirs: Set[Path] = Field(default=None)
|
||||
pruned_paths: Set[Path] = Field(default=None)
|
||||
|
||||
def search_started(self) -> None:
|
||||
self.models_found = set()
|
||||
self.scanned_dirs = set()
|
||||
self.pruned_paths = set()
|
||||
if self.on_search_started:
|
||||
self.on_search_started(self._directory)
|
||||
|
||||
def model_found(self, model: Path) -> None:
|
||||
self.stats.models_found += 1
|
||||
if not self.on_model_found or self.on_model_found(model):
|
||||
self.stats.models_filtered += 1
|
||||
self.models_found.add(model)
|
||||
|
||||
def search_completed(self) -> None:
|
||||
if self.on_search_completed:
|
||||
self.on_search_completed(self.models_found)
|
||||
|
||||
def search(self, directory: Union[Path, str]) -> Set[Path]:
|
||||
self._directory = Path(directory)
|
||||
self.stats = SearchStats() # zero out
|
||||
self.search_started()  # This will initialize models_found to an empty set
|
||||
self._walk_directory(directory)
|
||||
self.search_completed()
|
||||
return self.models_found
|
||||
|
||||
def _walk_directory(self, path: Union[Path, str]) -> None:
|
||||
for root, dirs, files in os.walk(path, followlinks=True):
|
||||
# don't descend into directories that start with a "."
|
||||
# to avoid the Mac .DS_STORE issue.
|
||||
if str(Path(root).name).startswith("."):
|
||||
self.pruned_paths.add(Path(root))
|
||||
if any(Path(root).is_relative_to(x) for x in self.pruned_paths):
|
||||
continue
|
||||
|
||||
self.stats.items_scanned += len(dirs) + len(files)
|
||||
for d in dirs:
|
||||
path = Path(root) / d
|
||||
if path.parent in self.scanned_dirs:
|
||||
self.scanned_dirs.add(path)
|
||||
continue
|
||||
if any(
|
||||
(path / x).exists()
|
||||
for x in [
|
||||
"config.json",
|
||||
"model_index.json",
|
||||
"learned_embeds.bin",
|
||||
"pytorch_lora_weights.bin",
|
||||
"image_encoder.txt",
|
||||
]
|
||||
):
|
||||
self.scanned_dirs.add(path)
|
||||
try:
|
||||
self.model_found(path)
|
||||
except KeyboardInterrupt:
|
||||
raise
|
||||
except Exception as e:
|
||||
self.logger.warning(str(e))
|
||||
|
||||
for f in files:
|
||||
path = Path(root) / f
|
||||
if path.parent in self.scanned_dirs:
|
||||
continue
|
||||
if path.suffix in {".ckpt", ".bin", ".pth", ".safetensors", ".pt"}:
|
||||
try:
|
||||
self.model_found(path)
|
||||
except KeyboardInterrupt:
|
||||
raise
|
||||
except Exception as e:
|
||||
self.logger.warning(str(e))
|
||||
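# A minimal usage sketch of the search class above, assuming a hypothetical models
# directory; keeps directories and .safetensors files, then reports the statistics.
if __name__ == "__main__":
    search = ModelSearch(on_model_found=lambda p: p.is_dir() or p.suffix == ".safetensors")
    found = search.search("/tmp/models")  # hypothetical directory
    print(f"kept {len(found)} of {search.stats.models_found} models found")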
@@ -242,6 +242,17 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
|
||||
control_model: ControlNetModel = None,
|
||||
):
|
||||
super().__init__(
|
||||
vae,
|
||||
text_encoder,
|
||||
tokenizer,
|
||||
unet,
|
||||
scheduler,
|
||||
safety_checker,
|
||||
feature_extractor,
|
||||
requires_safety_checker,
|
||||
)
|
||||
|
||||
self.register_modules(
|
||||
vae=vae,
|
||||
text_encoder=text_encoder,
|
||||
tokenizer=tokenizer,
|
||||
@@ -249,9 +260,9 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
|
||||
scheduler=scheduler,
|
||||
safety_checker=safety_checker,
|
||||
feature_extractor=feature_extractor,
|
||||
requires_safety_checker=requires_safety_checker,
|
||||
# FIXME: can't currently register control module
|
||||
# control_model=control_model,
|
||||
)
|
||||
|
||||
self.invokeai_diffuser = InvokeAIDiffuserComponent(self.unet, self._unet_forward)
|
||||
self.control_model = control_model
|
||||
self.use_ip_adapter = False
|
||||
|
||||
@@ -3,42 +3,7 @@ from typing import Union
|
||||
|
||||
import numpy as np
|
||||
|
||||
from invokeai.app.invocations.latent import LATENT_SCALE_FACTOR
|
||||
from invokeai.backend.tiles.utils import TBLR, Tile, paste, seam_blend
|
||||
|
||||
|
||||
def calc_overlap(tiles: list[Tile], num_tiles_x: int, num_tiles_y: int) -> list[Tile]:
|
||||
"""Calculate and update the overlap of a list of tiles.
|
||||
|
||||
Args:
|
||||
tiles (list[Tile]): The list of tiles describing the locations of the respective `tile_images`.
|
||||
num_tiles_x: the number of tiles on the x axis.
|
||||
num_tiles_y: the number of tiles on the y axis.
|
||||
"""
|
||||
|
||||
def get_tile_or_none(idx_y: int, idx_x: int) -> Union[Tile, None]:
|
||||
if idx_y < 0 or idx_y >= num_tiles_y or idx_x < 0 or idx_x >= num_tiles_x:
|
||||
return None
|
||||
return tiles[idx_y * num_tiles_x + idx_x]
|
||||
|
||||
for tile_idx_y in range(num_tiles_y):
|
||||
for tile_idx_x in range(num_tiles_x):
|
||||
cur_tile = get_tile_or_none(tile_idx_y, tile_idx_x)
|
||||
top_neighbor_tile = get_tile_or_none(tile_idx_y - 1, tile_idx_x)
|
||||
left_neighbor_tile = get_tile_or_none(tile_idx_y, tile_idx_x - 1)
|
||||
|
||||
assert cur_tile is not None
|
||||
|
||||
# Update cur_tile top-overlap and corresponding top-neighbor bottom-overlap.
|
||||
if top_neighbor_tile is not None:
|
||||
cur_tile.overlap.top = max(0, top_neighbor_tile.coords.bottom - cur_tile.coords.top)
|
||||
top_neighbor_tile.overlap.bottom = cur_tile.overlap.top
|
||||
|
||||
# Update cur_tile left-overlap and corresponding left-neighbor right-overlap.
|
||||
if left_neighbor_tile is not None:
|
||||
cur_tile.overlap.left = max(0, left_neighbor_tile.coords.right - cur_tile.coords.left)
|
||||
left_neighbor_tile.overlap.right = cur_tile.overlap.left
|
||||
return tiles
|
||||
from invokeai.backend.tiles.utils import TBLR, Tile, paste
|
||||
|
||||
|
||||
def calc_tiles_with_overlap(
|
||||
@@ -98,133 +63,31 @@ def calc_tiles_with_overlap(
|
||||
|
||||
tiles.append(tile)
|
||||
|
||||
return calc_overlap(tiles, num_tiles_x, num_tiles_y)
|
||||
def get_tile_or_none(idx_y: int, idx_x: int) -> Union[Tile, None]:
|
||||
if idx_y < 0 or idx_y >= num_tiles_y or idx_x < 0 or idx_x >= num_tiles_x:
|
||||
return None
|
||||
return tiles[idx_y * num_tiles_x + idx_x]
|
||||
|
||||
|
||||
def calc_tiles_even_split(
|
||||
image_height: int, image_width: int, num_tiles_x: int, num_tiles_y: int, overlap: int = 0
|
||||
) -> list[Tile]:
|
||||
"""Calculate the tile coordinates for a given image shape with the number of tiles requested.
|
||||
|
||||
Args:
|
||||
image_height (int): The image height in px.
|
||||
image_width (int): The image width in px.
|
||||
num_tiles_x (int): The number of tiles to split the image into on the X-axis.
num_tiles_y (int): The number of tiles to split the image into on the Y-axis.
|
||||
overlap (int, optional): The overlap between adjacent tiles in pixels. Defaults to 0.
|
||||
|
||||
Returns:
|
||||
list[Tile]: A list of tiles that cover the image shape. Ordered from left-to-right, top-to-bottom.
|
||||
"""
|
||||
# Ensure the image is divisible by LATENT_SCALE_FACTOR
|
||||
if image_width % LATENT_SCALE_FACTOR != 0 or image_height % LATENT_SCALE_FACTOR != 0:
|
||||
raise ValueError(f"image size (({image_width}, {image_height})) must be divisible by {LATENT_SCALE_FACTOR}")
|
||||
|
||||
# Calculate the tile size based on the number of tiles and overlap, and ensure it's divisible by 8 (rounding down)
|
||||
if num_tiles_x > 1:
|
||||
# Ensure the overlap is not more than the maximum possible; with only one tile we don't care about overlap.
|
||||
assert overlap <= image_width - (LATENT_SCALE_FACTOR * (num_tiles_x - 1))
|
||||
tile_size_x = LATENT_SCALE_FACTOR * math.floor(
|
||||
((image_width + overlap * (num_tiles_x - 1)) // num_tiles_x) / LATENT_SCALE_FACTOR
|
||||
)
|
||||
assert overlap < tile_size_x
|
||||
else:
|
||||
tile_size_x = image_width
|
||||
|
||||
if num_tiles_y > 1:
|
||||
# Ensure the overlap is not more than the maximum possible; with only one tile we don't care about overlap.
|
||||
assert overlap <= image_height - (LATENT_SCALE_FACTOR * (num_tiles_y - 1))
|
||||
tile_size_y = LATENT_SCALE_FACTOR * math.floor(
|
||||
((image_height + overlap * (num_tiles_y - 1)) // num_tiles_y) / LATENT_SCALE_FACTOR
|
||||
)
|
||||
assert overlap < tile_size_y
|
||||
else:
|
||||
tile_size_y = image_height
|
||||
|
||||
# tiles[y * num_tiles_x + x] is the tile for the y'th row, x'th column.
|
||||
tiles: list[Tile] = []
|
||||
|
||||
# Calculate tile coordinates. (Ignore overlap values for now.)
|
||||
# Iterate over tiles again and calculate overlaps.
|
||||
for tile_idx_y in range(num_tiles_y):
|
||||
# Calculate the top and bottom of the row
|
||||
top = tile_idx_y * (tile_size_y - overlap)
|
||||
bottom = min(top + tile_size_y, image_height)
|
||||
# For the last row adjust bottom to be the height of the image
|
||||
if tile_idx_y == num_tiles_y - 1:
|
||||
bottom = image_height
|
||||
|
||||
for tile_idx_x in range(num_tiles_x):
|
||||
# Calculate the left & right coordinate of each tile
|
||||
left = tile_idx_x * (tile_size_x - overlap)
|
||||
right = min(left + tile_size_x, image_width)
|
||||
# For the last tile in the row adjust right to be the width of the image
|
||||
if tile_idx_x == num_tiles_x - 1:
|
||||
right = image_width
|
||||
cur_tile = get_tile_or_none(tile_idx_y, tile_idx_x)
|
||||
top_neighbor_tile = get_tile_or_none(tile_idx_y - 1, tile_idx_x)
|
||||
left_neighbor_tile = get_tile_or_none(tile_idx_y, tile_idx_x - 1)
|
||||
|
||||
tile = Tile(
|
||||
coords=TBLR(top=top, bottom=bottom, left=left, right=right),
|
||||
overlap=TBLR(top=0, bottom=0, left=0, right=0),
|
||||
)
|
||||
assert cur_tile is not None
|
||||
|
||||
tiles.append(tile)
|
||||
# Update cur_tile top-overlap and corresponding top-neighbor bottom-overlap.
|
||||
if top_neighbor_tile is not None:
|
||||
cur_tile.overlap.top = max(0, top_neighbor_tile.coords.bottom - cur_tile.coords.top)
|
||||
top_neighbor_tile.overlap.bottom = cur_tile.overlap.top
|
||||
|
||||
return calc_overlap(tiles, num_tiles_x, num_tiles_y)
|
||||
# Update cur_tile left-overlap and corresponding left-neighbor right-overlap.
|
||||
if left_neighbor_tile is not None:
|
||||
cur_tile.overlap.left = max(0, left_neighbor_tile.coords.right - cur_tile.coords.left)
|
||||
left_neighbor_tile.overlap.right = cur_tile.overlap.left
|
||||
|
||||
|
||||
def calc_tiles_min_overlap(
|
||||
image_height: int,
|
||||
image_width: int,
|
||||
tile_height: int,
|
||||
tile_width: int,
|
||||
min_overlap: int = 0,
|
||||
) -> list[Tile]:
|
||||
"""Calculate the tile coordinates for a given image shape under a simple tiling scheme with overlaps.
|
||||
|
||||
Args:
|
||||
image_height (int): The image height in px.
|
||||
image_width (int): The image width in px.
|
||||
tile_height (int): The tile height in px. All tiles will have this height.
|
||||
tile_width (int): The tile width in px. All tiles will have this width.
|
||||
min_overlap (int): The target minimum overlap between adjacent tiles. If the tiles do not evenly cover the image
|
||||
shape, then the overlap will be spread between the tiles.
|
||||
|
||||
Returns:
|
||||
list[Tile]: A list of tiles that cover the image shape. Ordered from left-to-right, top-to-bottom.
|
||||
"""
|
||||
|
||||
assert min_overlap < tile_height
|
||||
assert min_overlap < tile_width
|
||||
|
||||
# Catch the case where the tile size is larger than the image size and adjust the tile size.
|
||||
if image_width < tile_width:
|
||||
tile_width = image_width
|
||||
|
||||
if image_height < tile_height:
|
||||
tile_height = image_height
|
||||
|
||||
num_tiles_x = math.ceil((image_width - min_overlap) / (tile_width - min_overlap))
|
||||
num_tiles_y = math.ceil((image_height - min_overlap) / (tile_height - min_overlap))
|
||||
|
||||
# tiles[y * num_tiles_x + x] is the tile for the y'th row, x'th column.
|
||||
tiles: list[Tile] = []
|
||||
|
||||
# Calculate tile coordinates. (Ignore overlap values for now.)
|
||||
for tile_idx_y in range(num_tiles_y):
|
||||
top = (tile_idx_y * (image_height - tile_height)) // (num_tiles_y - 1) if num_tiles_y > 1 else 0
|
||||
bottom = top + tile_height
|
||||
|
||||
for tile_idx_x in range(num_tiles_x):
|
||||
left = (tile_idx_x * (image_width - tile_width)) // (num_tiles_x - 1) if num_tiles_x > 1 else 0
|
||||
right = left + tile_width
|
||||
|
||||
tile = Tile(
|
||||
coords=TBLR(top=top, bottom=bottom, left=left, right=right),
|
||||
overlap=TBLR(top=0, bottom=0, left=0, right=0),
|
||||
)
|
||||
|
||||
tiles.append(tile)
|
||||
|
||||
return calc_overlap(tiles, num_tiles_x, num_tiles_y)
|
||||
return tiles
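# A minimal usage sketch of the function above, assuming a 1024x768 image split into
# 512x512 tiles with at least 64 px of overlap between neighbouring tiles.
if __name__ == "__main__":
    example_tiles = calc_tiles_min_overlap(
        image_height=768, image_width=1024, tile_height=512, tile_width=512, min_overlap=64
    )
    for t in example_tiles:
        print(t.coords, t.overlap)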
|
||||
|
||||
|
||||
def merge_tiles_with_linear_blending(
|
||||
@@ -336,91 +199,3 @@ def merge_tiles_with_linear_blending(
|
||||
),
|
||||
mask=mask,
|
||||
)
|
||||
|
||||
|
||||
def merge_tiles_with_seam_blending(
|
||||
dst_image: np.ndarray, tiles: list[Tile], tile_images: list[np.ndarray], blend_amount: int
|
||||
):
|
||||
"""Merge a set of image tiles into `dst_image` with seam blending between the tiles.
|
||||
|
||||
We expect every tile edge to either:
|
||||
1) have an overlap of 0, because it is aligned with the image edge, or
|
||||
2) have an overlap >= blend_amount.
|
||||
If neither of these conditions is satisfied, we raise an exception.
|
||||
|
||||
The seam blending is centered on a seam of least energy of the overlap between adjacent tiles.
|
||||
|
||||
Args:
|
||||
dst_image (np.ndarray): The destination image. Shape: (H, W, C).
|
||||
tiles (list[Tile]): The list of tiles describing the locations of the respective `tile_images`.
|
||||
tile_images (list[np.ndarray]): The tile images to merge into `dst_image`.
|
||||
blend_amount (int): The amount of blending (in px) between adjacent overlapping tiles.
|
||||
"""
|
||||
# Sort tiles and images first by left x coordinate, then by top y coordinate. During tile processing, we want to
|
||||
# iterate over tiles left-to-right, top-to-bottom.
|
||||
tiles_and_images = list(zip(tiles, tile_images, strict=True))
|
||||
tiles_and_images = sorted(tiles_and_images, key=lambda x: x[0].coords.left)
|
||||
tiles_and_images = sorted(tiles_and_images, key=lambda x: x[0].coords.top)
|
||||
|
||||
# Organize tiles into rows.
|
||||
tile_and_image_rows: list[list[tuple[Tile, np.ndarray]]] = []
|
||||
cur_tile_and_image_row: list[tuple[Tile, np.ndarray]] = []
|
||||
first_tile_in_cur_row, _ = tiles_and_images[0]
|
||||
for tile_and_image in tiles_and_images:
|
||||
tile, _ = tile_and_image
|
||||
if not (
|
||||
tile.coords.top == first_tile_in_cur_row.coords.top
|
||||
and tile.coords.bottom == first_tile_in_cur_row.coords.bottom
|
||||
):
|
||||
# Store the previous row, and start a new one.
|
||||
tile_and_image_rows.append(cur_tile_and_image_row)
|
||||
cur_tile_and_image_row = []
|
||||
first_tile_in_cur_row, _ = tile_and_image
|
||||
|
||||
cur_tile_and_image_row.append(tile_and_image)
|
||||
tile_and_image_rows.append(cur_tile_and_image_row)
|
||||
|
||||
for tile_and_image_row in tile_and_image_rows:
|
||||
first_tile_in_row, _ = tile_and_image_row[0]
|
||||
row_height = first_tile_in_row.coords.bottom - first_tile_in_row.coords.top
|
||||
row_image = np.zeros((row_height, dst_image.shape[1], dst_image.shape[2]), dtype=dst_image.dtype)
|
||||
|
||||
# Blend the tiles in the row horizontally.
|
||||
for tile, tile_image in tile_and_image_row:
|
||||
# We expect the tiles to be ordered left-to-right.
|
||||
# For each tile:
|
||||
# - extract the overlap regions and pass to seam_blend()
|
||||
# - apply blended region to the row_image
|
||||
# - apply the un-blended region to the row_image
|
||||
tile_height, tile_width, _ = tile_image.shape
|
||||
overlap_size = tile.overlap.left
|
||||
# Left blending:
|
||||
if overlap_size > 0:
|
||||
assert overlap_size >= blend_amount
|
||||
|
||||
overlap_coord_right = tile.coords.left + overlap_size
|
||||
src_overlap = row_image[:, tile.coords.left : overlap_coord_right]
|
||||
dst_overlap = tile_image[:, :overlap_size]
|
||||
blended_overlap = seam_blend(src_overlap, dst_overlap, blend_amount, x_seam=False)
|
||||
row_image[:, tile.coords.left : overlap_coord_right] = blended_overlap
|
||||
row_image[:, overlap_coord_right : tile.coords.right] = tile_image[:, overlap_size:]
|
||||
else:
|
||||
# no overlap, just paste the tile
|
||||
row_image[:, tile.coords.left : tile.coords.right] = tile_image
|
||||
|
||||
# Blend the row into the dst_image
|
||||
# We assume that the entire row has the same vertical overlaps as the first_tile_in_row.
|
||||
# Rows are processed in the same way as tiles (extract overlap, blend, apply)
|
||||
row_overlap_size = first_tile_in_row.overlap.top
|
||||
if row_overlap_size > 0:
|
||||
assert row_overlap_size >= blend_amount
|
||||
|
||||
overlap_coords_bottom = first_tile_in_row.coords.top + row_overlap_size
|
||||
src_overlap = dst_image[first_tile_in_row.coords.top : overlap_coords_bottom, :]
|
||||
dst_overlap = row_image[:row_overlap_size, :]
|
||||
blended_overlap = seam_blend(src_overlap, dst_overlap, blend_amount, x_seam=True)
|
||||
dst_image[first_tile_in_row.coords.top : overlap_coords_bottom, :] = blended_overlap
|
||||
dst_image[overlap_coords_bottom : first_tile_in_row.coords.bottom, :] = row_image[row_overlap_size:, :]
|
||||
else:
|
||||
# no overlap, just paste the row
|
||||
dst_image[first_tile_in_row.coords.top : first_tile_in_row.coords.bottom, :] = row_image
|
||||
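# A minimal usage sketch of the function above under assumed sizes: two 48x64 tiles with a
# 16 px horizontal overlap are merged into a 48x80 destination with an 8 px blended seam.
if __name__ == "__main__":
    example_dst = np.zeros((48, 80, 3), dtype=np.float64)
    example_tiles = [
        Tile(coords=TBLR(top=0, bottom=48, left=0, right=64), overlap=TBLR(top=0, bottom=0, left=0, right=16)),
        Tile(coords=TBLR(top=0, bottom=48, left=16, right=80), overlap=TBLR(top=0, bottom=0, left=16, right=0)),
    ]
    example_images = [np.full((48, 64, 3), 64.0), np.full((48, 64, 3), 192.0)]
    merge_tiles_with_seam_blending(example_dst, example_tiles, example_images, blend_amount=8)
    print(example_dst.mean())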
|
||||
@@ -1,7 +1,5 @@
|
||||
import math
|
||||
from typing import Optional
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
@@ -33,10 +31,10 @@ def paste(dst_image: np.ndarray, src_image: np.ndarray, box: TBLR, mask: Optiona
|
||||
"""Paste a source image into a destination image.
|
||||
|
||||
Args:
|
||||
dst_image (np.array): The destination image to paste into. Shape: (H, W, C).
|
||||
src_image (np.array): The source image to paste. Shape: (H, W, C). H and W must be compatible with 'box'.
|
||||
dst_image (torch.Tensor): The destination image to paste into. Shape: (H, W, C).
|
||||
src_image (torch.Tensor): The source image to paste. Shape: (H, W, C). H and W must be compatible with 'box'.
|
||||
box (TBLR): Box defining the region in the 'dst_image' where 'src_image' will be pasted.
|
||||
mask (Optional[np.array]): A mask that defines the blending between 'src_image' and 'dst_image'.
|
||||
mask (Optional[torch.Tensor]): A mask that defines the blending between 'src_image' and 'dst_image'.
|
||||
Range: [0.0, 1.0], Shape: (H, W). The output is calculate per-pixel according to
|
||||
`src * mask + dst * (1 - mask)`.
|
||||
"""
|
||||
@@ -47,106 +45,3 @@ def paste(dst_image: np.ndarray, src_image: np.ndarray, box: TBLR, mask: Optiona
|
||||
mask = np.expand_dims(mask, -1)
|
||||
dst_image_box = dst_image[box.top : box.bottom, box.left : box.right]
|
||||
dst_image[box.top : box.bottom, box.left : box.right] = src_image * mask + dst_image_box * (1.0 - mask)
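# A minimal usage sketch of paste() with a soft mask, assuming TBLR supports keyword
# construction (as used in tiles.py); each masked pixel becomes src * mask + dst * (1 - mask).
if __name__ == "__main__":
    example_dst = np.zeros((64, 64, 3), dtype=np.float64)
    example_src = np.full((16, 16, 3), 255.0)
    example_mask = np.full((16, 16), 0.5)
    paste(example_dst, example_src, TBLR(top=8, bottom=24, left=8, right=24), mask=example_mask)
    print(example_dst[8:24, 8:24].mean())  # ~127.5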
|
||||
|
||||
|
||||
def seam_blend(ia1: np.ndarray, ia2: np.ndarray, blend_amount: int, x_seam: bool) -> np.ndarray:
|
||||
"""Blend two overlapping tile sections using a seams to find a path.
|
||||
|
||||
It is assumed that the input images are RGB numpy arrays of the same size.
|
||||
|
||||
Args:
|
||||
ia1 (np.array): Image array 1 Shape: (H, W, C).
|
||||
ia2 (np.array): Image array 2 Shape: (H, W, C).
|
||||
x_seam (bool): Whether the images should be blended along the x axis.
|
||||
blend_amount (int): The size of the blur to use on the seam. Half of this value will be used to avoid the edges of the image.
|
||||
"""
|
||||
assert ia1.shape == ia2.shape
|
||||
assert ia1.size == ia2.size
|
||||
|
||||
def shift(arr, num, fill_value=255.0):
|
||||
result = np.full_like(arr, fill_value)
|
||||
if num > 0:
|
||||
result[num:] = arr[:-num]
|
||||
elif num < 0:
|
||||
result[:num] = arr[-num:]
|
||||
else:
|
||||
result[:] = arr
|
||||
return result
|
||||
|
||||
# Assume RGB and convert to grey
|
||||
# Could offer other options for the luminance conversion
|
||||
# BT.709 [0.2126, 0.7152, 0.0722], BT.2020 [0.2627, 0.6780, 0.0593])
|
||||
# it might not have a huge impact due to the blur that is applied over the seam
|
||||
iag1 = np.dot(ia1, [0.2989, 0.5870, 0.1140]) # BT.601 perceived brightness
|
||||
iag2 = np.dot(ia2, [0.2989, 0.5870, 0.1140])
|
||||
|
||||
# Calc Difference between the images
|
||||
ia = iag2 - iag1
|
||||
|
||||
# If the seam is on the X-axis rotate the array so we can treat it like a vertical seam
|
||||
if x_seam:
|
||||
ia = np.rot90(ia, 1)
|
||||
|
||||
# Calc max and min X & Y limits
|
||||
# gutter is used to avoid the blur hitting the edge of the image
|
||||
gutter = math.ceil(blend_amount / 2) if blend_amount > 0 else 0
|
||||
max_y, max_x = ia.shape
|
||||
max_x -= gutter
|
||||
min_x = gutter
|
||||
|
||||
# Calc the energy in the difference
|
||||
# Could offer different energy calculations e.g. Sobel or Scharr
|
||||
energy = np.abs(np.gradient(ia, axis=0)) + np.abs(np.gradient(ia, axis=1))
|
||||
|
||||
# Find the starting position of the seam
|
||||
res = np.copy(energy)
|
||||
for y in range(1, max_y):
|
||||
row = res[y, :]
|
||||
rowl = shift(row, -1)
|
||||
rowr = shift(row, 1)
|
||||
res[y, :] = res[y - 1, :] + np.min([row, rowl, rowr], axis=0)
|
||||
|
||||
# create an array max_y long
|
||||
lowest_energy_line = np.empty([max_y], dtype="uint16")
|
||||
lowest_energy_line[max_y - 1] = np.argmin(res[max_y - 1, min_x : max_x - 1])
|
||||
|
||||
# Calc the path of the seam
|
||||
# could offer options for larger search than just 1 pixel by adjusting lpos and rpos
|
||||
for ypos in range(max_y - 2, -1, -1):
|
||||
lowest_pos = lowest_energy_line[ypos + 1]
|
||||
lpos = lowest_pos - 1
|
||||
rpos = lowest_pos + 1
|
||||
lpos = np.clip(lpos, min_x, max_x - 1)
|
||||
rpos = np.clip(rpos, min_x, max_x - 1)
|
||||
lowest_energy_line[ypos] = np.argmin(energy[ypos, lpos : rpos + 1]) + lpos
|
||||
|
||||
# Draw the mask
|
||||
mask = np.zeros_like(ia)
|
||||
for ypos in range(0, max_y):
|
||||
to_fill = lowest_energy_line[ypos]
|
||||
mask[ypos, :to_fill] = 1
|
||||
|
||||
# If the seam is on the X-axis rotate the array back
|
||||
if x_seam:
|
||||
mask = np.rot90(mask, 3)
|
||||
|
||||
# blur the seam mask if required
|
||||
if blend_amount > 0:
|
||||
mask = cv2.blur(mask, (blend_amount, blend_amount))
|
||||
|
||||
# for visual debugging
|
||||
# from PIL import Image
|
||||
# m_image = Image.fromarray((mask * 255.0).astype("uint8"))
|
||||
|
||||
# copy ia2 over ia1 while applying the seam mask
|
||||
mask = np.expand_dims(mask, -1)
|
||||
blended_image = ia1 * mask + ia2 * (1.0 - mask)
|
||||
|
||||
# for visual debugging
|
||||
# i1 = Image.fromarray(ia1.astype("uint8"))
|
||||
# i2 = Image.fromarray(ia2.astype("uint8"))
|
||||
# b_image = Image.fromarray(blended_image.astype("uint8"))
|
||||
# print(f"{ia1.shape}, {ia2.shape}, {mask.shape}, {blended_image.shape}")
|
||||
# print(f"{i1.size}, {i2.size}, {m_image.size}, {b_image.size}")
|
||||
|
||||
return blended_image
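# A minimal usage sketch of seam_blend() on two random same-sized RGB overlap regions,
# assuming float arrays in the 0-255 range as used above.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    example_left = rng.uniform(0.0, 255.0, size=(64, 32, 3))
    example_right = rng.uniform(0.0, 255.0, size=(64, 32, 3))
    blended = seam_blend(example_left, example_right, blend_amount=8, x_seam=False)
    print(blended.shape)  # (64, 32, 3)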
|
||||
|
||||
@@ -11,7 +11,4 @@ from .devices import ( # noqa: F401
|
||||
normalize_device,
|
||||
torch_dtype,
|
||||
)
|
||||
from .logging import InvokeAILogger
|
||||
from .util import Chdir, ask_user, download_with_resume, instantiate_from_config, url_attachment_name # noqa: F401
|
||||
|
||||
__all__ = ["Chdir", "InvokeAILogger", "choose_precision", "choose_torch_device"]
|
||||
|
||||
@@ -342,13 +342,14 @@ class InvokeAILogger(object): # noqa D102
|
||||
cls, name: str = "InvokeAI", config: InvokeAIAppConfig = InvokeAIAppConfig.get_config()
|
||||
) -> logging.Logger: # noqa D102
|
||||
if name in cls.loggers:
|
||||
return cls.loggers[name]
|
||||
|
||||
logger = logging.getLogger(name)
|
||||
logger = cls.loggers[name]
|
||||
logger.handlers.clear()
|
||||
else:
|
||||
logger = logging.getLogger(name)
|
||||
logger.setLevel(config.log_level.upper()) # yes, strings work here
|
||||
for ch in cls.get_loggers(config):
|
||||
logger.addHandler(ch)
|
||||
cls.loggers[name] = logger
|
||||
cls.loggers[name] = logger
|
||||
return cls.loggers[name]
|
||||
|
||||
@classmethod
|
||||
@@ -357,7 +358,7 @@ class InvokeAILogger(object): # noqa D102
|
||||
handlers = []
|
||||
for handler in handler_strs:
|
||||
handler_name, *args = handler.split("=", 2)
|
||||
arg = args[0] if len(args) > 0 else None
|
||||
args = args[0] if len(args) > 0 else None
|
||||
|
||||
# console and file get the fancy formatter.
|
||||
# syslog gets a simple one
|
||||
@@ -369,16 +370,16 @@ class InvokeAILogger(object): # noqa D102
|
||||
handlers.append(ch)
|
||||
|
||||
elif handler_name == "syslog":
|
||||
ch = cls._parse_syslog_args(arg)
|
||||
ch = cls._parse_syslog_args(args)
|
||||
handlers.append(ch)
|
||||
|
||||
elif handler_name == "file":
|
||||
ch = cls._parse_file_args(arg)
|
||||
ch = cls._parse_file_args(args)
|
||||
ch.setFormatter(formatter())
|
||||
handlers.append(ch)
|
||||
|
||||
elif handler_name == "http":
|
||||
ch = cls._parse_http_args(arg)
|
||||
ch = cls._parse_http_args(args)
|
||||
handlers.append(ch)
|
||||
return handlers
|
||||
|
||||
|
||||
@@ -32,9 +32,9 @@ sd-1/main/Analog-Diffusion:
|
||||
description: An SD-1.5 model trained on diverse analog photographs (2.13 GB)
|
||||
repo_id: wavymulder/Analog-Diffusion
|
||||
recommended: False
|
||||
sd-1/main/Deliberate_v5:
|
||||
sd-1/main/Deliberate:
|
||||
description: Versatile model that produces detailed images up to 768px (4.27 GB)
|
||||
path: https://huggingface.co/XpucT/Deliberate/resolve/main/Deliberate_v5.safetensors
|
||||
repo_id: XpucT/Deliberate
|
||||
recommended: False
|
||||
sd-1/main/Dungeons-and-Diffusion:
|
||||
description: Dungeons & Dragons characters (2.13 GB)
|
||||
|
||||
@@ -4,7 +4,6 @@ pip install <path_to_git_source>.
"""
import os
import platform
from distutils.version import LooseVersion

import pkg_resources
import psutil
@@ -32,6 +31,10 @@ else:
console = Console(style=Style(color="grey74", bgcolor="grey19"))


def get_versions() -> dict:
return requests.get(url=INVOKE_AI_REL).json()


def invokeai_is_running() -> bool:
for p in psutil.process_iter():
try:
@@ -47,20 +50,6 @@ def invokeai_is_running() -> bool:
return False


def get_pypi_versions():
url = "https://pypi.org/pypi/invokeai/json"
try:
data = requests.get(url).json()
except Exception:
raise Exception("Unable to fetch version information from PyPi")

versions = list(data["releases"].keys())
versions.sort(key=LooseVersion, reverse=True)
latest_version = [v for v in versions if "rc" not in v][0]
latest_release_candidate = [v for v in versions if "rc" in v][0]
return latest_version, latest_release_candidate, versions


def welcome(latest_release: str, latest_prerelease: str):
@group()
def text():
@@ -74,7 +63,8 @@ def welcome(latest_release: str, latest_prerelease: str):
yield "[bold yellow]Options:"
yield f"""[1] Update to the latest [bold]official release[/bold] ([italic]{latest_release}[/italic])
[2] Update to the latest [bold]pre-release[/bold] (may be buggy; caveat emptor!) ([italic]{latest_prerelease}[/italic])
[3] Manually enter the [bold]version[/bold] you wish to update to"""
[3] Manually enter the [bold]tag name[/bold] for the version you wish to update to
[4] Manually enter the [bold]branch name[/bold] for the version you wish to update to"""

console.rule()
print(
@@ -102,35 +92,44 @@ def get_extras():


def main():
versions = get_versions()
released_versions = [x for x in versions if not (x["draft"] or x["prerelease"])]
prerelease_versions = [x for x in versions if not x["draft"] and x["prerelease"]]
latest_release = released_versions[0]["tag_name"] if len(released_versions) else None
latest_prerelease = prerelease_versions[0]["tag_name"] if len(prerelease_versions) else None

if invokeai_is_running():
print(":exclamation: [bold red]Please terminate all running instances of InvokeAI before updating.[/red bold]")
input("Press any key to continue...")
return

latest_release, latest_prerelease, versions = get_pypi_versions()

welcome(latest_release, latest_prerelease)

release = latest_release
choice = Prompt.ask("Choice:", choices=["1", "2", "3"], default="1")
tag = None
branch = None
release = None
choice = Prompt.ask("Choice:", choices=["1", "2", "3", "4"], default="1")

if choice == "1":
release = latest_release
elif choice == "2":
release = latest_prerelease
elif choice == "3":
while True:
release = Prompt.ask("Enter an InvokeAI version")
release.strip()
if release in versions:
break
print(f":exclamation: [bold red]'{release}' is not a recognized InvokeAI release.[/red bold]")
while not tag:
tag = Prompt.ask("Enter an InvokeAI tag name")
elif choice == "4":
while not branch:
branch = Prompt.ask("Enter an InvokeAI branch name")

extras = get_extras()

print(f":crossed_fingers: Upgrading to [yellow]{release}[/yellow]")
cmd = f'pip install "invokeai{extras}=={release}" --use-pep517 --upgrade'

print(f":crossed_fingers: Upgrading to [yellow]{tag or release or branch}[/yellow]")
if release:
cmd = f'pip install "invokeai{extras} @ {INVOKE_AI_SRC}/{release}.zip" --use-pep517 --upgrade'
elif tag:
cmd = f'pip install "invokeai{extras} @ {INVOKE_AI_TAG}/{tag}.zip" --use-pep517 --upgrade'
else:
cmd = f'pip install "invokeai{extras} @ {INVOKE_AI_BRANCH}/{branch}.zip" --use-pep517 --upgrade'
print("")
print("")
if os.system(cmd) == 0:

@@ -11,7 +11,6 @@ module.exports = {
'plugin:react-hooks/recommended',
'plugin:react/jsx-runtime',
'prettier',
'plugin:storybook/recommended',
],
parser: '@typescript-eslint/parser',
parserOptions: {
@@ -27,7 +26,6 @@ module.exports = {
'eslint-plugin-react-hooks',
'i18next',
'path',
'unused-imports',
],
root: true,
rules: {
@@ -46,16 +44,9 @@ module.exports = {
radix: 'error',
'space-before-blocks': 'error',
'import/prefer-default-export': 'off',
'@typescript-eslint/no-unused-vars': 'off',
'unused-imports/no-unused-imports': 'error',
'unused-imports/no-unused-vars': [
'@typescript-eslint/no-unused-vars': [
'warn',
{
vars: 'all',
varsIgnorePattern: '^_',
args: 'after-used',
argsIgnorePattern: '^_',
},
{ varsIgnorePattern: '^_', argsIgnorePattern: '^_' },
],
'@typescript-eslint/ban-ts-comment': 'warn',
'@typescript-eslint/no-explicit-any': 'warn',

5
invokeai/frontend/web/.gitignore
vendored
5
invokeai/frontend/web/.gitignore
vendored
@@ -9,8 +9,7 @@ lerna-debug.log*

node_modules
# We want to distribute the repo
dist
dist/**
# dist
dist-ssr
*.local

@@ -39,4 +38,4 @@ stats.html

# Yalc
.yalc
yalc.lock
yalc.lock
4
invokeai/frontend/web/.husky/pre-commit
Executable file
4
invokeai/frontend/web/.husky/pre-commit
Executable file
@@ -0,0 +1,4 @@
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"

cd invokeai/frontend/web/ && npm run lint-staged

@@ -12,4 +12,3 @@ index.html
src/services/api/schema.d.ts
static/
src/theme/css/overlayscrollbars.css
pnpm-lock.yaml

@@ -1,21 +0,0 @@
import type { StorybookConfig } from '@storybook/react-vite';

const config: StorybookConfig = {
stories: ['../src/**/*.mdx', '../src/**/*.stories.@(js|jsx|mjs|ts|tsx)'],
addons: [
'@storybook/addon-links',
'@storybook/addon-essentials',
'@storybook/addon-interactions',
],
framework: {
name: '@storybook/react-vite',
options: {},
},
docs: {
autodocs: 'tag',
},
core: {
disableTelemetry: true,
},
};
export default config;
@@ -1,6 +0,0 @@
import { addons } from '@storybook/manager-api';
import { themes } from '@storybook/theming';

addons.setConfig({
theme: themes.dark,
});

@@ -1,47 +0,0 @@
import { Preview } from '@storybook/react';
import { themes } from '@storybook/theming';
import i18n from 'i18next';
import React from 'react';
import { initReactI18next } from 'react-i18next';
import { Provider } from 'react-redux';
import GlobalHotkeys from '../src/app/components/GlobalHotkeys';
import ThemeLocaleProvider from '../src/app/components/ThemeLocaleProvider';
import { createStore } from '../src/app/store/store';
// TODO: Disabled for IDE performance issues with our translation JSON
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
// @ts-ignore
import translationEN from '../public/locales/en.json';

i18n.use(initReactI18next).init({
lng: 'en',
resources: {
en: { translation: translationEN },
},
debug: true,
interpolation: {
escapeValue: false,
},
returnNull: false,
});

const store = createStore(undefined, false);

const preview: Preview = {
decorators: [
(Story) => (
<Provider store={store}>
<ThemeLocaleProvider>
<GlobalHotkeys />
<Story />
</ThemeLocaleProvider>
</Provider>
),
],
parameters: {
docs: {
theme: themes.dark,
},
},
};

export default preview;

193957
invokeai/frontend/web/.yarn/releases/yarn-1.22.19.cjs
vendored
Normal file
193957
invokeai/frontend/web/.yarn/releases/yarn-1.22.19.cjs
vendored
Normal file
File diff suppressed because one or more lines are too long
5
invokeai/frontend/web/.yarnrc
Normal file
5
invokeai/frontend/web/.yarnrc
Normal file
@@ -0,0 +1,5 @@
# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
# yarn lockfile v1


yarn-path ".yarn/releases/yarn-1.22.19.cjs"

1
invokeai/frontend/web/.yarnrc.yml
Normal file
1
invokeai/frontend/web/.yarnrc.yml
Normal file
@@ -0,0 +1 @@
yarnPath: .yarn/releases/yarn-1.22.19.cjs

1
invokeai/frontend/web/dist/assets/App-6125620a.css
vendored
Normal file
1
invokeai/frontend/web/dist/assets/App-6125620a.css
vendored
Normal file
File diff suppressed because one or more lines are too long
171
invokeai/frontend/web/dist/assets/App-6440ab3b.js
vendored
Normal file
171
invokeai/frontend/web/dist/assets/App-6440ab3b.js
vendored
Normal file
File diff suppressed because one or more lines are too long
1
invokeai/frontend/web/dist/assets/MantineProvider-a6a1d85c.js
vendored
Normal file
1
invokeai/frontend/web/dist/assets/MantineProvider-a6a1d85c.js
vendored
Normal file
File diff suppressed because one or more lines are too long
280
invokeai/frontend/web/dist/assets/ThemeLocaleProvider-9d17ef9a.js
vendored
Normal file
280
invokeai/frontend/web/dist/assets/ThemeLocaleProvider-9d17ef9a.js
vendored
Normal file
@@ -0,0 +1,280 @@
|
||||
import{I as s,ie as T,v as l,$ as A,ig as R,aa as V,ih as z,ii as j,ij as D,ik as F,il as G,im as W,io as K,az as H,ip as U,iq as Y}from"./index-f820e2e3.js";import{M as Z}from"./MantineProvider-a6a1d85c.js";var P=String.raw,E=P`
|
||||
:root,
|
||||
:host {
|
||||
--chakra-vh: 100vh;
|
||||
}
|
||||
|
||||
@supports (height: -webkit-fill-available) {
|
||||
:root,
|
||||
:host {
|
||||
--chakra-vh: -webkit-fill-available;
|
||||
}
|
||||
}
|
||||
|
||||
@supports (height: -moz-fill-available) {
|
||||
:root,
|
||||
:host {
|
||||
--chakra-vh: -moz-fill-available;
|
||||
}
|
||||
}
|
||||
|
||||
@supports (height: 100dvh) {
|
||||
:root,
|
||||
:host {
|
||||
--chakra-vh: 100dvh;
|
||||
}
|
||||
}
|
||||
`,B=()=>s.jsx(T,{styles:E}),J=({scope:e=""})=>s.jsx(T,{styles:P`
|
||||
html {
|
||||
line-height: 1.5;
|
||||
-webkit-text-size-adjust: 100%;
|
||||
font-family: system-ui, sans-serif;
|
||||
-webkit-font-smoothing: antialiased;
|
||||
text-rendering: optimizeLegibility;
|
||||
-moz-osx-font-smoothing: grayscale;
|
||||
touch-action: manipulation;
|
||||
}
|
||||
|
||||
body {
|
||||
position: relative;
|
||||
min-height: 100%;
|
||||
margin: 0;
|
||||
font-feature-settings: "kern";
|
||||
}
|
||||
|
||||
${e} :where(*, *::before, *::after) {
|
||||
border-width: 0;
|
||||
border-style: solid;
|
||||
box-sizing: border-box;
|
||||
word-wrap: break-word;
|
||||
}
|
||||
|
||||
main {
|
||||
display: block;
|
||||
}
|
||||
|
||||
${e} hr {
|
||||
border-top-width: 1px;
|
||||
box-sizing: content-box;
|
||||
height: 0;
|
||||
overflow: visible;
|
||||
}
|
||||
|
||||
${e} :where(pre, code, kbd,samp) {
|
||||
font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace;
|
||||
font-size: 1em;
|
||||
}
|
||||
|
||||
${e} a {
|
||||
background-color: transparent;
|
||||
color: inherit;
|
||||
text-decoration: inherit;
|
||||
}
|
||||
|
||||
${e} abbr[title] {
|
||||
border-bottom: none;
|
||||
text-decoration: underline;
|
||||
-webkit-text-decoration: underline dotted;
|
||||
text-decoration: underline dotted;
|
||||
}
|
||||
|
||||
${e} :where(b, strong) {
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
${e} small {
|
||||
font-size: 80%;
|
||||
}
|
||||
|
||||
${e} :where(sub,sup) {
|
||||
font-size: 75%;
|
||||
line-height: 0;
|
||||
position: relative;
|
||||
vertical-align: baseline;
|
||||
}
|
||||
|
||||
${e} sub {
|
||||
bottom: -0.25em;
|
||||
}
|
||||
|
||||
${e} sup {
|
||||
top: -0.5em;
|
||||
}
|
||||
|
||||
${e} img {
|
||||
border-style: none;
|
||||
}
|
||||
|
||||
${e} :where(button, input, optgroup, select, textarea) {
|
||||
font-family: inherit;
|
||||
font-size: 100%;
|
||||
line-height: 1.15;
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
${e} :where(button, input) {
|
||||
overflow: visible;
|
||||
}
|
||||
|
||||
${e} :where(button, select) {
|
||||
text-transform: none;
|
||||
}
|
||||
|
||||
${e} :where(
|
||||
button::-moz-focus-inner,
|
||||
[type="button"]::-moz-focus-inner,
|
||||
[type="reset"]::-moz-focus-inner,
|
||||
[type="submit"]::-moz-focus-inner
|
||||
) {
|
||||
border-style: none;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
${e} fieldset {
|
||||
padding: 0.35em 0.75em 0.625em;
|
||||
}
|
||||
|
||||
${e} legend {
|
||||
box-sizing: border-box;
|
||||
color: inherit;
|
||||
display: table;
|
||||
max-width: 100%;
|
||||
padding: 0;
|
||||
white-space: normal;
|
||||
}
|
||||
|
||||
${e} progress {
|
||||
vertical-align: baseline;
|
||||
}
|
||||
|
||||
${e} textarea {
|
||||
overflow: auto;
|
||||
}
|
||||
|
||||
${e} :where([type="checkbox"], [type="radio"]) {
|
||||
box-sizing: border-box;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
${e} input[type="number"]::-webkit-inner-spin-button,
|
||||
${e} input[type="number"]::-webkit-outer-spin-button {
|
||||
-webkit-appearance: none !important;
|
||||
}
|
||||
|
||||
${e} input[type="number"] {
|
||||
-moz-appearance: textfield;
|
||||
}
|
||||
|
||||
${e} input[type="search"] {
|
||||
-webkit-appearance: textfield;
|
||||
outline-offset: -2px;
|
||||
}
|
||||
|
||||
${e} input[type="search"]::-webkit-search-decoration {
|
||||
-webkit-appearance: none !important;
|
||||
}
|
||||
|
||||
${e} ::-webkit-file-upload-button {
|
||||
-webkit-appearance: button;
|
||||
font: inherit;
|
||||
}
|
||||
|
||||
${e} details {
|
||||
display: block;
|
||||
}
|
||||
|
||||
${e} summary {
|
||||
display: list-item;
|
||||
}
|
||||
|
||||
template {
|
||||
display: none;
|
||||
}
|
||||
|
||||
[hidden] {
|
||||
display: none !important;
|
||||
}
|
||||
|
||||
${e} :where(
|
||||
blockquote,
|
||||
dl,
|
||||
dd,
|
||||
h1,
|
||||
h2,
|
||||
h3,
|
||||
h4,
|
||||
h5,
|
||||
h6,
|
||||
hr,
|
||||
figure,
|
||||
p,
|
||||
pre
|
||||
) {
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
${e} button {
|
||||
background: transparent;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
${e} fieldset {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
${e} :where(ol, ul) {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
${e} textarea {
|
||||
resize: vertical;
|
||||
}
|
||||
|
||||
${e} :where(button, [role="button"]) {
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
${e} button::-moz-focus-inner {
|
||||
border: 0 !important;
|
||||
}
|
||||
|
||||
${e} table {
|
||||
border-collapse: collapse;
|
||||
}
|
||||
|
||||
${e} :where(h1, h2, h3, h4, h5, h6) {
|
||||
font-size: inherit;
|
||||
font-weight: inherit;
|
||||
}
|
||||
|
||||
${e} :where(button, input, optgroup, select, textarea) {
|
||||
padding: 0;
|
||||
line-height: inherit;
|
||||
color: inherit;
|
||||
}
|
||||
|
||||
${e} :where(img, svg, video, canvas, audio, iframe, embed, object) {
|
||||
display: block;
|
||||
}
|
||||
|
||||
${e} :where(img, video) {
|
||||
max-width: 100%;
|
||||
height: auto;
|
||||
}
|
||||
|
||||
[data-js-focus-visible]
|
||||
:focus:not([data-focus-visible-added]):not(
|
||||
[data-focus-visible-disabled]
|
||||
) {
|
||||
outline: none;
|
||||
box-shadow: none;
|
||||
}
|
||||
|
||||
${e} select::-ms-expand {
|
||||
display: none;
|
||||
}
|
||||
|
||||
${E}
|
||||
`}),g={light:"chakra-ui-light",dark:"chakra-ui-dark"};function Q(e={}){const{preventTransition:o=!0}=e,n={setDataset:r=>{const t=o?n.preventTransition():void 0;document.documentElement.dataset.theme=r,document.documentElement.style.colorScheme=r,t==null||t()},setClassName(r){document.body.classList.add(r?g.dark:g.light),document.body.classList.remove(r?g.light:g.dark)},query(){return window.matchMedia("(prefers-color-scheme: dark)")},getSystemTheme(r){var t;return((t=n.query().matches)!=null?t:r==="dark")?"dark":"light"},addListener(r){const t=n.query(),i=a=>{r(a.matches?"dark":"light")};return typeof t.addListener=="function"?t.addListener(i):t.addEventListener("change",i),()=>{typeof t.removeListener=="function"?t.removeListener(i):t.removeEventListener("change",i)}},preventTransition(){const r=document.createElement("style");return r.appendChild(document.createTextNode("*{-webkit-transition:none!important;-moz-transition:none!important;-o-transition:none!important;-ms-transition:none!important;transition:none!important}")),document.head.appendChild(r),()=>{window.getComputedStyle(document.body),requestAnimationFrame(()=>{requestAnimationFrame(()=>{document.head.removeChild(r)})})}}};return n}var X="chakra-ui-color-mode";function L(e){return{ssr:!1,type:"localStorage",get(o){if(!(globalThis!=null&&globalThis.document))return o;let n;try{n=localStorage.getItem(e)||o}catch{}return n||o},set(o){try{localStorage.setItem(e,o)}catch{}}}}var ee=L(X),M=()=>{};function S(e,o){return e.type==="cookie"&&e.ssr?e.get(o):o}function O(e){const{value:o,children:n,options:{useSystemColorMode:r,initialColorMode:t,disableTransitionOnChange:i}={},colorModeManager:a=ee}=e,d=t==="dark"?"dark":"light",[u,p]=l.useState(()=>S(a,d)),[y,b]=l.useState(()=>S(a)),{getSystemTheme:w,setClassName:k,setDataset:x,addListener:$}=l.useMemo(()=>Q({preventTransition:i}),[i]),v=t==="system"&&!u?y:u,c=l.useCallback(m=>{const f=m==="system"?w():m;p(f),k(f==="dark"),x(f),a.set(f)},[a,w,k,x]);A(()=>{t==="system"&&b(w())},[]),l.useEffect(()=>{const m=a.get();if(m){c(m);return}if(t==="system"){c("system");return}c(d)},[a,d,t,c]);const C=l.useCallback(()=>{c(v==="dark"?"light":"dark")},[v,c]);l.useEffect(()=>{if(r)return $(c)},[r,$,c]);const N=l.useMemo(()=>({colorMode:o??v,toggleColorMode:o?M:C,setColorMode:o?M:c,forced:o!==void 0}),[v,C,c,o]);return s.jsx(R.Provider,{value:N,children:n})}O.displayName="ColorModeProvider";var te=["borders","breakpoints","colors","components","config","direction","fonts","fontSizes","fontWeights","letterSpacings","lineHeights","radii","shadows","sizes","space","styles","transition","zIndices"];function re(e){return V(e)?te.every(o=>Object.prototype.hasOwnProperty.call(e,o)):!1}function h(e){return typeof e=="function"}function oe(...e){return o=>e.reduce((n,r)=>r(n),o)}var ne=e=>function(...n){let r=[...n],t=n[n.length-1];return re(t)&&r.length>1?r=r.slice(0,r.length-1):t=e,oe(...r.map(i=>a=>h(i)?i(a):ae(a,i)))(t)},ie=ne(j);function ae(...e){return z({},...e,_)}function _(e,o,n,r){if((h(e)||h(o))&&Object.prototype.hasOwnProperty.call(r,n))return(...t)=>{const i=h(e)?e(...t):e,a=h(o)?o(...t):o;return z({},i,a,_)}}var q=l.createContext({getDocument(){return document},getWindow(){return window}});q.displayName="EnvironmentContext";function I(e){const{children:o,environment:n,disabled:r}=e,t=l.useRef(null),i=l.useMemo(()=>n||{getDocument:()=>{var d,u;return(u=(d=t.current)==null?void 0:d.ownerDocument)!=null?u:document},getWindow:()=>{var d,u;return(u=(d=t.current)==null?void 
0:d.ownerDocument.defaultView)!=null?u:window}},[n]),a=!r||!n;return s.jsxs(q.Provider,{value:i,children:[o,a&&s.jsx("span",{id:"__chakra_env",hidden:!0,ref:t})]})}I.displayName="EnvironmentProvider";var se=e=>{const{children:o,colorModeManager:n,portalZIndex:r,resetScope:t,resetCSS:i=!0,theme:a={},environment:d,cssVarsRoot:u,disableEnvironment:p,disableGlobalStyle:y}=e,b=s.jsx(I,{environment:d,disabled:p,children:o});return s.jsx(D,{theme:a,cssVarsRoot:u,children:s.jsxs(O,{colorModeManager:n,options:a.config,children:[i?s.jsx(J,{scope:t}):s.jsx(B,{}),!y&&s.jsx(F,{}),r?s.jsx(G,{zIndex:r,children:b}):b]})})},le=e=>function({children:n,theme:r=e,toastOptions:t,...i}){return s.jsxs(se,{theme:r,...i,children:[s.jsx(W,{value:t==null?void 0:t.defaultOptions,children:n}),s.jsx(K,{...t})]})},de=le(j);const ue=()=>l.useMemo(()=>({colorScheme:"dark",fontFamily:"'Inter Variable', sans-serif",components:{ScrollArea:{defaultProps:{scrollbarSize:10},styles:{scrollbar:{"&:hover":{backgroundColor:"var(--invokeai-colors-baseAlpha-300)"}},thumb:{backgroundColor:"var(--invokeai-colors-baseAlpha-300)"}}}}}),[]),ce=L("@@invokeai-color-mode");function me({children:e}){const{i18n:o}=H(),n=o.dir(),r=l.useMemo(()=>ie({...U,direction:n}),[n]);l.useEffect(()=>{document.body.dir=n},[n]);const t=ue();return s.jsx(Z,{theme:t,children:s.jsx(de,{theme:r,colorModeManager:ce,toastOptions:Y,children:e})})}const ve=l.memo(me);export{ve as default};
|
||||
9
invokeai/frontend/web/dist/assets/ThemeLocaleProvider-f5f9aabf.css
vendored
Normal file
9
invokeai/frontend/web/dist/assets/ThemeLocaleProvider-f5f9aabf.css
vendored
Normal file
File diff suppressed because one or more lines are too long
BIN
invokeai/frontend/web/dist/assets/favicon-0d253ced.ico
vendored
Normal file
BIN
invokeai/frontend/web/dist/assets/favicon-0d253ced.ico
vendored
Normal file
Binary file not shown.
After Width: | Height: | Size: 116 KiB |
157
invokeai/frontend/web/dist/assets/index-f820e2e3.js
vendored
Normal file
157
invokeai/frontend/web/dist/assets/index-f820e2e3.js
vendored
Normal file
File diff suppressed because one or more lines are too long
BIN
invokeai/frontend/web/dist/assets/inter-cyrillic-ext-wght-normal-1c3007b8.woff2
vendored
Normal file
BIN
invokeai/frontend/web/dist/assets/inter-cyrillic-ext-wght-normal-1c3007b8.woff2
vendored
Normal file
Binary file not shown.
BIN
invokeai/frontend/web/dist/assets/inter-cyrillic-wght-normal-eba94878.woff2
vendored
Normal file
BIN
invokeai/frontend/web/dist/assets/inter-cyrillic-wght-normal-eba94878.woff2
vendored
Normal file
Binary file not shown.
BIN
invokeai/frontend/web/dist/assets/inter-greek-ext-wght-normal-81f77e51.woff2
vendored
Normal file
BIN
invokeai/frontend/web/dist/assets/inter-greek-ext-wght-normal-81f77e51.woff2
vendored
Normal file
Binary file not shown.
BIN
invokeai/frontend/web/dist/assets/inter-greek-wght-normal-d92c6cbc.woff2
vendored
Normal file
BIN
invokeai/frontend/web/dist/assets/inter-greek-wght-normal-d92c6cbc.woff2
vendored
Normal file
Binary file not shown.
BIN
invokeai/frontend/web/dist/assets/inter-latin-ext-wght-normal-a2bfd9fe.woff2
vendored
Normal file
BIN
invokeai/frontend/web/dist/assets/inter-latin-ext-wght-normal-a2bfd9fe.woff2
vendored
Normal file
Binary file not shown.
BIN
invokeai/frontend/web/dist/assets/inter-latin-wght-normal-88df0b5a.woff2
vendored
Normal file
BIN
invokeai/frontend/web/dist/assets/inter-latin-wght-normal-88df0b5a.woff2
vendored
Normal file
Binary file not shown.
BIN
invokeai/frontend/web/dist/assets/inter-vietnamese-wght-normal-15df7612.woff2
vendored
Normal file
BIN
invokeai/frontend/web/dist/assets/inter-vietnamese-wght-normal-15df7612.woff2
vendored
Normal file
Binary file not shown.
BIN
invokeai/frontend/web/dist/assets/logo-13003d72.png
vendored
Normal file
BIN
invokeai/frontend/web/dist/assets/logo-13003d72.png
vendored
Normal file
Binary file not shown.
After Width: | Height: | Size: 43 KiB |
25
invokeai/frontend/web/dist/index.html
vendored
Normal file
25
invokeai/frontend/web/dist/index.html
vendored
Normal file
@@ -0,0 +1,25 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<meta http-equiv="Cache-Control" content="no-cache, no-store, must-revalidate">
<meta http-equiv="Pragma" content="no-cache">
<meta http-equiv="Expires" content="0">
<title>InvokeAI - A Stable Diffusion Toolkit</title>
<link rel="shortcut icon" type="icon" href="./assets/favicon-0d253ced.ico" />
<style>
html,
body {
padding: 0;
margin: 0;
}
</style>
<script type="module" crossorigin src="./assets/index-f820e2e3.js"></script>
</head>

<body dir="ltr">
<div id="root"></div>

</body>
</html>

504
invokeai/frontend/web/dist/locales/ar.json
vendored
Normal file
504
invokeai/frontend/web/dist/locales/ar.json
vendored
Normal file
@@ -0,0 +1,504 @@
|
||||
{
|
||||
"common": {
|
||||
"hotkeysLabel": "مفاتيح الأختصار",
|
||||
"languagePickerLabel": "منتقي اللغة",
|
||||
"reportBugLabel": "بلغ عن خطأ",
|
||||
"settingsLabel": "إعدادات",
|
||||
"img2img": "صورة إلى صورة",
|
||||
"unifiedCanvas": "لوحة موحدة",
|
||||
"nodes": "عقد",
|
||||
"langArabic": "العربية",
|
||||
"nodesDesc": "نظام مبني على العقد لإنتاج الصور قيد التطوير حاليًا. تبقى على اتصال مع تحديثات حول هذه الميزة المذهلة.",
|
||||
"postProcessing": "معالجة بعد الإصدار",
|
||||
"postProcessDesc1": "Invoke AI توفر مجموعة واسعة من ميزات المعالجة بعد الإصدار. تحسين الصور واستعادة الوجوه متاحين بالفعل في واجهة الويب. يمكنك الوصول إليهم من الخيارات المتقدمة في قائمة الخيارات في علامة التبويب Text To Image و Image To Image. يمكن أيضًا معالجة الصور مباشرةً باستخدام أزرار الإجراء على الصورة فوق عرض الصورة الحالي أو في العارض.",
|
||||
"postProcessDesc2": "سيتم إصدار واجهة رسومية مخصصة قريبًا لتسهيل عمليات المعالجة بعد الإصدار المتقدمة.",
|
||||
"postProcessDesc3": "واجهة سطر الأوامر Invoke AI توفر ميزات أخرى عديدة بما في ذلك Embiggen.",
|
||||
"training": "تدريب",
|
||||
"trainingDesc1": "تدفق خاص مخصص لتدريب تضميناتك الخاصة ونقاط التحقق باستخدام العكس النصي و دريم بوث من واجهة الويب.",
|
||||
"trainingDesc2": " استحضر الذكاء الصناعي يدعم بالفعل تدريب تضمينات مخصصة باستخدام العكس النصي باستخدام السكريبت الرئيسي.",
|
||||
"upload": "رفع",
|
||||
"close": "إغلاق",
|
||||
"load": "تحميل",
|
||||
"back": "الى الخلف",
|
||||
"statusConnected": "متصل",
|
||||
"statusDisconnected": "غير متصل",
|
||||
"statusError": "خطأ",
|
||||
"statusPreparing": "جاري التحضير",
|
||||
"statusProcessingCanceled": "تم إلغاء المعالجة",
|
||||
"statusProcessingComplete": "اكتمال المعالجة",
|
||||
"statusGenerating": "جاري التوليد",
|
||||
"statusGeneratingTextToImage": "جاري توليد النص إلى الصورة",
|
||||
"statusGeneratingImageToImage": "جاري توليد الصورة إلى الصورة",
|
||||
"statusGeneratingInpainting": "جاري توليد Inpainting",
|
||||
"statusGeneratingOutpainting": "جاري توليد Outpainting",
|
||||
"statusGenerationComplete": "اكتمال التوليد",
|
||||
"statusIterationComplete": "اكتمال التكرار",
|
||||
"statusSavingImage": "جاري حفظ الصورة",
|
||||
"statusRestoringFaces": "جاري استعادة الوجوه",
|
||||
"statusRestoringFacesGFPGAN": "تحسيت الوجوه (جي إف بي جان)",
|
||||
"statusRestoringFacesCodeFormer": "تحسين الوجوه (كود فورمر)",
|
||||
"statusUpscaling": "تحسين الحجم",
|
||||
"statusUpscalingESRGAN": "تحسين الحجم (إي إس آر جان)",
|
||||
"statusLoadingModel": "تحميل النموذج",
|
||||
"statusModelChanged": "تغير النموذج"
|
||||
},
|
||||
"gallery": {
|
||||
"generations": "الأجيال",
|
||||
"showGenerations": "عرض الأجيال",
|
||||
"uploads": "التحميلات",
|
||||
"showUploads": "عرض التحميلات",
|
||||
"galleryImageSize": "حجم الصورة",
|
||||
"galleryImageResetSize": "إعادة ضبط الحجم",
|
||||
"gallerySettings": "إعدادات المعرض",
|
||||
"maintainAspectRatio": "الحفاظ على نسبة الأبعاد",
|
||||
"autoSwitchNewImages": "التبديل التلقائي إلى الصور الجديدة",
|
||||
"singleColumnLayout": "تخطيط عمود واحد",
|
||||
"allImagesLoaded": "تم تحميل جميع الصور",
|
||||
"loadMore": "تحميل المزيد",
|
||||
"noImagesInGallery": "لا توجد صور في المعرض"
|
||||
},
|
||||
"hotkeys": {
|
||||
"keyboardShortcuts": "مفاتيح الأزرار المختصرة",
|
||||
"appHotkeys": "مفاتيح التطبيق",
|
||||
"generalHotkeys": "مفاتيح عامة",
|
||||
"galleryHotkeys": "مفاتيح المعرض",
|
||||
"unifiedCanvasHotkeys": "مفاتيح اللوحةالموحدة ",
|
||||
"invoke": {
|
||||
"title": "أدعو",
|
||||
"desc": "إنشاء صورة"
|
||||
},
|
||||
"cancel": {
|
||||
"title": "إلغاء",
|
||||
"desc": "إلغاء إنشاء الصورة"
|
||||
},
|
||||
"focusPrompt": {
|
||||
"title": "تركيز الإشعار",
|
||||
"desc": "تركيز منطقة الإدخال الإشعار"
|
||||
},
|
||||
"toggleOptions": {
|
||||
"title": "تبديل الخيارات",
|
||||
"desc": "فتح وإغلاق لوحة الخيارات"
|
||||
},
|
||||
"pinOptions": {
|
||||
"title": "خيارات التثبيت",
|
||||
"desc": "ثبت لوحة الخيارات"
|
||||
},
|
||||
"toggleViewer": {
|
||||
"title": "تبديل العارض",
|
||||
"desc": "فتح وإغلاق مشاهد الصور"
|
||||
},
|
||||
"toggleGallery": {
|
||||
"title": "تبديل المعرض",
|
||||
"desc": "فتح وإغلاق درابزين المعرض"
|
||||
},
|
||||
"maximizeWorkSpace": {
|
||||
"title": "تكبير مساحة العمل",
|
||||
"desc": "إغلاق اللوحات وتكبير مساحة العمل"
|
||||
},
|
||||
"changeTabs": {
|
||||
"title": "تغيير الألسنة",
|
||||
"desc": "التبديل إلى مساحة عمل أخرى"
|
||||
},
|
||||
"consoleToggle": {
|
||||
"title": "تبديل الطرفية",
|
||||
"desc": "فتح وإغلاق الطرفية"
|
||||
},
|
||||
"setPrompt": {
|
||||
"title": "ضبط التشعب",
|
||||
"desc": "استخدم تشعب الصورة الحالية"
|
||||
},
|
||||
"setSeed": {
|
||||
"title": "ضبط البذور",
|
||||
"desc": "استخدم بذور الصورة الحالية"
|
||||
},
|
||||
"setParameters": {
|
||||
"title": "ضبط المعلمات",
|
||||
"desc": "استخدم جميع المعلمات الخاصة بالصورة الحالية"
|
||||
},
|
||||
"restoreFaces": {
|
||||
"title": "استعادة الوجوه",
|
||||
"desc": "استعادة الصورة الحالية"
|
||||
},
|
||||
"upscale": {
|
||||
"title": "تحسين الحجم",
|
||||
"desc": "تحسين حجم الصورة الحالية"
|
||||
},
|
||||
"showInfo": {
|
||||
"title": "عرض المعلومات",
|
||||
"desc": "عرض معلومات البيانات الخاصة بالصورة الحالية"
|
||||
},
|
||||
"sendToImageToImage": {
|
||||
"title": "أرسل إلى صورة إلى صورة",
|
||||
"desc": "أرسل الصورة الحالية إلى صورة إلى صورة"
|
||||
},
|
||||
"deleteImage": {
|
||||
"title": "حذف الصورة",
|
||||
"desc": "حذف الصورة الحالية"
|
||||
},
|
||||
"closePanels": {
|
||||
"title": "أغلق اللوحات",
|
||||
"desc": "يغلق اللوحات المفتوحة"
|
||||
},
|
||||
"previousImage": {
|
||||
"title": "الصورة السابقة",
|
||||
"desc": "عرض الصورة السابقة في الصالة"
|
||||
},
|
||||
"nextImage": {
|
||||
"title": "الصورة التالية",
|
||||
"desc": "عرض الصورة التالية في الصالة"
|
||||
},
|
||||
"toggleGalleryPin": {
|
||||
"title": "تبديل تثبيت الصالة",
|
||||
"desc": "يثبت ويفتح تثبيت الصالة على الواجهة الرسومية"
|
||||
},
|
||||
"increaseGalleryThumbSize": {
|
||||
"title": "زيادة حجم صورة الصالة",
|
||||
"desc": "يزيد حجم الصور المصغرة في الصالة"
|
||||
},
|
||||
"decreaseGalleryThumbSize": {
|
||||
"title": "انقاص حجم صورة الصالة",
|
||||
"desc": "ينقص حجم الصور المصغرة في الصالة"
|
||||
},
|
||||
"selectBrush": {
|
||||
"title": "تحديد الفرشاة",
|
||||
"desc": "يحدد الفرشاة على اللوحة"
|
||||
},
|
||||
"selectEraser": {
|
||||
"title": "تحديد الممحاة",
|
||||
"desc": "يحدد الممحاة على اللوحة"
|
||||
},
|
||||
"decreaseBrushSize": {
|
||||
"title": "تصغير حجم الفرشاة",
|
||||
"desc": "يصغر حجم الفرشاة/الممحاة على اللوحة"
|
||||
},
|
||||
"increaseBrushSize": {
|
||||
"title": "زيادة حجم الفرشاة",
|
||||
"desc": "يزيد حجم فرشة اللوحة / الممحاة"
|
||||
},
|
||||
"decreaseBrushOpacity": {
|
||||
"title": "تخفيض شفافية الفرشاة",
|
||||
"desc": "يخفض شفافية فرشة اللوحة"
|
||||
},
|
||||
"increaseBrushOpacity": {
|
||||
"title": "زيادة شفافية الفرشاة",
|
||||
"desc": "يزيد شفافية فرشة اللوحة"
|
||||
},
|
||||
"moveTool": {
|
||||
"title": "أداة التحريك",
|
||||
"desc": "يتيح التحرك في اللوحة"
|
||||
},
|
||||
"fillBoundingBox": {
|
||||
"title": "ملء الصندوق المحدد",
|
||||
"desc": "يملأ الصندوق المحدد بلون الفرشاة"
|
||||
},
|
||||
"eraseBoundingBox": {
|
||||
"title": "محو الصندوق المحدد",
|
||||
"desc": "يمحو منطقة الصندوق المحدد"
|
||||
},
|
||||
"colorPicker": {
|
||||
"title": "اختيار منتقي اللون",
|
||||
"desc": "يختار منتقي اللون الخاص باللوحة"
|
||||
},
|
||||
"toggleSnap": {
|
||||
"title": "تبديل التأكيد",
|
||||
"desc": "يبديل تأكيد الشبكة"
|
||||
},
|
||||
"quickToggleMove": {
|
||||
"title": "تبديل سريع للتحريك",
|
||||
"desc": "يبديل مؤقتا وضع التحريك"
|
||||
},
|
||||
"toggleLayer": {
|
||||
"title": "تبديل الطبقة",
|
||||
"desc": "يبديل إختيار الطبقة القناع / الأساسية"
|
||||
},
|
||||
"clearMask": {
|
||||
"title": "مسح القناع",
|
||||
"desc": "مسح القناع بأكمله"
|
||||
},
|
||||
"hideMask": {
|
||||
"title": "إخفاء الكمامة",
|
||||
"desc": "إخفاء وإظهار الكمامة"
|
||||
},
|
||||
"showHideBoundingBox": {
|
||||
"title": "إظهار / إخفاء علبة التحديد",
|
||||
"desc": "تبديل ظهور علبة التحديد"
|
||||
},
|
||||
"mergeVisible": {
|
||||
"title": "دمج الطبقات الظاهرة",
|
||||
"desc": "دمج جميع الطبقات الظاهرة في اللوحة"
|
||||
},
|
||||
"saveToGallery": {
|
||||
"title": "حفظ إلى صالة الأزياء",
|
||||
"desc": "حفظ اللوحة الحالية إلى صالة الأزياء"
|
||||
},
|
||||
"copyToClipboard": {
|
||||
"title": "نسخ إلى الحافظة",
|
||||
"desc": "نسخ اللوحة الحالية إلى الحافظة"
|
||||
},
|
||||
"downloadImage": {
|
||||
"title": "تنزيل الصورة",
|
||||
"desc": "تنزيل اللوحة الحالية"
|
||||
},
|
||||
"undoStroke": {
|
||||
"title": "تراجع عن الخط",
|
||||
"desc": "تراجع عن خط الفرشاة"
|
||||
},
|
||||
"redoStroke": {
|
||||
"title": "إعادة الخط",
|
||||
"desc": "إعادة خط الفرشاة"
|
||||
},
|
||||
"resetView": {
|
||||
"title": "إعادة تعيين العرض",
|
||||
"desc": "إعادة تعيين عرض اللوحة"
|
||||
},
|
||||
"previousStagingImage": {
|
||||
"title": "الصورة السابقة في المرحلة التجريبية",
|
||||
"desc": "الصورة السابقة في منطقة المرحلة التجريبية"
|
||||
},
|
||||
"nextStagingImage": {
|
||||
"title": "الصورة التالية في المرحلة التجريبية",
|
||||
"desc": "الصورة التالية في منطقة المرحلة التجريبية"
|
||||
},
|
||||
"acceptStagingImage": {
|
||||
"title": "قبول الصورة في المرحلة التجريبية",
|
||||
"desc": "قبول الصورة الحالية في منطقة المرحلة التجريبية"
|
||||
}
|
||||
},
|
||||
"modelManager": {
|
||||
"modelManager": "مدير النموذج",
|
||||
"model": "نموذج",
|
||||
"allModels": "جميع النماذج",
|
||||
"checkpointModels": "نقاط التحقق",
|
||||
"diffusersModels": "المصادر المتعددة",
|
||||
"safetensorModels": "التنسورات الآمنة",
|
||||
"modelAdded": "تمت إضافة النموذج",
|
||||
"modelUpdated": "تم تحديث النموذج",
|
||||
"modelEntryDeleted": "تم حذف مدخل النموذج",
|
||||
"cannotUseSpaces": "لا يمكن استخدام المساحات",
|
||||
"addNew": "إضافة جديد",
|
||||
"addNewModel": "إضافة نموذج جديد",
|
||||
"addCheckpointModel": "إضافة نقطة تحقق / نموذج التنسور الآمن",
|
||||
"addDiffuserModel": "إضافة مصادر متعددة",
|
||||
"addManually": "إضافة يدويًا",
|
||||
"manual": "يدوي",
|
||||
"name": "الاسم",
|
||||
"nameValidationMsg": "أدخل اسما لنموذجك",
|
||||
"description": "الوصف",
|
||||
"descriptionValidationMsg": "أضف وصفا لنموذجك",
|
||||
"config": "تكوين",
|
||||
"configValidationMsg": "مسار الملف الإعدادي لنموذجك.",
|
||||
"modelLocation": "موقع النموذج",
|
||||
"modelLocationValidationMsg": "موقع النموذج على الجهاز الخاص بك.",
|
||||
"repo_id": "معرف المستودع",
|
||||
"repoIDValidationMsg": "المستودع الإلكتروني لنموذجك",
|
||||
"vaeLocation": "موقع فاي إي",
|
||||
"vaeLocationValidationMsg": "موقع فاي إي على الجهاز الخاص بك.",
|
||||
"vaeRepoID": "معرف مستودع فاي إي",
|
||||
"vaeRepoIDValidationMsg": "المستودع الإلكتروني فاي إي",
|
||||
"width": "عرض",
|
||||
"widthValidationMsg": "عرض افتراضي لنموذجك.",
|
||||
"height": "ارتفاع",
|
||||
"heightValidationMsg": "ارتفاع افتراضي لنموذجك.",
|
||||
"addModel": "أضف نموذج",
|
||||
"updateModel": "تحديث النموذج",
|
||||
"availableModels": "النماذج المتاحة",
|
||||
"search": "بحث",
|
||||
"load": "تحميل",
|
||||
"active": "نشط",
|
||||
"notLoaded": "غير محمل",
|
||||
"cached": "مخبأ",
|
||||
"checkpointFolder": "مجلد التدقيق",
|
||||
"clearCheckpointFolder": "مسح مجلد التدقيق",
|
||||
"findModels": "إيجاد النماذج",
|
||||
"scanAgain": "فحص مرة أخرى",
|
||||
"modelsFound": "النماذج الموجودة",
|
||||
"selectFolder": "حدد المجلد",
|
||||
"selected": "تم التحديد",
|
||||
"selectAll": "حدد الكل",
|
||||
"deselectAll": "إلغاء تحديد الكل",
|
||||
"showExisting": "إظهار الموجود",
|
||||
"addSelected": "أضف المحدد",
|
||||
"modelExists": "النموذج موجود",
|
||||
"selectAndAdd": "حدد وأضف النماذج المدرجة أدناه",
|
||||
"noModelsFound": "لم يتم العثور على نماذج",
|
||||
"delete": "حذف",
|
||||
"deleteModel": "حذف النموذج",
|
||||
"deleteConfig": "حذف التكوين",
|
||||
"deleteMsg1": "هل أنت متأكد من رغبتك في حذف إدخال النموذج هذا من استحضر الذكاء الصناعي",
|
||||
"deleteMsg2": "هذا لن يحذف ملف نقطة التحكم للنموذج من القرص الخاص بك. يمكنك إعادة إضافتهم إذا كنت ترغب في ذلك.",
|
||||
"formMessageDiffusersModelLocation": "موقع النموذج للمصعد",
|
||||
"formMessageDiffusersModelLocationDesc": "يرجى إدخال واحد على الأقل.",
|
||||
"formMessageDiffusersVAELocation": "موقع فاي إي",
|
||||
"formMessageDiffusersVAELocationDesc": "إذا لم يتم توفيره، سيبحث استحضر الذكاء الصناعي عن ملف فاي إي داخل موقع النموذج المعطى أعلاه."
|
||||
},
|
||||
"parameters": {
|
||||
"images": "الصور",
|
||||
"steps": "الخطوات",
|
||||
"cfgScale": "مقياس الإعداد الذاتي للجملة",
|
||||
"width": "عرض",
|
||||
"height": "ارتفاع",
|
||||
"seed": "بذرة",
|
||||
"randomizeSeed": "تبديل بذرة",
|
||||
"shuffle": "تشغيل",
|
||||
"noiseThreshold": "عتبة الضوضاء",
|
||||
"perlinNoise": "ضجيج برلين",
|
||||
"variations": "تباينات",
|
||||
"variationAmount": "كمية التباين",
|
||||
"seedWeights": "أوزان البذور",
|
||||
"faceRestoration": "استعادة الوجه",
|
||||
"restoreFaces": "استعادة الوجوه",
|
||||
"type": "نوع",
|
||||
"strength": "قوة",
|
||||
"upscaling": "تصغير",
|
||||
"upscale": "تصغير",
|
||||
"upscaleImage": "تصغير الصورة",
|
||||
"scale": "مقياس",
|
||||
"otherOptions": "خيارات أخرى",
|
||||
"seamlessTiling": "تجهيز بلاستيكي بدون تشققات",
|
||||
"hiresOptim": "تحسين الدقة العالية",
|
||||
"imageFit": "ملائمة الصورة الأولية لحجم الخرج",
|
||||
"codeformerFidelity": "الوثوقية",
|
||||
"scaleBeforeProcessing": "تحجيم قبل المعالجة",
|
||||
"scaledWidth": "العرض المحجوب",
|
||||
"scaledHeight": "الارتفاع المحجوب",
|
||||
"infillMethod": "طريقة التعبئة",
|
||||
"tileSize": "حجم البلاطة",
|
||||
"boundingBoxHeader": "صندوق التحديد",
|
||||
"seamCorrectionHeader": "تصحيح التشقق",
|
||||
"infillScalingHeader": "التعبئة والتحجيم",
|
||||
"img2imgStrength": "قوة صورة إلى صورة",
|
||||
"toggleLoopback": "تبديل الإعادة",
|
||||
"sendTo": "أرسل إلى",
|
||||
"sendToImg2Img": "أرسل إلى صورة إلى صورة",
|
||||
"sendToUnifiedCanvas": "أرسل إلى الخطوط الموحدة",
|
||||
"copyImage": "نسخ الصورة",
|
||||
"copyImageToLink": "نسخ الصورة إلى الرابط",
|
||||
"downloadImage": "تحميل الصورة",
|
||||
"openInViewer": "فتح في العارض",
|
||||
"closeViewer": "إغلاق العارض",
|
||||
"usePrompt": "استخدم المحث",
|
||||
"useSeed": "استخدام البذور",
|
||||
"useAll": "استخدام الكل",
|
||||
"useInitImg": "استخدام الصورة الأولية",
|
||||
"info": "معلومات",
|
||||
"initialImage": "الصورة الأولية",
|
||||
"showOptionsPanel": "إظهار لوحة الخيارات"
|
||||
},
|
||||
"settings": {
|
||||
"models": "موديلات",
|
||||
"displayInProgress": "عرض الصور المؤرشفة",
|
||||
"saveSteps": "حفظ الصور كل n خطوات",
|
||||
"confirmOnDelete": "تأكيد عند الحذف",
|
||||
"displayHelpIcons": "عرض أيقونات المساعدة",
|
||||
"enableImageDebugging": "تمكين التصحيح عند التصوير",
|
||||
"resetWebUI": "إعادة تعيين واجهة الويب",
|
||||
"resetWebUIDesc1": "إعادة تعيين واجهة الويب يعيد فقط ذاكرة التخزين المؤقت للمتصفح لصورك وإعداداتك المذكورة. لا يحذف أي صور من القرص.",
|
||||
"resetWebUIDesc2": "إذا لم تظهر الصور في الصالة أو إذا كان شيء آخر غير ناجح، يرجى المحاولة إعادة تعيين قبل تقديم مشكلة على جيت هب.",
|
||||
"resetComplete": "تم إعادة تعيين واجهة الويب. تحديث الصفحة لإعادة التحميل."
|
||||
},
|
||||
"toast": {
|
||||
"tempFoldersEmptied": "تم تفريغ مجلد المؤقت",
|
||||
"uploadFailed": "فشل التحميل",
|
||||
"uploadFailedUnableToLoadDesc": "تعذر تحميل الملف",
|
||||
"downloadImageStarted": "بدأ تنزيل الصورة",
|
||||
"imageCopied": "تم نسخ الصورة",
|
||||
"imageLinkCopied": "تم نسخ رابط الصورة",
|
||||
"imageNotLoaded": "لم يتم تحميل أي صورة",
|
||||
"imageNotLoadedDesc": "لم يتم العثور على صورة لإرسالها إلى وحدة الصورة",
|
||||
"imageSavedToGallery": "تم حفظ الصورة في المعرض",
|
||||
"canvasMerged": "تم دمج الخط",
|
||||
"sentToImageToImage": "تم إرسال إلى صورة إلى صورة",
|
||||
"sentToUnifiedCanvas": "تم إرسال إلى لوحة موحدة",
|
||||
"parametersSet": "تم تعيين المعلمات",
|
||||
"parametersNotSet": "لم يتم تعيين المعلمات",
|
||||
"parametersNotSetDesc": "لم يتم العثور على معلمات بيانية لهذه الصورة.",
|
||||
"parametersFailed": "حدث مشكلة في تحميل المعلمات",
|
||||
"parametersFailedDesc": "تعذر تحميل صورة البدء.",
|
||||
"seedSet": "تم تعيين البذرة",
|
||||
"seedNotSet": "لم يتم تعيين البذرة",
|
||||
"seedNotSetDesc": "تعذر العثور على البذرة لهذه الصورة.",
|
||||
"promptSet": "تم تعيين الإشعار",
|
||||
"promptNotSet": "Prompt Not Set",
|
||||
"promptNotSetDesc": "تعذر العثور على الإشعار لهذه الصورة.",
|
||||
"upscalingFailed": "فشل التحسين",
|
||||
"faceRestoreFailed": "فشل استعادة الوجه",
|
||||
"metadataLoadFailed": "فشل تحميل البيانات الوصفية",
|
||||
"initialImageSet": "تم تعيين الصورة الأولية",
|
||||
"initialImageNotSet": "لم يتم تعيين الصورة الأولية",
|
||||
"initialImageNotSetDesc": "تعذر تحميل الصورة الأولية"
|
||||
},
|
||||
"tooltip": {
|
||||
"feature": {
|
||||
"prompt": "هذا هو حقل التحذير. يشمل التحذير عناصر الإنتاج والمصطلحات الأسلوبية. يمكنك إضافة الأوزان (أهمية الرمز) في التحذير أيضًا، ولكن أوامر CLI والمعلمات لن تعمل.",
|
||||
"gallery": "تعرض Gallery منتجات من مجلد الإخراج عندما يتم إنشاؤها. تخزن الإعدادات داخل الملفات ويتم الوصول إليها عن طريق قائمة السياق.",
|
||||
"other": "ستمكن هذه الخيارات من وضع عمليات معالجة بديلة لـاستحضر الذكاء الصناعي. سيؤدي 'الزخرفة بلا جدران' إلى إنشاء أنماط تكرارية في الإخراج. 'دقة عالية' هي الإنتاج خلال خطوتين عبر صورة إلى صورة: استخدم هذا الإعداد عندما ترغب في توليد صورة أكبر وأكثر تجانبًا دون العيوب. ستستغرق الأشياء وقتًا أطول من نص إلى صورة المعتاد.",
|
||||
"seed": "يؤثر قيمة البذور على الضوضاء الأولي الذي يتم تكوين الصورة منه. يمكنك استخدام البذور الخاصة بالصور السابقة. 'عتبة الضوضاء' يتم استخدامها لتخفيف العناصر الخللية في قيم CFG العالية (جرب مدى 0-10), و Perlin لإضافة ضوضاء Perlin أثناء الإنتاج: كلا منهما يعملان على إضافة التنوع إلى النتائج الخاصة بك.",
|
||||
"variations": "جرب التغيير مع قيمة بين 0.1 و 1.0 لتغيير النتائج لبذور معينة. التغييرات المثيرة للاهتمام للبذور تكون بين 0.1 و 0.3.",
|
||||
"upscale": "استخدم إي إس آر جان لتكبير الصورة على الفور بعد الإنتاج.",
|
||||
"faceCorrection": "تصحيح الوجه باستخدام جي إف بي جان أو كود فورمر: يكتشف الخوارزمية الوجوه في الصورة وتصحح أي عيوب. قيمة عالية ستغير الصورة أكثر، مما يؤدي إلى وجوه أكثر جمالا. كود فورمر بدقة أعلى يحتفظ بالصورة الأصلية على حساب تصحيح وجه أكثر قوة.",
|
||||
"imageToImage": "تحميل صورة إلى صورة أي صورة كأولية، والتي يتم استخدامها لإنشاء صورة جديدة مع التشعيب. كلما كانت القيمة أعلى، كلما تغيرت نتيجة الصورة. من الممكن أن تكون القيم بين 0.0 و 1.0، وتوصي النطاق الموصى به هو .25-.75",
|
||||
"boundingBox": "مربع الحدود هو نفس الإعدادات العرض والارتفاع لنص إلى صورة أو صورة إلى صورة. فقط المنطقة في المربع سيتم معالجتها.",
|
||||
"seamCorrection": "يتحكم بالتعامل مع الخطوط المرئية التي تحدث بين الصور المولدة في سطح اللوحة.",
|
||||
"infillAndScaling": "إدارة أساليب التعبئة (المستخدمة على المناطق المخفية أو الممحوة في سطح اللوحة) والزيادة في الحجم (مفيدة لحجوزات الإطارات الصغيرة)."
|
||||
}
|
||||
},
|
||||
"unifiedCanvas": {
|
||||
"layer": "طبقة",
|
||||
"base": "قاعدة",
|
||||
"mask": "قناع",
|
||||
"maskingOptions": "خيارات القناع",
|
||||
"enableMask": "مكن القناع",
|
||||
"preserveMaskedArea": "الحفاظ على المنطقة المقنعة",
|
||||
"clearMask": "مسح القناع",
|
||||
"brush": "فرشاة",
|
||||
"eraser": "ممحاة",
|
||||
"fillBoundingBox": "ملئ إطار الحدود",
|
||||
"eraseBoundingBox": "مسح إطار الحدود",
|
||||
"colorPicker": "اختيار اللون",
|
||||
"brushOptions": "خيارات الفرشاة",
|
||||
"brushSize": "الحجم",
|
||||
"move": "تحريك",
|
||||
"resetView": "إعادة تعيين العرض",
|
||||
"mergeVisible": "دمج الظاهر",
|
||||
"saveToGallery": "حفظ إلى المعرض",
|
||||
"copyToClipboard": "نسخ إلى الحافظة",
|
||||
"downloadAsImage": "تنزيل على شكل صورة",
|
||||
"undo": "تراجع",
|
||||
"redo": "إعادة",
|
||||
"clearCanvas": "مسح سبيكة الكاملة",
|
||||
"canvasSettings": "إعدادات سبيكة الكاملة",
|
||||
"showIntermediates": "إظهار الوسطاء",
|
||||
"showGrid": "إظهار الشبكة",
|
||||
"snapToGrid": "الالتفاف إلى الشبكة",
|
||||
"darkenOutsideSelection": "تعمية خارج التحديد",
|
||||
"autoSaveToGallery": "حفظ تلقائي إلى المعرض",
|
||||
"saveBoxRegionOnly": "حفظ منطقة الصندوق فقط",
|
||||
"limitStrokesToBox": "تحديد عدد الخطوط إلى الصندوق",
|
||||
"showCanvasDebugInfo": "إظهار معلومات تصحيح سبيكة الكاملة",
|
||||
"clearCanvasHistory": "مسح تاريخ سبيكة الكاملة",
|
||||
"clearHistory": "مسح التاريخ",
|
||||
"clearCanvasHistoryMessage": "مسح تاريخ اللوحة تترك اللوحة الحالية عائمة، ولكن تمسح بشكل غير قابل للتراجع تاريخ التراجع والإعادة.",
|
||||
"clearCanvasHistoryConfirm": "هل أنت متأكد من رغبتك في مسح تاريخ اللوحة؟",
|
||||
"emptyTempImageFolder": "إفراغ مجلد الصور المؤقتة",
|
||||
"emptyFolder": "إفراغ المجلد",
|
||||
"emptyTempImagesFolderMessage": "إفراغ مجلد الصور المؤقتة يؤدي أيضًا إلى إعادة تعيين اللوحة الموحدة بشكل كامل. وهذا يشمل كل تاريخ التراجع / الإعادة والصور في منطقة التخزين وطبقة الأساس لللوحة.",
|
||||
"emptyTempImagesFolderConfirm": "هل أنت متأكد من رغبتك في إفراغ مجلد الصور المؤقتة؟",
|
||||
"activeLayer": "الطبقة النشطة",
|
||||
"canvasScale": "مقياس اللوحة",
|
||||
"boundingBox": "صندوق الحدود",
|
||||
"scaledBoundingBox": "صندوق الحدود المكبر",
|
||||
"boundingBoxPosition": "موضع صندوق الحدود",
|
||||
"canvasDimensions": "أبعاد اللوحة",
|
||||
"canvasPosition": "موضع اللوحة",
|
||||
"cursorPosition": "موضع المؤشر",
|
||||
"previous": "السابق",
|
||||
"next": "التالي",
|
||||
"accept": "قبول",
|
||||
"showHide": "إظهار/إخفاء",
|
||||
"discardAll": "تجاهل الكل",
|
||||
"betaClear": "مسح",
|
||||
"betaDarkenOutside": "ظل الخارج",
|
||||
"betaLimitToBox": "تحديد إلى الصندوق",
|
||||
"betaPreserveMasked": "المحافظة على المخفية"
|
||||
}
|
||||
}
|
||||
973
invokeai/frontend/web/dist/locales/de.json
vendored
Normal file
973
invokeai/frontend/web/dist/locales/de.json
vendored
Normal file
@@ -0,0 +1,973 @@
|
||||
{
|
||||
"common": {
|
||||
"languagePickerLabel": "Sprachauswahl",
|
||||
"reportBugLabel": "Fehler melden",
|
||||
"settingsLabel": "Einstellungen",
|
||||
"img2img": "Bild zu Bild",
|
||||
"nodes": "Knoten Editor",
|
||||
"langGerman": "Deutsch",
|
||||
"nodesDesc": "Ein knotenbasiertes System, für die Erzeugung von Bildern, ist derzeit in der Entwicklung. Bleiben Sie gespannt auf Updates zu dieser fantastischen Funktion.",
|
||||
"postProcessing": "Nachbearbeitung",
|
||||
"postProcessDesc1": "InvokeAI bietet eine breite Palette von Nachbearbeitungsfunktionen. Bildhochskalierung und Gesichtsrekonstruktion sind bereits in der WebUI verfügbar. Sie können sie über das Menü Erweiterte Optionen der Reiter Text in Bild und Bild in Bild aufrufen. Sie können Bilder auch direkt bearbeiten, indem Sie die Schaltflächen für Bildaktionen oberhalb der aktuellen Bildanzeige oder im Viewer verwenden.",
|
||||
"postProcessDesc2": "Eine spezielle Benutzeroberfläche wird in Kürze veröffentlicht, um erweiterte Nachbearbeitungs-Workflows zu erleichtern.",
|
||||
"postProcessDesc3": "Die InvokeAI Kommandozeilen-Schnittstelle bietet verschiedene andere Funktionen, darunter Embiggen.",
|
||||
"training": "trainieren",
|
||||
"trainingDesc1": "Ein spezieller Arbeitsablauf zum Trainieren Ihrer eigenen Embeddings und Checkpoints mit Textual Inversion und Dreambooth über die Weboberfläche.",
|
||||
"trainingDesc2": "InvokeAI unterstützt bereits das Training von benutzerdefinierten Embeddings mit Textual Inversion unter Verwendung des Hauptskripts.",
|
||||
"upload": "Hochladen",
|
||||
"close": "Schließen",
|
||||
"load": "Laden",
|
||||
"statusConnected": "Verbunden",
|
||||
"statusDisconnected": "Getrennt",
|
||||
"statusError": "Fehler",
|
||||
"statusPreparing": "Vorbereiten",
|
||||
"statusProcessingCanceled": "Verarbeitung abgebrochen",
|
||||
"statusProcessingComplete": "Verarbeitung komplett",
|
||||
"statusGenerating": "Generieren",
|
||||
"statusGeneratingTextToImage": "Erzeugen von Text zu Bild",
|
||||
"statusGeneratingImageToImage": "Erzeugen von Bild zu Bild",
|
||||
"statusGeneratingInpainting": "Erzeuge Inpainting",
|
||||
"statusGeneratingOutpainting": "Erzeuge Outpainting",
|
||||
"statusGenerationComplete": "Generierung abgeschlossen",
|
||||
"statusIterationComplete": "Iteration abgeschlossen",
|
||||
"statusSavingImage": "Speichere Bild",
|
||||
"statusRestoringFaces": "Gesichter restaurieren",
|
||||
"statusRestoringFacesGFPGAN": "Gesichter restaurieren (GFPGAN)",
|
||||
"statusRestoringFacesCodeFormer": "Gesichter restaurieren (CodeFormer)",
|
||||
"statusUpscaling": "Hochskalierung",
|
||||
"statusUpscalingESRGAN": "Hochskalierung (ESRGAN)",
|
||||
"statusLoadingModel": "Laden des Modells",
|
||||
"statusModelChanged": "Modell Geändert",
|
||||
"cancel": "Abbrechen",
|
||||
"accept": "Annehmen",
|
||||
"back": "Zurück",
|
||||
"langEnglish": "Englisch",
|
||||
"langDutch": "Niederländisch",
|
||||
"langFrench": "Französisch",
|
||||
"langItalian": "Italienisch",
|
||||
"langPortuguese": "Portugiesisch",
|
||||
"langRussian": "Russisch",
|
||||
"langUkranian": "Ukrainisch",
|
||||
"hotkeysLabel": "Tastenkombinationen",
|
||||
"githubLabel": "Github",
|
||||
"discordLabel": "Discord",
|
||||
"txt2img": "Text zu Bild",
|
||||
"postprocessing": "Nachbearbeitung",
|
||||
"langPolish": "Polnisch",
|
||||
"langJapanese": "Japanisch",
|
||||
"langArabic": "Arabisch",
|
||||
"langKorean": "Koreanisch",
|
||||
"langHebrew": "Hebräisch",
|
||||
"langSpanish": "Spanisch",
|
||||
"t2iAdapter": "T2I Adapter",
|
||||
"communityLabel": "Gemeinschaft",
|
||||
"dontAskMeAgain": "Frag mich nicht nochmal",
|
||||
"loadingInvokeAI": "Lade Invoke AI",
|
||||
"statusMergedModels": "Modelle zusammengeführt",
|
||||
"areYouSure": "Bist du dir sicher?",
|
||||
"statusConvertingModel": "Model konvertieren",
|
||||
"on": "An",
|
||||
"nodeEditor": "Knoten Editor",
|
||||
"statusMergingModels": "Modelle zusammenführen",
|
||||
"langSimplifiedChinese": "Vereinfachtes Chinesisch",
|
||||
"ipAdapter": "IP Adapter",
|
||||
"controlAdapter": "Control Adapter",
|
||||
"auto": "Automatisch",
|
||||
"controlNet": "ControlNet",
|
||||
"imageFailedToLoad": "Kann Bild nicht laden",
|
||||
"statusModelConverted": "Model konvertiert",
|
||||
"modelManager": "Model Manager",
|
||||
"lightMode": "Heller Modus",
|
||||
"generate": "Erstellen",
|
||||
"learnMore": "Mehr lernen",
|
||||
"darkMode": "Dunkler Modus",
|
||||
"loading": "Lade",
|
||||
"random": "Zufall",
|
||||
"batch": "Stapel-Manager",
|
||||
"advanced": "Erweitert",
|
||||
"langBrPortuguese": "Portugiesisch (Brasilien)",
|
||||
"unifiedCanvas": "Einheitliche Leinwand",
|
||||
"openInNewTab": "In einem neuem Tab öffnen",
|
||||
"statusProcessing": "wird bearbeitet",
|
||||
"linear": "Linear",
|
||||
"imagePrompt": "Bild Prompt"
|
||||
},
|
||||
"gallery": {
|
||||
"generations": "Erzeugungen",
|
||||
"showGenerations": "Zeige Erzeugnisse",
|
||||
"uploads": "Uploads",
|
||||
"showUploads": "Zeige Uploads",
|
||||
"galleryImageSize": "Bildgröße",
|
||||
"galleryImageResetSize": "Größe zurücksetzen",
|
||||
"gallerySettings": "Galerie-Einstellungen",
|
||||
"maintainAspectRatio": "Seitenverhältnis beibehalten",
|
||||
"autoSwitchNewImages": "Automatisch zu neuen Bildern wechseln",
|
||||
"singleColumnLayout": "Einspaltiges Layout",
|
||||
"allImagesLoaded": "Alle Bilder geladen",
|
||||
"loadMore": "Mehr laden",
|
||||
"noImagesInGallery": "Keine Bilder in der Galerie",
|
||||
"loading": "Lade",
|
||||
"preparingDownload": "bereite Download vor",
|
||||
"preparingDownloadFailed": "Problem beim Download vorbereiten",
|
||||
"deleteImage": "Lösche Bild",
|
||||
"images": "Bilder",
|
||||
"copy": "Kopieren",
|
||||
"download": "Runterladen",
|
||||
"setCurrentImage": "Setze aktuelle Bild",
|
||||
"featuresWillReset": "Wenn Sie dieses Bild löschen, werden diese Funktionen sofort zurückgesetzt.",
|
||||
"deleteImageBin": "Gelöschte Bilder werden an den Papierkorb Ihres Betriebssystems gesendet.",
|
||||
"unableToLoad": "Galerie kann nicht geladen werden",
|
||||
"downloadSelection": "Auswahl herunterladen",
|
||||
"currentlyInUse": "Dieses Bild wird derzeit in den folgenden Funktionen verwendet:",
|
||||
"deleteImagePermanent": "Gelöschte Bilder können nicht wiederhergestellt werden.",
|
||||
"autoAssignBoardOnClick": "Board per Klick automatisch zuweisen"
|
||||
},
|
||||
"hotkeys": {
|
||||
"keyboardShortcuts": "Tastenkürzel",
|
||||
"appHotkeys": "App-Tastenkombinationen",
|
||||
"generalHotkeys": "Allgemeine Tastenkürzel",
|
||||
"galleryHotkeys": "Galerie Tastenkürzel",
|
||||
"unifiedCanvasHotkeys": "Unified Canvas Tastenkürzel",
|
||||
"invoke": {
|
||||
"desc": "Ein Bild erzeugen",
|
||||
"title": "Invoke"
|
||||
},
|
||||
"cancel": {
|
||||
"title": "Abbrechen",
|
||||
"desc": "Bilderzeugung abbrechen"
|
||||
},
|
||||
"focusPrompt": {
|
||||
"title": "Fokussiere Prompt",
|
||||
"desc": "Fokussieren des Eingabefeldes für den Prompt"
|
||||
},
|
||||
"toggleOptions": {
|
||||
"title": "Optionen umschalten",
|
||||
"desc": "Öffnen und Schließen des Optionsfeldes"
|
||||
},
|
||||
"pinOptions": {
|
||||
"title": "Optionen anheften",
|
||||
"desc": "Anheften des Optionsfeldes"
|
||||
},
|
||||
"toggleViewer": {
|
||||
"title": "Bildbetrachter umschalten",
|
||||
"desc": "Bildbetrachter öffnen und schließen"
|
||||
},
|
||||
"toggleGallery": {
|
||||
"title": "Galerie umschalten",
|
||||
"desc": "Öffnen und Schließen des Galerie-Schubfachs"
|
||||
},
|
||||
"maximizeWorkSpace": {
|
||||
"title": "Arbeitsbereich maximieren",
|
||||
"desc": "Schließen Sie die Panels und maximieren Sie den Arbeitsbereich"
|
||||
},
|
||||
"changeTabs": {
|
||||
"title": "Tabs wechseln",
|
||||
"desc": "Zu einem anderen Arbeitsbereich wechseln"
|
||||
},
|
||||
"consoleToggle": {
|
||||
"title": "Konsole Umschalten",
|
||||
"desc": "Konsole öffnen und schließen"
|
||||
},
|
||||
"setPrompt": {
|
||||
"title": "Prompt setzen",
|
||||
"desc": "Verwende den Prompt des aktuellen Bildes"
|
||||
},
|
||||
"setSeed": {
|
||||
"title": "Seed setzen",
|
||||
"desc": "Verwende den Seed des aktuellen Bildes"
|
||||
},
|
||||
"setParameters": {
|
||||
"title": "Parameter setzen",
|
||||
"desc": "Alle Parameter des aktuellen Bildes verwenden"
|
||||
},
|
||||
"restoreFaces": {
|
||||
"title": "Gesicht restaurieren",
|
||||
"desc": "Das aktuelle Bild restaurieren"
|
||||
},
|
||||
"upscale": {
|
||||
"title": "Hochskalieren",
|
||||
"desc": "Das aktuelle Bild hochskalieren"
|
||||
},
|
||||
"showInfo": {
|
||||
"title": "Info anzeigen",
|
||||
"desc": "Metadaten des aktuellen Bildes anzeigen"
|
||||
},
|
||||
"sendToImageToImage": {
|
||||
"title": "An Bild zu Bild senden",
|
||||
"desc": "Aktuelles Bild an Bild zu Bild senden"
|
||||
},
|
||||
"deleteImage": {
|
||||
"title": "Bild löschen",
|
||||
"desc": "Aktuelles Bild löschen"
|
||||
},
|
||||
"closePanels": {
|
||||
"title": "Panels schließen",
|
||||
"desc": "Schließt offene Panels"
|
||||
},
|
||||
"previousImage": {
|
||||
"title": "Vorheriges Bild",
|
||||
"desc": "Vorheriges Bild in der Galerie anzeigen"
|
||||
},
|
||||
"nextImage": {
|
||||
"title": "Nächstes Bild",
|
||||
"desc": "Nächstes Bild in Galerie anzeigen"
|
||||
},
|
||||
"toggleGalleryPin": {
|
||||
"title": "Galerie anheften umschalten",
|
||||
"desc": "Heftet die Galerie an die Benutzeroberfläche bzw. löst die sie"
|
||||
},
|
||||
"increaseGalleryThumbSize": {
|
||||
"title": "Größe der Galeriebilder erhöhen",
|
||||
"desc": "Vergrößert die Galerie-Miniaturansichten"
|
||||
},
|
||||
"decreaseGalleryThumbSize": {
|
||||
"title": "Größe der Galeriebilder verringern",
|
||||
"desc": "Verringert die Größe der Galerie-Miniaturansichten"
|
||||
},
|
||||
"selectBrush": {
|
||||
"title": "Pinsel auswählen",
|
||||
"desc": "Wählt den Leinwandpinsel aus"
|
||||
},
|
||||
"selectEraser": {
|
||||
"title": "Radiergummi auswählen",
|
||||
"desc": "Wählt den Radiergummi für die Leinwand aus"
|
||||
},
|
||||
"decreaseBrushSize": {
|
||||
"title": "Pinselgröße verkleinern",
|
||||
"desc": "Verringert die Größe des Pinsels/Radiergummis"
|
||||
},
|
||||
"increaseBrushSize": {
|
||||
"title": "Pinselgröße erhöhen",
|
||||
"desc": "Erhöht die Größe des Pinsels/Radiergummis"
|
||||
},
|
||||
"decreaseBrushOpacity": {
|
||||
"title": "Deckkraft des Pinsels vermindern",
|
||||
"desc": "Verringert die Deckkraft des Pinsels"
|
||||
},
|
||||
"increaseBrushOpacity": {
|
||||
"title": "Deckkraft des Pinsels erhöhen",
|
||||
"desc": "Erhöht die Deckkraft des Pinsels"
|
||||
},
|
||||
"moveTool": {
|
||||
"title": "Verschieben Werkzeug",
|
||||
"desc": "Ermöglicht die Navigation auf der Leinwand"
|
||||
},
|
||||
"fillBoundingBox": {
|
||||
"title": "Begrenzungsrahmen füllen",
|
||||
"desc": "Füllt den Begrenzungsrahmen mit Pinselfarbe"
|
||||
},
|
||||
"eraseBoundingBox": {
|
||||
"title": "Begrenzungsrahmen löschen",
|
||||
"desc": "Löscht den Bereich des Begrenzungsrahmens"
|
||||
},
|
||||
"colorPicker": {
|
||||
"title": "Farbpipette",
|
||||
"desc": "Farben aus dem Bild aufnehmen"
|
||||
},
|
||||
"toggleSnap": {
|
||||
"title": "Einrasten umschalten",
|
||||
"desc": "Schaltet Einrasten am Raster ein und aus"
|
||||
},
|
||||
"quickToggleMove": {
|
||||
"title": "Schnell Verschiebemodus",
|
||||
"desc": "Schaltet vorübergehend den Verschiebemodus um"
|
||||
},
|
||||
"toggleLayer": {
|
||||
"title": "Ebene umschalten",
|
||||
"desc": "Schaltet die Auswahl von Maske/Basisebene um"
|
||||
},
|
||||
"clearMask": {
|
||||
"title": "Lösche Maske",
|
||||
"desc": "Die gesamte Maske löschen"
|
||||
},
|
||||
"hideMask": {
|
||||
"title": "Maske ausblenden",
|
||||
"desc": "Maske aus- und einblenden"
|
||||
},
|
||||
"showHideBoundingBox": {
|
||||
"title": "Begrenzungsrahmen ein-/ausblenden",
|
||||
"desc": "Sichtbarkeit des Begrenzungsrahmens ein- und ausschalten"
|
||||
},
|
||||
"mergeVisible": {
|
||||
"title": "Sichtbares Zusammenführen",
|
||||
"desc": "Alle sichtbaren Ebenen der Leinwand zusammenführen"
|
||||
},
|
||||
"saveToGallery": {
|
||||
"title": "In Galerie speichern",
|
||||
"desc": "Aktuelle Leinwand in Galerie speichern"
|
||||
},
|
||||
"copyToClipboard": {
|
||||
"title": "In die Zwischenablage kopieren",
|
||||
"desc": "Aktuelle Leinwand in die Zwischenablage kopieren"
|
||||
},
|
||||
"downloadImage": {
|
||||
"title": "Bild herunterladen",
|
||||
"desc": "Aktuelle Leinwand herunterladen"
|
||||
},
|
||||
"undoStroke": {
|
||||
"title": "Pinselstrich rückgängig machen",
|
||||
"desc": "Einen Pinselstrich rückgängig machen"
|
||||
},
|
||||
"redoStroke": {
|
||||
"title": "Pinselstrich wiederherstellen",
|
||||
"desc": "Einen Pinselstrich wiederherstellen"
|
||||
},
|
||||
"resetView": {
|
||||
"title": "Ansicht zurücksetzen",
|
||||
"desc": "Leinwandansicht zurücksetzen"
|
||||
},
|
||||
"previousStagingImage": {
|
||||
"title": "Vorheriges Staging-Bild",
|
||||
"desc": "Bild des vorherigen Staging-Bereichs"
|
||||
},
|
||||
"nextStagingImage": {
|
||||
"title": "Nächstes Staging-Bild",
|
||||
"desc": "Bild des nächsten Staging-Bereichs"
|
||||
},
|
||||
"acceptStagingImage": {
|
||||
"title": "Staging-Bild akzeptieren",
|
||||
"desc": "Akzeptieren Sie das aktuelle Bild des Staging-Bereichs"
|
||||
},
|
||||
"nodesHotkeys": "Knoten Tastenkürzel",
|
||||
"addNodes": {
|
||||
"title": "Knotenpunkt hinzufügen",
|
||||
"desc": "Öffnet das Menü zum Hinzufügen von Knoten"
|
||||
}
|
||||
},
|
||||
"modelManager": {
|
||||
"modelAdded": "Model hinzugefügt",
|
||||
"modelUpdated": "Model aktualisiert",
|
||||
"modelEntryDeleted": "Modelleintrag gelöscht",
|
||||
"cannotUseSpaces": "Leerzeichen können nicht verwendet werden",
|
||||
"addNew": "Neue hinzufügen",
|
||||
"addNewModel": "Neues Model hinzufügen",
|
||||
"addManually": "Manuell hinzufügen",
|
||||
"nameValidationMsg": "Geben Sie einen Namen für Ihr Model ein",
|
||||
"description": "Beschreibung",
|
||||
"descriptionValidationMsg": "Fügen Sie eine Beschreibung für Ihr Model hinzu",
|
||||
"config": "Konfiguration",
|
||||
"configValidationMsg": "Pfad zur Konfigurationsdatei Ihres Models.",
|
||||
"modelLocation": "Ort des Models",
|
||||
"modelLocationValidationMsg": "Pfad zum Speicherort Ihres Models",
|
||||
"vaeLocation": "VAE Ort",
|
||||
"vaeLocationValidationMsg": "Pfad zum Speicherort Ihres VAE.",
|
||||
"width": "Breite",
|
||||
"widthValidationMsg": "Standardbreite Ihres Models.",
|
||||
"height": "Höhe",
|
||||
"heightValidationMsg": "Standardbhöhe Ihres Models.",
|
||||
"addModel": "Model hinzufügen",
|
||||
"updateModel": "Model aktualisieren",
|
||||
"availableModels": "Verfügbare Models",
|
||||
"search": "Suche",
|
||||
"load": "Laden",
|
||||
"active": "Aktiv",
|
||||
"notLoaded": "nicht geladen",
|
||||
"cached": "zwischengespeichert",
|
||||
"checkpointFolder": "Checkpoint-Ordner",
|
||||
"clearCheckpointFolder": "Checkpoint-Ordner löschen",
|
||||
"findModels": "Models finden",
|
||||
"scanAgain": "Erneut scannen",
|
||||
"modelsFound": "Models gefunden",
|
||||
"selectFolder": "Ordner auswählen",
|
||||
"selected": "Ausgewählt",
|
||||
"selectAll": "Alles auswählen",
|
||||
"deselectAll": "Alle abwählen",
|
||||
"showExisting": "Vorhandene anzeigen",
|
||||
"addSelected": "Auswahl hinzufügen",
|
||||
"modelExists": "Model existiert",
|
||||
"selectAndAdd": "Unten aufgeführte Models auswählen und hinzufügen",
|
||||
"noModelsFound": "Keine Models gefunden",
|
||||
"delete": "Löschen",
|
||||
"deleteModel": "Model löschen",
|
||||
"deleteConfig": "Konfiguration löschen",
|
||||
"deleteMsg1": "Möchten Sie diesen Model-Eintrag wirklich aus InvokeAI löschen?",
|
||||
"deleteMsg2": "Dadurch WIRD das Modell von der Festplatte gelöscht WENN es im InvokeAI Root Ordner liegt. Wenn es in einem anderem Ordner liegt wird das Modell NICHT von der Festplatte gelöscht.",
|
||||
"customConfig": "Benutzerdefinierte Konfiguration",
|
||||
"invokeRoot": "InvokeAI Ordner",
|
||||
"formMessageDiffusersVAELocationDesc": "Falls nicht angegeben, sucht InvokeAI nach der VAE-Datei innerhalb des oben angegebenen Modell Speicherortes.",
|
||||
"checkpointModels": "Kontrollpunkte",
|
||||
"convert": "Umwandeln",
|
||||
"addCheckpointModel": "Kontrollpunkt / SafeTensors Modell hinzufügen",
|
||||
"allModels": "Alle Modelle",
|
||||
"alpha": "Alpha",
|
||||
"addDifference": "Unterschied hinzufügen",
|
||||
"convertToDiffusersHelpText2": "Bei diesem Vorgang wird Ihr Eintrag im Modell-Manager durch die Diffusor-Version desselben Modells ersetzt.",
|
||||
"convertToDiffusersHelpText5": "Bitte stellen Sie sicher, dass Sie über genügend Speicherplatz verfügen. Die Modelle sind in der Regel zwischen 2 GB und 7 GB groß.",
|
||||
"convertToDiffusersHelpText3": "Ihre Kontrollpunktdatei auf der Festplatte wird NICHT gelöscht oder in irgendeiner Weise verändert. Sie können Ihren Kontrollpunkt dem Modell-Manager wieder hinzufügen, wenn Sie dies wünschen.",
|
||||
"convertToDiffusersHelpText4": "Dies ist ein einmaliger Vorgang. Er kann je nach den Spezifikationen Ihres Computers etwa 30-60 Sekunden dauern.",
|
||||
"convertToDiffusersHelpText6": "Möchten Sie dieses Modell konvertieren?",
|
||||
"custom": "Benutzerdefiniert",
|
||||
"modelConverted": "Modell umgewandelt",
|
||||
"inverseSigmoid": "Inverses Sigmoid",
|
||||
"invokeAIFolder": "Invoke AI Ordner",
|
||||
"formMessageDiffusersModelLocationDesc": "Bitte geben Sie mindestens einen an.",
|
||||
"customSaveLocation": "Benutzerdefinierter Speicherort",
|
||||
"formMessageDiffusersVAELocation": "VAE Speicherort",
|
||||
"mergedModelCustomSaveLocation": "Benutzerdefinierter Pfad",
|
||||
"modelMergeHeaderHelp2": "Nur Diffusers sind für die Zusammenführung verfügbar. Wenn Sie ein Kontrollpunktmodell zusammenführen möchten, konvertieren Sie es bitte zuerst in Diffusers.",
|
||||
"manual": "Manuell",
|
||||
"modelManager": "Modell Manager",
|
||||
"modelMergeAlphaHelp": "Alpha steuert die Überblendungsstärke für die Modelle. Niedrigere Alphawerte führen zu einem geringeren Einfluss des zweiten Modells.",
|
||||
"modelMergeHeaderHelp1": "Sie können bis zu drei verschiedene Modelle miteinander kombinieren, um eine Mischung zu erstellen, die Ihren Bedürfnissen entspricht.",
|
||||
"ignoreMismatch": "Unstimmigkeiten zwischen ausgewählten Modellen ignorieren",
|
||||
"model": "Modell",
|
||||
"convertToDiffusersSaveLocation": "Speicherort",
|
||||
"pathToCustomConfig": "Pfad zur benutzerdefinierten Konfiguration",
|
||||
"v1": "v1",
|
||||
"modelMergeInterpAddDifferenceHelp": "In diesem Modus wird zunächst Modell 3 von Modell 2 subtrahiert. Die resultierende Version wird mit Modell 1 mit dem oben eingestellten Alphasatz gemischt.",
|
||||
"modelTwo": "Modell 2",
|
||||
"modelOne": "Modell 1",
|
||||
"v2_base": "v2 (512px)",
|
||||
"scanForModels": "Nach Modellen suchen",
|
||||
"name": "Name",
|
||||
"safetensorModels": "SafeTensors",
|
||||
"pickModelType": "Modell Typ auswählen",
|
||||
"sameFolder": "Gleicher Ordner",
|
||||
"modelThree": "Modell 3",
|
||||
"v2_768": "v2 (768px)",
|
||||
"none": "Nix",
|
||||
"repoIDValidationMsg": "Online Repo Ihres Modells",
|
||||
"vaeRepoIDValidationMsg": "Online Repo Ihrer VAE",
|
||||
"importModels": "Importiere Modelle",
|
||||
"merge": "Zusammenführen",
|
||||
"addDiffuserModel": "Diffusers hinzufügen",
|
||||
"advanced": "Erweitert",
|
||||
"closeAdvanced": "Schließe Erweitert",
|
||||
"convertingModelBegin": "Konvertiere Modell. Bitte warten.",
|
||||
"customConfigFileLocation": "Benutzerdefinierte Konfiguration Datei Speicherort",
|
||||
"baseModel": "Basis Modell",
|
||||
"convertToDiffusers": "Konvertiere zu Diffusers",
|
||||
"diffusersModels": "Diffusers",
|
||||
"noCustomLocationProvided": "Kein benutzerdefinierter Standort angegeben",
|
||||
"onnxModels": "Onnx",
|
||||
"vaeRepoID": "VAE-Repo-ID",
|
||||
"weightedSum": "Gewichtete Summe",
|
||||
"syncModelsDesc": "Wenn Ihre Modelle nicht mit dem Backend synchronisiert sind, können Sie sie mit dieser Option aktualisieren. Dies ist im Allgemeinen praktisch, wenn Sie Ihre models.yaml-Datei manuell aktualisieren oder Modelle zum InvokeAI-Stammordner hinzufügen, nachdem die Anwendung gestartet wurde.",
|
||||
"vae": "VAE",
|
||||
"noModels": "Keine Modelle gefunden",
|
||||
"statusConverting": "Konvertieren",
|
||||
"sigmoid": "Sigmoid",
|
||||
"predictionType": "Vorhersagetyp (für Stable Diffusion 2.x-Modelle und gelegentliche Stable Diffusion 1.x-Modelle)",
|
||||
"selectModel": "Wählen Sie Modell aus",
|
||||
"repo_id": "Repo-ID",
|
||||
"modelSyncFailed": "Modellsynchronisierung fehlgeschlagen",
|
||||
"quickAdd": "Schnell hinzufügen",
|
||||
"simpleModelDesc": "Geben Sie einen Pfad zu einem lokalen Diffusers-Modell, einem lokalen Checkpoint-/Safetensors-Modell, einer HuggingFace-Repo-ID oder einer Checkpoint-/Diffusers-Modell-URL an.",
|
||||
"modelDeleted": "Modell gelöscht",
|
||||
"inpainting": "v1 Ausmalen",
|
||||
"modelUpdateFailed": "Modellaktualisierung fehlgeschlagen",
|
||||
"useCustomConfig": "Benutzerdefinierte Konfiguration verwenden",
|
||||
"settings": "Einstellungen",
|
||||
"modelConversionFailed": "Modellkonvertierung fehlgeschlagen",
|
||||
"syncModels": "Modelle synchronisieren",
|
||||
"mergedModelSaveLocation": "Speicherort",
|
||||
"modelType": "Modelltyp",
|
||||
"modelsMerged": "Modelle zusammengeführt",
|
||||
"modelsMergeFailed": "Modellzusammenführung fehlgeschlagen",
|
||||
"convertToDiffusersHelpText1": "Dieses Modell wird in das 🧨 Diffusers-Format konvertiert.",
|
||||
"modelsSynced": "Modelle synchronisiert",
|
||||
"vaePrecision": "VAE-Präzision",
|
||||
"mergeModels": "Modelle zusammenführen",
|
||||
"interpolationType": "Interpolationstyp",
|
||||
"oliveModels": "Olives",
|
||||
"variant": "Variante",
|
||||
"loraModels": "LoRAs",
|
||||
"modelDeleteFailed": "Modell konnte nicht gelöscht werden",
|
||||
"mergedModelName": "Zusammengeführter Modellname"
|
||||
},
|
||||
"parameters": {
|
||||
"images": "Bilder",
|
||||
"steps": "Schritte",
|
||||
"cfgScale": "CFG-Skala",
|
||||
"width": "Breite",
|
||||
"height": "Höhe",
|
||||
"randomizeSeed": "Zufälliger Seed",
|
||||
"shuffle": "Mischen",
|
||||
"noiseThreshold": "Rausch-Schwellenwert",
|
||||
"perlinNoise": "Perlin-Rauschen",
|
||||
"variations": "Variationen",
|
||||
"variationAmount": "Höhe der Abweichung",
|
||||
"seedWeights": "Seed-Gewichte",
|
||||
"faceRestoration": "Gesichtsrestaurierung",
|
||||
"restoreFaces": "Gesichter wiederherstellen",
|
||||
"type": "Art",
|
||||
"strength": "Stärke",
|
||||
"upscaling": "Hochskalierung",
|
||||
"upscale": "Hochskalieren (Shift + U)",
|
||||
"upscaleImage": "Bild hochskalieren",
|
||||
"scale": "Maßstab",
|
||||
"otherOptions": "Andere Optionen",
|
||||
"seamlessTiling": "Nahtlose Kacheln",
|
||||
"hiresOptim": "High-Res-Optimierung",
|
||||
"imageFit": "Ausgangsbild an Ausgabegröße anpassen",
|
||||
"codeformerFidelity": "Glaubwürdigkeit",
|
||||
"scaleBeforeProcessing": "Skalieren vor der Verarbeitung",
|
||||
"scaledWidth": "Skaliert W",
|
||||
"scaledHeight": "Skaliert H",
|
||||
"infillMethod": "Infill-Methode",
|
||||
"tileSize": "Kachelgröße",
|
||||
"boundingBoxHeader": "Begrenzungsrahmen",
|
||||
"seamCorrectionHeader": "Nahtkorrektur",
|
||||
"infillScalingHeader": "Infill und Skalierung",
|
||||
"img2imgStrength": "Bild-zu-Bild-Stärke",
|
||||
"toggleLoopback": "Loopback umschalten",
|
||||
"sendTo": "Senden an",
|
||||
"sendToImg2Img": "Senden an Bild zu Bild",
|
||||
"sendToUnifiedCanvas": "Senden an Unified Canvas",
|
||||
"copyImageToLink": "Bild-Link kopieren",
|
||||
"downloadImage": "Bild herunterladen",
|
||||
"openInViewer": "Im Viewer öffnen",
|
||||
"closeViewer": "Viewer schließen",
|
||||
"usePrompt": "Prompt verwenden",
|
||||
"useSeed": "Seed verwenden",
|
||||
"useAll": "Alle verwenden",
|
||||
"useInitImg": "Ausgangsbild verwenden",
|
||||
"initialImage": "Ursprüngliches Bild",
|
||||
"showOptionsPanel": "Optionsleiste zeigen",
|
||||
"cancel": {
|
||||
"setType": "Abbruchart festlegen",
|
||||
"immediate": "Sofort abbrechen",
|
||||
"schedule": "Abbrechen nach der aktuellen Iteration",
|
||||
"isScheduled": "Abbrechen"
|
||||
},
|
||||
"copyImage": "Bild kopieren",
|
||||
"denoisingStrength": "Stärke der Entrauschung",
|
||||
"symmetry": "Symmetrie",
|
||||
"imageToImage": "Bild zu Bild",
|
||||
"info": "Information",
|
||||
"general": "Allgemein",
|
||||
"hiresStrength": "High Res Stärke",
|
||||
"hidePreview": "Verstecke Vorschau",
|
||||
"showPreview": "Zeige Vorschau"
|
||||
},
|
||||
"settings": {
|
||||
"displayInProgress": "Bilder in Bearbeitung anzeigen",
|
||||
"saveSteps": "Speichern der Bilder alle n Schritte",
|
||||
"confirmOnDelete": "Bestätigen beim Löschen",
|
||||
"displayHelpIcons": "Hilfesymbole anzeigen",
|
||||
"enableImageDebugging": "Bild-Debugging aktivieren",
|
||||
"resetWebUI": "Web-Oberfläche zurücksetzen",
|
||||
"resetWebUIDesc1": "Das Zurücksetzen der Web-Oberfläche setzt nur den lokalen Cache des Browsers mit Ihren Bildern und gespeicherten Einstellungen zurück. Es werden keine Bilder von der Festplatte gelöscht.",
|
||||
"resetWebUIDesc2": "Wenn die Bilder nicht in der Galerie angezeigt werden oder etwas anderes nicht funktioniert, versuchen Sie bitte, die Einstellungen zurückzusetzen, bevor Sie einen Fehler auf GitHub melden.",
|
||||
"resetComplete": "Die Web-Oberfläche wurde zurückgesetzt.",
|
||||
"models": "Modelle",
|
||||
"useSlidersForAll": "Schieberegler für alle Optionen verwenden"
|
||||
},
|
||||
"toast": {
|
||||
"tempFoldersEmptied": "Temp-Ordner geleert",
|
||||
"uploadFailed": "Hochladen fehlgeschlagen",
|
||||
"uploadFailedUnableToLoadDesc": "Datei kann nicht geladen werden",
|
||||
"downloadImageStarted": "Bild wird heruntergeladen",
|
||||
"imageCopied": "Bild kopiert",
|
||||
"imageLinkCopied": "Bildlink kopiert",
|
||||
"imageNotLoaded": "Kein Bild geladen",
|
||||
"imageNotLoadedDesc": "Konnte kein Bild finden",
|
||||
"imageSavedToGallery": "Bild in die Galerie gespeichert",
|
||||
"canvasMerged": "Leinwand zusammengeführt",
|
||||
"sentToImageToImage": "Gesendet an Bild zu Bild",
|
||||
"sentToUnifiedCanvas": "Gesendet an Unified Canvas",
|
||||
"parametersSet": "Parameter festlegen",
|
||||
"parametersNotSet": "Parameter nicht festgelegt",
|
||||
"parametersNotSetDesc": "Keine Metadaten für dieses Bild gefunden.",
|
||||
"parametersFailed": "Problem beim Laden der Parameter",
|
||||
"parametersFailedDesc": "Ausgangsbild kann nicht geladen werden.",
|
||||
"seedSet": "Seed festlegen",
|
||||
"seedNotSet": "Saatgut nicht festgelegt",
|
||||
"seedNotSetDesc": "Für dieses Bild wurde kein Seed gefunden.",
|
||||
"promptSet": "Prompt festgelegt",
|
||||
"promptNotSet": "Prompt nicht festgelegt",
|
||||
"promptNotSetDesc": "Für dieses Bild wurde kein Prompt gefunden.",
|
||||
"upscalingFailed": "Hochskalierung fehlgeschlagen",
|
||||
"faceRestoreFailed": "Gesichtswiederherstellung fehlgeschlagen",
|
||||
"metadataLoadFailed": "Metadaten konnten nicht geladen werden",
|
||||
"initialImageSet": "Ausgangsbild festgelegt",
|
||||
"initialImageNotSet": "Ausgangsbild nicht festgelegt",
|
||||
"initialImageNotSetDesc": "Ausgangsbild konnte nicht geladen werden"
|
||||
},
|
||||
"tooltip": {
|
||||
"feature": {
|
||||
"prompt": "Dies ist das Prompt-Feld. Ein Prompt enthält Generierungsobjekte und stilistische Begriffe. Sie können auch Gewichtungen (Token-Bedeutung) dem Prompt hinzufügen, aber CLI-Befehle und Parameter funktionieren nicht.",
|
||||
"gallery": "Die Galerie zeigt erzeugte Bilder aus dem Ausgabeordner an, sobald sie erstellt wurden. Die Einstellungen werden in den Dateien gespeichert und können über das Kontextmenü aufgerufen werden.",
|
||||
"other": "Mit diesen Optionen werden alternative Verarbeitungsmodi für InvokeAI aktiviert. 'Nahtlose Kachelung' erzeugt sich wiederholende Muster in der Ausgabe. 'Hohe Auflösungen' werden in zwei Schritten mit img2img erzeugt: Verwenden Sie diese Einstellung, wenn Sie ein größeres und kohärenteres Bild ohne Artefakte wünschen. Es dauert länger als das normale txt2img.",
|
||||
"seed": "Der Seed-Wert beeinflusst das Ausgangsrauschen, aus dem das Bild erstellt wird. Sie können die bereits vorhandenen Seeds von früheren Bildern verwenden. 'Der Rauschschwellenwert' wird verwendet, um Artefakte bei hohen CFG-Werten abzuschwächen (versuchen Sie es im Bereich 0-10), und Perlin, um während der Erzeugung Perlin-Rauschen hinzuzufügen: Beide dienen dazu, Ihre Ergebnisse zu variieren.",
|
||||
"variations": "Versuchen Sie eine Variation mit einem Wert zwischen 0,1 und 1,0, um das Ergebnis für ein bestimmtes Seed zu ändern. Interessante Variationen des Seeds liegen zwischen 0,1 und 0,3.",
|
||||
"upscale": "Verwenden Sie ESRGAN, um das Bild unmittelbar nach der Erzeugung zu vergrößern.",
|
||||
"faceCorrection": "Gesichtskorrektur mit GFPGAN oder Codeformer: Der Algorithmus erkennt Gesichter im Bild und korrigiert alle Fehler. Ein hoher Wert verändert das Bild stärker, was zu attraktiveren Gesichtern führt. Codeformer mit einer höheren Genauigkeit bewahrt das Originalbild auf Kosten einer stärkeren Gesichtskorrektur.",
|
||||
"imageToImage": "Bild zu Bild lädt ein beliebiges Bild als Ausgangsbild, aus dem dann zusammen mit dem Prompt ein neues Bild erzeugt wird. Je höher der Wert ist, desto stärker wird das Ergebnisbild verändert. Werte von 0,0 bis 1,0 sind möglich, der empfohlene Bereich ist .25-.75",
|
||||
"boundingBox": "Der Begrenzungsrahmen ist derselbe wie die Einstellungen für Breite und Höhe bei Text zu Bild oder Bild zu Bild. Es wird nur der Bereich innerhalb des Rahmens verarbeitet.",
|
||||
"seamCorrection": "Steuert die Behandlung von sichtbaren Übergängen, die zwischen den erzeugten Bildern auf der Leinwand auftreten.",
|
||||
"infillAndScaling": "Verwalten Sie Infill-Methoden (für maskierte oder gelöschte Bereiche der Leinwand) und Skalierung (nützlich für kleine Begrenzungsrahmengrößen)."
|
||||
}
|
||||
},
|
||||
"unifiedCanvas": {
|
||||
"layer": "Ebene",
|
||||
"base": "Basis",
|
||||
"mask": "Maske",
|
||||
"maskingOptions": "Maskierungsoptionen",
|
||||
"enableMask": "Maske aktivieren",
|
||||
"preserveMaskedArea": "Maskierten Bereich bewahren",
|
||||
"clearMask": "Maske löschen",
|
||||
"brush": "Pinsel",
|
||||
"eraser": "Radierer",
|
||||
"fillBoundingBox": "Begrenzungsrahmen füllen",
|
||||
"eraseBoundingBox": "Begrenzungsrahmen löschen",
|
||||
"colorPicker": "Farbpipette",
|
||||
"brushOptions": "Pinseloptionen",
|
||||
"brushSize": "Größe",
|
||||
"move": "Bewegen",
|
||||
"resetView": "Ansicht zurücksetzen",
|
||||
"mergeVisible": "Sichtbare Zusammenführen",
|
||||
"saveToGallery": "In Galerie speichern",
|
||||
"copyToClipboard": "In Zwischenablage kopieren",
|
||||
"downloadAsImage": "Als Bild herunterladen",
|
||||
"undo": "Rückgängig",
|
||||
"redo": "Wiederherstellen",
|
||||
"clearCanvas": "Leinwand löschen",
|
||||
"canvasSettings": "Leinwand-Einstellungen",
|
||||
"showIntermediates": "Zwischenprodukte anzeigen",
|
||||
"showGrid": "Gitternetz anzeigen",
|
||||
"snapToGrid": "Am Gitternetz einrasten",
|
||||
"darkenOutsideSelection": "Außerhalb der Auswahl verdunkeln",
|
||||
"autoSaveToGallery": "Automatisch in Galerie speichern",
|
||||
"saveBoxRegionOnly": "Nur Auswahlbox speichern",
|
||||
"limitStrokesToBox": "Striche auf Box beschränken",
|
||||
"showCanvasDebugInfo": "Zusätzliche Informationen zur Leinwand anzeigen",
|
||||
"clearCanvasHistory": "Leinwand-Verlauf löschen",
|
||||
"clearHistory": "Verlauf löschen",
|
||||
"clearCanvasHistoryMessage": "Wenn Sie den Verlauf der Leinwand löschen, bleibt die aktuelle Leinwand intakt, aber der Verlauf der Rückgängig- und Wiederherstellung wird unwiderruflich gelöscht.",
|
||||
"clearCanvasHistoryConfirm": "Sind Sie sicher, dass Sie den Verlauf der Leinwand löschen möchten?",
|
||||
"emptyTempImageFolder": "Temp-Image Ordner leeren",
|
||||
"emptyFolder": "Leerer Ordner",
|
||||
"emptyTempImagesFolderMessage": "Wenn Sie den Ordner für temporäre Bilder leeren, wird auch der Unified Canvas vollständig zurückgesetzt. Dies umfasst den gesamten Verlauf der Rückgängig-/Wiederherstellungsvorgänge, die Bilder im Bereitstellungsbereich und die Leinwand-Basisebene.",
|
||||
"emptyTempImagesFolderConfirm": "Sind Sie sicher, dass Sie den temporären Ordner leeren wollen?",
|
||||
"activeLayer": "Aktive Ebene",
|
||||
"canvasScale": "Leinwand Maßstab",
|
||||
"boundingBox": "Begrenzungsrahmen",
|
||||
"scaledBoundingBox": "Skalierter Begrenzungsrahmen",
|
||||
"boundingBoxPosition": "Begrenzungsrahmen Position",
|
||||
"canvasDimensions": "Maße der Leinwand",
|
||||
"canvasPosition": "Leinwandposition",
|
||||
"cursorPosition": "Position des Cursors",
|
||||
"previous": "Vorherige",
|
||||
"next": "Nächste",
|
||||
"accept": "Akzeptieren",
|
||||
"showHide": "Einblenden/Ausblenden",
|
||||
"discardAll": "Alles verwerfen",
|
||||
"betaClear": "Löschen",
|
||||
"betaDarkenOutside": "Außen abdunkeln",
|
||||
"betaLimitToBox": "Begrenzung auf das Feld",
|
||||
"betaPreserveMasked": "Maskiertes bewahren",
|
||||
"antialiasing": "Kantenglättung",
|
||||
"showResultsOn": "Zeige Ergebnisse (An)",
|
||||
"showResultsOff": "Zeige Ergebnisse (Aus)"
|
||||
},
|
||||
"accessibility": {
|
||||
"modelSelect": "Model Auswahl",
|
||||
"uploadImage": "Bild hochladen",
|
||||
"previousImage": "Voriges Bild",
|
||||
"useThisParameter": "Benutze diesen Parameter",
|
||||
"copyMetadataJson": "Kopiere Metadaten JSON",
|
||||
"zoomIn": "Vergrößern",
|
||||
"rotateClockwise": "Im Uhrzeigersinn drehen",
|
||||
"flipHorizontally": "Horizontal drehen",
|
||||
"flipVertically": "Vertikal drehen",
|
||||
"modifyConfig": "Optionen einstellen",
|
||||
"toggleAutoscroll": "Auroscroll ein/ausschalten",
|
||||
"toggleLogViewer": "Log Betrachter ein/ausschalten",
|
||||
"showOptionsPanel": "Zeige Optionen",
|
||||
"reset": "Zurücksetzten",
|
||||
"nextImage": "Nächstes Bild",
|
||||
"zoomOut": "Verkleinern",
|
||||
"rotateCounterClockwise": "Gegen den Uhrzeigersinn verdrehen",
|
||||
"showGalleryPanel": "Galeriefenster anzeigen",
|
||||
"exitViewer": "Betrachten beenden",
|
||||
"menu": "Menü",
|
||||
"loadMore": "Mehr laden",
|
||||
"invokeProgressBar": "Invoke Fortschrittsanzeige"
|
||||
},
|
||||
"boards": {
|
||||
"autoAddBoard": "Automatisches Hinzufügen zum Ordner",
|
||||
"topMessage": "Dieser Ordner enthält Bilder die in den folgenden Funktionen verwendet werden:",
|
||||
"move": "Bewegen",
|
||||
"menuItemAutoAdd": "Automatisches Hinzufügen zu diesem Ordner",
|
||||
"myBoard": "Meine Ordner",
|
||||
"searchBoard": "Ordner durchsuchen...",
|
||||
"noMatching": "Keine passenden Ordner",
|
||||
"selectBoard": "Ordner aussuchen",
|
||||
"cancel": "Abbrechen",
|
||||
"addBoard": "Ordner hinzufügen",
|
||||
"uncategorized": "Nicht kategorisiert",
|
||||
"downloadBoard": "Ordner runterladen",
|
||||
"changeBoard": "Ordner wechseln",
|
||||
"loading": "Laden...",
|
||||
"clearSearch": "Suche leeren",
|
||||
"bottomMessage": "Durch das Löschen dieses Ordners und seiner Bilder werden alle Funktionen zurückgesetzt, die sie derzeit verwenden."
|
||||
},
|
||||
"controlnet": {
|
||||
"showAdvanced": "Zeige Erweitert",
|
||||
"contentShuffleDescription": "Mischt den Inhalt von einem Bild",
|
||||
"addT2IAdapter": "$t(common.t2iAdapter) hinzufügen",
|
||||
"importImageFromCanvas": "Importieren Bild von Zeichenfläche",
|
||||
"lineartDescription": "Konvertiere Bild zu Lineart",
|
||||
"importMaskFromCanvas": "Importiere Maske von Zeichenfläche",
|
||||
"hed": "HED",
|
||||
"hideAdvanced": "Verstecke Erweitert",
|
||||
"contentShuffle": "Inhalt mischen",
|
||||
"controlNetEnabledT2IDisabled": "$t(common.controlNet) ist aktiv, $t(common.t2iAdapter) ist deaktiviert",
|
||||
"ipAdapterModel": "Adapter Modell",
|
||||
"beginEndStepPercent": "Start / Ende Step Prozent",
|
||||
"duplicate": "Kopieren",
|
||||
"f": "F",
|
||||
"h": "H",
|
||||
"depthMidasDescription": "Tiefenmap erstellen mit Midas",
|
||||
"controlnet": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.controlNet))",
|
||||
"t2iEnabledControlNetDisabled": "$t(common.t2iAdapter) ist aktiv, $t(common.controlNet) ist deaktiviert",
|
||||
"weight": "Breite",
|
||||
"selectModel": "Wähle ein Modell",
|
||||
"depthMidas": "Tiefe (Midas)",
|
||||
"w": "W",
|
||||
"addControlNet": "$t(common.controlNet) hinzufügen",
|
||||
"none": "Kein",
|
||||
"incompatibleBaseModel": "Inkompatibles Basismodell:",
|
||||
"enableControlnet": "Aktiviere ControlNet",
|
||||
"detectResolution": "Auflösung erkennen",
|
||||
"controlNetT2IMutexDesc": "$t(common.controlNet) und $t(common.t2iAdapter) zur gleichen Zeit wird nicht unterstützt.",
|
||||
"ip_adapter": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.ipAdapter))",
|
||||
"fill": "Füllen",
|
||||
"addIPAdapter": "$t(common.ipAdapter) hinzufügen",
|
||||
"colorMapDescription": "Erstelle eine Farbkarte von diesem Bild",
|
||||
"t2i_adapter": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.t2iAdapter))",
|
||||
"imageResolution": "Bild Auflösung",
|
||||
"depthZoe": "Tiefe (Zoe)",
|
||||
"colorMap": "Farbe",
|
||||
"lowThreshold": "Niedrige Schwelle",
|
||||
"highThreshold": "Hohe Schwelle",
|
||||
"toggleControlNet": "Schalten ControlNet um",
|
||||
"delete": "Löschen",
|
||||
"controlAdapter_one": "Control Adapter",
|
||||
"controlAdapter_other": "Control Adapters",
|
||||
"colorMapTileSize": "Tile Größe",
|
||||
"depthZoeDescription": "Tiefenmap erstellen mit Zoe",
|
||||
"setControlImageDimensions": "Setze Control Bild Auflösung auf Breite/Höhe",
|
||||
"handAndFace": "Hand und Gesicht",
|
||||
"enableIPAdapter": "Aktiviere IP Adapter",
|
||||
"resize": "Größe ändern",
|
||||
"resetControlImage": "Zurücksetzen vom Referenz Bild",
|
||||
"balanced": "Ausgewogen",
|
||||
"prompt": "Prompt",
|
||||
"resizeMode": "Größenänderungsmodus",
|
||||
"processor": "Prozessor",
|
||||
"saveControlImage": "Speichere Referenz Bild",
|
||||
"safe": "Speichern",
|
||||
"ipAdapterImageFallback": "Kein IP Adapter Bild ausgewählt",
|
||||
"resetIPAdapterImage": "Zurücksetzen vom IP Adapter Bild",
|
||||
"pidi": "PIDI",
|
||||
"normalBae": "Normales BAE",
|
||||
"mlsdDescription": "Minimalistischer Liniensegmentdetektor",
|
||||
"openPoseDescription": "Schätzung der menschlichen Pose mit Openpose",
|
||||
"control": "Kontrolle",
|
||||
"coarse": "Coarse",
|
||||
"crop": "Zuschneiden",
|
||||
"pidiDescription": "PIDI-Bildverarbeitung",
|
||||
"mediapipeFace": "Mediapipe Gesichter",
|
||||
"mlsd": "M-LSD",
|
||||
"controlMode": "Steuermodus",
|
||||
"cannyDescription": "Canny Ecken Erkennung",
|
||||
"lineart": "Lineart",
|
||||
"lineartAnimeDescription": "Lineart-Verarbeitung im Anime-Stil",
|
||||
"minConfidence": "Minimales Vertrauen",
|
||||
"megaControl": "Mega-Kontrolle",
|
||||
"autoConfigure": "Prozessor automatisch konfigurieren",
|
||||
"normalBaeDescription": "Normale BAE-Verarbeitung",
|
||||
"noneDescription": "Es wurde keine Verarbeitung angewendet",
|
||||
"openPose": "Openpose",
|
||||
"lineartAnime": "Lineart Anime",
|
||||
"mediapipeFaceDescription": "Gesichtserkennung mit Mediapipe",
|
||||
"canny": "Canny",
|
||||
"hedDescription": "Ganzheitlich verschachtelte Kantenerkennung",
|
||||
"scribble": "Scribble",
|
||||
"maxFaces": "Maximal Anzahl Gesichter"
|
||||
},
|
||||
"queue": {
|
||||
"status": "Status",
|
||||
"cancelTooltip": "Aktuellen Aufgabe abbrechen",
|
||||
"queueEmpty": "Warteschlange leer",
|
||||
"in_progress": "In Arbeit",
|
||||
"queueFront": "An den Anfang der Warteschlange tun",
|
||||
"completed": "Fertig",
|
||||
"queueBack": "In die Warteschlange",
|
||||
"clearFailed": "Probleme beim leeren der Warteschlange",
|
||||
"clearSucceeded": "Warteschlange geleert",
|
||||
"pause": "Pause",
|
||||
"cancelSucceeded": "Auftrag abgebrochen",
|
||||
"queue": "Warteschlange",
|
||||
"batch": "Stapel",
|
||||
"pending": "Ausstehend",
|
||||
"clear": "Leeren",
|
||||
"prune": "Leeren",
|
||||
"total": "Gesamt",
|
||||
"canceled": "Abgebrochen",
|
||||
"clearTooltip": "Abbrechen und alle Aufträge leeren",
|
||||
"current": "Aktuell",
|
||||
"failed": "Fehler",
|
||||
"cancelItem": "Abbruch Auftrag",
|
||||
"next": "Nächste",
|
||||
"cancel": "Abbruch",
|
||||
"session": "Sitzung",
|
||||
"queueTotal": "{{total}} Gesamt",
|
||||
"resume": "Wieder aufnehmen",
|
||||
"item": "Auftrag",
|
||||
"notReady": "Warteschlange noch nicht bereit",
|
||||
"batchValues": "Stapel Werte",
|
||||
"queueCountPrediction": "{{predicted}} zur Warteschlange hinzufügen",
|
||||
"queuedCount": "{{pending}} wartenden Elemente",
|
||||
"clearQueueAlertDialog": "Die Warteschlange leeren, stoppt den aktuellen Prozess und leert die Warteschlange komplett.",
|
||||
"completedIn": "Fertig in",
|
||||
"cancelBatchSucceeded": "Stapel abgebrochen",
|
||||
"cancelBatch": "Stapel stoppen",
|
||||
"enqueueing": "Stapel in der Warteschlange",
|
||||
"queueMaxExceeded": "Maximum von {{max_queue_size}} Elementen erreicht, würde {{skip}} Elemente überspringen",
|
||||
"cancelBatchFailed": "Problem beim Abbruch vom Stapel",
|
||||
"clearQueueAlertDialog2": "bist du sicher die Warteschlange zu leeren?",
|
||||
"pruneSucceeded": "{{item_count}} abgeschlossene Elemente aus der Warteschlange entfernt",
|
||||
"pauseSucceeded": "Prozessor angehalten",
|
||||
"cancelFailed": "Problem beim Stornieren des Auftrags",
|
||||
"pauseFailed": "Problem beim Anhalten des Prozessors",
|
||||
"front": "Vorne",
|
||||
"pruneTooltip": "Bereinigen Sie {{item_count}} abgeschlossene Aufträge",
|
||||
"resumeFailed": "Problem beim wieder aufnehmen von Prozessor",
|
||||
"pruneFailed": "Problem beim leeren der Warteschlange",
|
||||
"pauseTooltip": "Pause von Prozessor",
|
||||
"back": "Hinten",
|
||||
"resumeSucceeded": "Prozessor wieder aufgenommen",
|
||||
"resumeTooltip": "Prozessor wieder aufnehmen"
|
||||
},
|
||||
"metadata": {
|
||||
"negativePrompt": "Negativ Beschreibung",
|
||||
"metadata": "Meta-Data",
|
||||
"strength": "Bild zu Bild stärke",
|
||||
"imageDetails": "Bild Details",
|
||||
"model": "Modell",
|
||||
"noImageDetails": "Keine Bild Details gefunden",
|
||||
"cfgScale": "CFG-Skala",
|
||||
"fit": "Bild zu Bild passen",
|
||||
"height": "Höhe",
|
||||
"noMetaData": "Keine Meta-Data gefunden",
|
||||
"width": "Breite",
|
||||
"createdBy": "Erstellt von",
|
||||
"steps": "Schritte",
|
||||
"seamless": "Nahtlos",
|
||||
"positivePrompt": "Positiver Prompt",
|
||||
"generationMode": "Generierungsmodus",
|
||||
"Threshold": "Noise Schwelle",
|
||||
"seed": "Samen",
|
||||
"perlin": "Perlin Noise",
|
||||
"hiresFix": "Optimierung für hohe Auflösungen",
|
||||
"initImage": "Erstes Bild",
|
||||
"variations": "Samengewichtspaare",
|
||||
"vae": "VAE",
|
||||
"workflow": "Arbeitsablauf",
|
||||
"scheduler": "Scheduler",
|
||||
"noRecallParameters": "Es wurden keine Parameter zum Abrufen gefunden"
|
||||
},
|
||||
"popovers": {
|
||||
"noiseUseCPU": {
|
||||
"heading": "Nutze Prozessor rauschen"
|
||||
},
|
||||
"paramModel": {
|
||||
"heading": "Modell"
|
||||
},
|
||||
"paramIterations": {
|
||||
"heading": "Iterationen"
|
||||
},
|
||||
"paramCFGScale": {
|
||||
"heading": "CFG-Skala"
|
||||
},
|
||||
"paramSteps": {
|
||||
"heading": "Schritte"
|
||||
},
|
||||
"lora": {
|
||||
"heading": "LoRA Gewichte"
|
||||
},
|
||||
"infillMethod": {
|
||||
"heading": "Füllmethode"
|
||||
},
|
||||
"paramVAE": {
|
||||
"heading": "VAE"
|
||||
}
|
||||
},
|
||||
"ui": {
|
||||
"lockRatio": "Verhältnis sperren",
|
||||
"hideProgressImages": "Verstecke Prozess Bild",
|
||||
"showProgressImages": "Zeige Prozess Bild"
|
||||
},
|
||||
"invocationCache": {
|
||||
"disable": "Deaktivieren",
|
||||
"misses": "Cache Nötig",
|
||||
"hits": "Cache Treffer",
|
||||
"enable": "Aktivieren",
|
||||
"clear": "Leeren",
|
||||
"maxCacheSize": "Maximale Cache Größe",
|
||||
"cacheSize": "Cache Größe"
|
||||
},
|
||||
"embedding": {
|
||||
"noMatchingEmbedding": "Keine passenden Embeddings",
|
||||
"addEmbedding": "Embedding hinzufügen",
|
||||
"incompatibleModel": "Inkompatibles Basismodell:"
|
||||
},
|
||||
"nodes": {
|
||||
"booleanPolymorphicDescription": "Eine Sammlung boolescher Werte.",
|
||||
"colorFieldDescription": "Eine RGBA-Farbe.",
|
||||
"conditioningCollection": "Konditionierungssammlung",
|
||||
"addNode": "Knoten hinzufügen",
|
||||
"conditioningCollectionDescription": "Konditionierung kann zwischen Knoten weitergegeben werden.",
|
||||
"colorPolymorphic": "Farbpolymorph",
|
||||
"colorCodeEdgesHelp": "Farbkodieren Sie Kanten entsprechend ihren verbundenen Feldern",
|
||||
"animatedEdges": "Animierte Kanten",
|
||||
"booleanCollectionDescription": "Eine Sammlung boolescher Werte.",
|
||||
"colorField": "Farbe",
|
||||
"collectionItem": "Objekt in Sammlung",
|
||||
"animatedEdgesHelp": "Animieren Sie ausgewählte Kanten und Kanten, die mit ausgewählten Knoten verbunden sind",
|
||||
"cannotDuplicateConnection": "Es können keine doppelten Verbindungen erstellt werden",
|
||||
"booleanPolymorphic": "Boolesche Polymorphie",
|
||||
"colorPolymorphicDescription": "Eine Sammlung von Farben.",
|
||||
"clipFieldDescription": "Tokenizer- und text_encoder-Untermodelle.",
|
||||
"clipField": "Clip",
|
||||
"colorCollection": "Eine Sammlung von Farben.",
|
||||
"boolean": "Boolesche Werte",
|
||||
"currentImage": "Aktuelles Bild",
|
||||
"booleanDescription": "Boolesche Werte sind wahr oder falsch.",
|
||||
"collection": "Sammlung",
|
||||
"cannotConnectInputToInput": "Eingang kann nicht mit Eingang verbunden werden",
|
||||
"conditioningField": "Konditionierung",
|
||||
"cannotConnectOutputToOutput": "Ausgang kann nicht mit Ausgang verbunden werden",
|
||||
"booleanCollection": "Boolesche Werte Sammlung",
|
||||
"cannotConnectToSelf": "Es kann keine Verbindung zu sich selbst hergestellt werden",
|
||||
"colorCodeEdges": "Farbkodierte Kanten",
|
||||
"addNodeToolTip": "Knoten hinzufügen (Umschalt+A, Leertaste)"
|
||||
},
|
||||
"hrf": {
|
||||
"enableHrf": "Aktivieren Sie die Korrektur für hohe Auflösungen",
|
||||
"upscaleMethod": "Vergrößerungsmethoden",
|
||||
"enableHrfTooltip": "Generieren Sie mit einer niedrigeren Anfangsauflösung, skalieren Sie auf die Basisauflösung hoch und führen Sie dann Image-to-Image aus.",
|
||||
"metadata": {
|
||||
"strength": "Hochauflösender Fix Stärke",
|
||||
"enabled": "Hochauflösender Fix aktiviert",
|
||||
"method": "Hochauflösender Fix Methode"
|
||||
},
|
||||
"hrf": "Hochauflösender Fix",
|
||||
"hrfStrength": "Hochauflösende Fix Stärke",
|
||||
"strengthTooltip": "Niedrigere Werte führen zu weniger Details, wodurch potenzielle Artefakte reduziert werden können."
|
||||
},
|
||||
"models": {
|
||||
"noMatchingModels": "Keine passenden Modelle",
|
||||
"loading": "lade",
|
||||
"noMatchingLoRAs": "Keine passenden LoRAs",
|
||||
"noLoRAsAvailable": "Keine LoRAs verfügbar",
|
||||
"noModelsAvailable": "Keine Modelle verfügbar",
|
||||
"selectModel": "Wählen ein Modell aus",
|
||||
"noRefinerModelsInstalled": "Keine SDXL Refiner-Modelle installiert",
|
||||
"noLoRAsInstalled": "Keine LoRAs installiert",
|
||||
"selectLoRA": "Wählen ein LoRA aus"
|
||||
}
|
||||
}
|
||||
1558 invokeai/frontend/web/dist/locales/en.json vendored Normal file
File diff suppressed because it is too large
737 invokeai/frontend/web/dist/locales/es.json vendored Normal file
@@ -0,0 +1,737 @@
|
||||
{
|
||||
"common": {
|
||||
"hotkeysLabel": "Atajos de teclado",
|
||||
"languagePickerLabel": "Selector de idioma",
|
||||
"reportBugLabel": "Reportar errores",
|
||||
"settingsLabel": "Ajustes",
|
||||
"img2img": "Imagen a Imagen",
|
||||
"unifiedCanvas": "Lienzo Unificado",
|
||||
"nodes": "Editor del flujo de trabajo",
|
||||
"langSpanish": "Español",
|
||||
"nodesDesc": "Un sistema de generación de imágenes basado en nodos, actualmente se encuentra en desarrollo. Mantente pendiente a nuestras actualizaciones acerca de esta fabulosa funcionalidad.",
|
||||
"postProcessing": "Post-procesamiento",
|
||||
"postProcessDesc1": "Invoke AI ofrece una gran variedad de funciones de post-procesamiento, El aumento de tamaño y Restauración de Rostros ya se encuentran disponibles en la interfaz web, puedes acceder desde el menú de Opciones Avanzadas en las pestañas de Texto a Imagen y de Imagen a Imagen. También puedes acceder a estas funciones directamente mediante el botón de acciones en el menú superior de la imagen actual o en el visualizador.",
|
||||
"postProcessDesc2": "Una interfaz de usuario dedicada se lanzará pronto para facilitar flujos de trabajo de postprocesamiento más avanzado.",
|
||||
"postProcessDesc3": "La Interfaz de Línea de Comandos de Invoke AI ofrece muchas otras características, incluyendo -Embiggen-.",
|
||||
"training": "Entrenamiento",
|
||||
"trainingDesc1": "Un flujo de trabajo dedicado para el entrenamiento de sus propios -embeddings- y puntos de control utilizando Inversión Textual y Dreambooth desde la interfaz web.",
|
||||
"trainingDesc2": "InvokeAI ya admite el entrenamiento de incrustaciones personalizadas mediante la inversión textual mediante el script principal.",
|
||||
"upload": "Subir imagen",
|
||||
"close": "Cerrar",
|
||||
"load": "Cargar",
|
||||
"statusConnected": "Conectado",
|
||||
"statusDisconnected": "Desconectado",
|
||||
"statusError": "Error",
|
||||
"statusPreparing": "Preparando",
|
||||
"statusProcessingCanceled": "Procesamiento Cancelado",
|
||||
"statusProcessingComplete": "Procesamiento Completo",
|
||||
"statusGenerating": "Generando",
|
||||
"statusGeneratingTextToImage": "Generando Texto a Imagen",
|
||||
"statusGeneratingImageToImage": "Generando Imagen a Imagen",
|
||||
"statusGeneratingInpainting": "Generando pintura interior",
|
||||
"statusGeneratingOutpainting": "Generando pintura exterior",
|
||||
"statusGenerationComplete": "Generación Completa",
|
||||
"statusIterationComplete": "Iteración Completa",
|
||||
"statusSavingImage": "Guardando Imagen",
|
||||
"statusRestoringFaces": "Restaurando Rostros",
|
||||
"statusRestoringFacesGFPGAN": "Restaurando Rostros (GFPGAN)",
|
||||
"statusRestoringFacesCodeFormer": "Restaurando Rostros (CodeFormer)",
|
||||
"statusUpscaling": "Aumentando Tamaño",
|
||||
"statusUpscalingESRGAN": "Restaurando Rostros(ESRGAN)",
|
||||
"statusLoadingModel": "Cargando Modelo",
|
||||
"statusModelChanged": "Modelo cambiado",
|
||||
"statusMergedModels": "Modelos combinados",
|
||||
"githubLabel": "Github",
|
||||
"discordLabel": "Discord",
|
||||
"langEnglish": "Inglés",
|
||||
"langDutch": "Holandés",
|
||||
"langFrench": "Francés",
|
||||
"langGerman": "Alemán",
|
||||
"langItalian": "Italiano",
|
||||
"langArabic": "Árabe",
|
||||
"langJapanese": "Japones",
|
||||
"langPolish": "Polaco",
|
||||
"langBrPortuguese": "Portugués brasileño",
|
||||
"langRussian": "Ruso",
|
||||
"langSimplifiedChinese": "Chino simplificado",
|
||||
"langUkranian": "Ucraniano",
|
||||
"back": "Atrás",
|
||||
"statusConvertingModel": "Convertir el modelo",
|
||||
"statusModelConverted": "Modelo adaptado",
|
||||
"statusMergingModels": "Fusionar modelos",
|
||||
"langPortuguese": "Portugués",
|
||||
"langKorean": "Coreano",
|
||||
"langHebrew": "Hebreo",
|
||||
"loading": "Cargando",
|
||||
"loadingInvokeAI": "Cargando invocar a la IA",
|
||||
"postprocessing": "Tratamiento posterior",
|
||||
"txt2img": "De texto a imagen",
|
||||
"accept": "Aceptar",
|
||||
"cancel": "Cancelar",
|
||||
"linear": "Lineal",
|
||||
"random": "Aleatorio",
|
||||
"generate": "Generar",
|
||||
"openInNewTab": "Abrir en una nueva pestaña",
|
||||
"dontAskMeAgain": "No me preguntes de nuevo",
|
||||
"areYouSure": "¿Estas seguro?",
|
||||
"imagePrompt": "Indicación de imagen",
|
||||
"batch": "Administrador de lotes",
|
||||
"darkMode": "Modo oscuro",
|
||||
"lightMode": "Modo claro",
|
||||
"modelManager": "Administrador de modelos",
|
||||
"communityLabel": "Comunidad"
|
||||
},
|
||||
"gallery": {
|
||||
"generations": "Generaciones",
|
||||
"showGenerations": "Mostrar Generaciones",
|
||||
"uploads": "Subidas de archivos",
|
||||
"showUploads": "Mostar Subidas",
|
||||
"galleryImageSize": "Tamaño de la imagen",
|
||||
"galleryImageResetSize": "Restablecer tamaño de la imagen",
|
||||
"gallerySettings": "Ajustes de la galería",
|
||||
"maintainAspectRatio": "Mantener relación de aspecto",
|
||||
"autoSwitchNewImages": "Auto seleccionar Imágenes nuevas",
|
||||
"singleColumnLayout": "Diseño de una columna",
|
||||
"allImagesLoaded": "Todas las imágenes cargadas",
|
||||
"loadMore": "Cargar más",
|
||||
"noImagesInGallery": "No hay imágenes para mostrar",
|
||||
"deleteImage": "Eliminar Imagen",
|
||||
"deleteImageBin": "Las imágenes eliminadas se enviarán a la papelera de tu sistema operativo.",
|
||||
"deleteImagePermanent": "Las imágenes eliminadas no se pueden restaurar.",
|
||||
"images": "Imágenes",
|
||||
"assets": "Activos",
|
||||
"autoAssignBoardOnClick": "Asignación automática de tableros al hacer clic"
|
||||
},
|
||||
"hotkeys": {
|
||||
"keyboardShortcuts": "Atajos de teclado",
|
||||
"appHotkeys": "Atajos de applicación",
|
||||
"generalHotkeys": "Atajos generales",
|
||||
"galleryHotkeys": "Atajos de galería",
|
||||
"unifiedCanvasHotkeys": "Atajos de lienzo unificado",
|
||||
"invoke": {
|
||||
"title": "Invocar",
|
||||
"desc": "Generar una imagen"
|
||||
},
|
||||
"cancel": {
|
||||
"title": "Cancelar",
|
||||
"desc": "Cancelar el proceso de generación de imagen"
|
||||
},
|
||||
"focusPrompt": {
|
||||
"title": "Mover foco a Entrada de texto",
|
||||
"desc": "Mover foco hacia el campo de texto de la Entrada"
|
||||
},
|
||||
"toggleOptions": {
|
||||
"title": "Alternar opciones",
|
||||
"desc": "Mostar y ocultar el panel de opciones"
|
||||
},
|
||||
"pinOptions": {
|
||||
"title": "Fijar opciones",
|
||||
"desc": "Fijar el panel de opciones"
|
||||
},
|
||||
"toggleViewer": {
|
||||
"title": "Alternar visor",
|
||||
"desc": "Mostar y ocultar el visor de imágenes"
|
||||
},
|
||||
"toggleGallery": {
|
||||
"title": "Alternar galería",
|
||||
"desc": "Mostar y ocultar la galería de imágenes"
|
||||
},
|
||||
"maximizeWorkSpace": {
|
||||
"title": "Maximizar espacio de trabajo",
|
||||
"desc": "Cerrar otros páneles y maximizar el espacio de trabajo"
|
||||
},
|
||||
"changeTabs": {
|
||||
"title": "Cambiar",
|
||||
"desc": "Cambiar entre áreas de trabajo"
|
||||
},
|
||||
"consoleToggle": {
|
||||
"title": "Alternar consola",
|
||||
"desc": "Mostar y ocultar la consola"
|
||||
},
|
||||
"setPrompt": {
|
||||
"title": "Establecer Entrada",
|
||||
"desc": "Usar el texto de entrada de la imagen actual"
|
||||
},
|
||||
"setSeed": {
|
||||
"title": "Establecer semilla",
|
||||
"desc": "Usar la semilla de la imagen actual"
|
||||
},
|
||||
"setParameters": {
|
||||
"title": "Establecer parámetros",
|
||||
"desc": "Usar todos los parámetros de la imagen actual"
|
||||
},
|
||||
"restoreFaces": {
|
||||
"title": "Restaurar rostros",
|
||||
"desc": "Restaurar rostros en la imagen actual"
|
||||
},
|
||||
"upscale": {
|
||||
"title": "Aumentar resolución",
|
||||
"desc": "Aumentar la resolución de la imagen actual"
|
||||
},
|
||||
"showInfo": {
|
||||
"title": "Mostrar información",
|
||||
"desc": "Mostar metadatos de la imagen actual"
|
||||
},
|
||||
"sendToImageToImage": {
|
||||
"title": "Enviar hacia Imagen a Imagen",
|
||||
"desc": "Enviar imagen actual hacia Imagen a Imagen"
|
||||
},
|
||||
"deleteImage": {
|
||||
"title": "Eliminar imagen",
|
||||
"desc": "Eliminar imagen actual"
|
||||
},
|
||||
"closePanels": {
|
||||
"title": "Cerrar páneles",
|
||||
"desc": "Cerrar los páneles abiertos"
|
||||
},
|
||||
"previousImage": {
|
||||
"title": "Imagen anterior",
|
||||
"desc": "Muetra la imagen anterior en la galería"
|
||||
},
|
||||
"nextImage": {
|
||||
"title": "Imagen siguiente",
|
||||
"desc": "Muetra la imagen siguiente en la galería"
|
||||
},
|
||||
"toggleGalleryPin": {
|
||||
"title": "Alternar fijado de galería",
|
||||
"desc": "Fijar o desfijar la galería en la interfaz"
|
||||
},
|
||||
"increaseGalleryThumbSize": {
|
||||
"title": "Aumentar imagen en galería",
|
||||
"desc": "Aumenta el tamaño de las miniaturas de la galería"
|
||||
},
|
||||
"decreaseGalleryThumbSize": {
|
||||
"title": "Reducir imagen en galería",
|
||||
"desc": "Reduce el tamaño de las miniaturas de la galería"
|
||||
},
|
||||
"selectBrush": {
|
||||
"title": "Seleccionar pincel",
|
||||
"desc": "Selecciona el pincel en el lienzo"
|
||||
},
|
||||
"selectEraser": {
|
||||
"title": "Seleccionar borrador",
|
||||
"desc": "Selecciona el borrador en el lienzo"
|
||||
},
|
||||
"decreaseBrushSize": {
|
||||
"title": "Disminuir tamaño de herramienta",
|
||||
"desc": "Disminuye el tamaño del pincel/borrador en el lienzo"
|
||||
},
|
||||
"increaseBrushSize": {
|
||||
"title": "Aumentar tamaño del pincel",
|
||||
"desc": "Aumenta el tamaño del pincel en el lienzo"
|
||||
},
|
||||
"decreaseBrushOpacity": {
|
||||
"title": "Disminuir opacidad del pincel",
|
||||
"desc": "Disminuye la opacidad del pincel en el lienzo"
|
||||
},
|
||||
"increaseBrushOpacity": {
|
||||
"title": "Aumentar opacidad del pincel",
|
||||
"desc": "Aumenta la opacidad del pincel en el lienzo"
|
||||
},
|
||||
"moveTool": {
|
||||
"title": "Herramienta de movimiento",
|
||||
"desc": "Permite navegar por el lienzo"
|
||||
},
|
||||
"fillBoundingBox": {
|
||||
"title": "Rellenar Caja contenedora",
|
||||
"desc": "Rellena la caja contenedora con el color seleccionado"
|
||||
},
|
||||
"eraseBoundingBox": {
|
||||
"title": "Borrar Caja contenedora",
|
||||
"desc": "Borra el contenido dentro de la caja contenedora"
|
||||
},
|
||||
"colorPicker": {
|
||||
"title": "Selector de color",
|
||||
"desc": "Selecciona un color del lienzo"
|
||||
},
|
||||
"toggleSnap": {
|
||||
"title": "Alternar ajuste de cuadrícula",
|
||||
"desc": "Activa o desactiva el ajuste automático a la cuadrícula"
|
||||
},
|
||||
"quickToggleMove": {
|
||||
"title": "Alternar movimiento rápido",
|
||||
"desc": "Activa momentáneamente la herramienta de movimiento"
|
||||
},
|
||||
"toggleLayer": {
|
||||
"title": "Alternar capa",
|
||||
"desc": "Alterna entre las capas de máscara y base"
|
||||
},
|
||||
"clearMask": {
|
||||
"title": "Limpiar máscara",
|
||||
"desc": "Limpia toda la máscara actual"
|
||||
},
|
||||
"hideMask": {
|
||||
"title": "Ocultar máscara",
|
||||
"desc": "Oculta o muetre la máscara actual"
|
||||
},
|
||||
"showHideBoundingBox": {
|
||||
"title": "Alternar caja contenedora",
|
||||
"desc": "Muestra u oculta la caja contenedora"
|
||||
},
|
||||
"mergeVisible": {
|
||||
"title": "Consolida capas visibles",
|
||||
"desc": "Consolida todas las capas visibles en una sola"
|
||||
},
|
||||
"saveToGallery": {
|
||||
"title": "Guardar en galería",
|
||||
"desc": "Guardar la imagen actual del lienzo en la galería"
|
||||
},
|
||||
"copyToClipboard": {
|
||||
"title": "Copiar al portapapeles",
|
||||
"desc": "Copiar el lienzo actual al portapapeles"
|
||||
},
|
||||
"downloadImage": {
|
||||
"title": "Descargar imagen",
|
||||
"desc": "Descargar la imagen actual del lienzo"
|
||||
},
|
||||
"undoStroke": {
|
||||
"title": "Deshar trazo",
|
||||
"desc": "Desahacer el último trazo del pincel"
|
||||
},
|
||||
"redoStroke": {
|
||||
"title": "Rehacer trazo",
|
||||
"desc": "Rehacer el último trazo del pincel"
|
||||
},
|
||||
"resetView": {
|
||||
"title": "Restablecer vista",
|
||||
"desc": "Restablecer la vista del lienzo"
|
||||
},
|
||||
"previousStagingImage": {
|
||||
"title": "Imagen anterior",
|
||||
"desc": "Imagen anterior en el área de preparación"
|
||||
},
|
||||
"nextStagingImage": {
|
||||
"title": "Imagen siguiente",
|
||||
"desc": "Siguiente imagen en el área de preparación"
|
||||
},
|
||||
"acceptStagingImage": {
|
||||
"title": "Aceptar imagen",
|
||||
"desc": "Aceptar la imagen actual en el área de preparación"
|
||||
},
|
||||
"addNodes": {
|
||||
"title": "Añadir Nodos",
|
||||
"desc": "Abre el menú para añadir nodos"
|
||||
},
|
||||
"nodesHotkeys": "Teclas de acceso rápido a los nodos"
|
||||
},
|
||||
"modelManager": {
|
||||
"modelManager": "Gestor de Modelos",
|
||||
"model": "Modelo",
|
||||
"modelAdded": "Modelo añadido",
|
||||
"modelUpdated": "Modelo actualizado",
|
||||
"modelEntryDeleted": "Endrada de Modelo eliminada",
|
||||
"cannotUseSpaces": "No se pueden usar Spaces",
|
||||
"addNew": "Añadir nuevo",
|
||||
"addNewModel": "Añadir nuevo modelo",
|
||||
"addManually": "Añadir manualmente",
|
||||
"manual": "Manual",
|
||||
"name": "Nombre",
|
||||
"nameValidationMsg": "Introduce un nombre para tu modelo",
|
||||
"description": "Descripción",
|
||||
"descriptionValidationMsg": "Introduce una descripción para tu modelo",
|
||||
"config": "Configurar",
|
||||
"configValidationMsg": "Ruta del archivo de configuración del modelo.",
|
||||
"modelLocation": "Ubicación del Modelo",
|
||||
"modelLocationValidationMsg": "Ruta del archivo de modelo.",
|
||||
"vaeLocation": "Ubicación VAE",
|
||||
"vaeLocationValidationMsg": "Ruta del archivo VAE.",
|
||||
"width": "Ancho",
|
||||
"widthValidationMsg": "Ancho predeterminado de tu modelo.",
|
||||
"height": "Alto",
|
||||
"heightValidationMsg": "Alto predeterminado de tu modelo.",
|
||||
"addModel": "Añadir Modelo",
|
||||
"updateModel": "Actualizar Modelo",
|
||||
"availableModels": "Modelos disponibles",
|
||||
"search": "Búsqueda",
|
||||
"load": "Cargar",
|
||||
"active": "activo",
|
||||
"notLoaded": "no cargado",
|
||||
"cached": "en caché",
|
||||
"checkpointFolder": "Directorio de Checkpoint",
|
||||
"clearCheckpointFolder": "Limpiar directorio de checkpoint",
|
||||
"findModels": "Buscar modelos",
|
||||
"scanAgain": "Escanear de nuevo",
|
||||
"modelsFound": "Modelos encontrados",
|
||||
"selectFolder": "Selecciona un directorio",
|
||||
"selected": "Seleccionado",
|
||||
"selectAll": "Seleccionar todo",
|
||||
"deselectAll": "Deseleccionar todo",
|
||||
"showExisting": "Mostrar existentes",
|
||||
"addSelected": "Añadir seleccionados",
|
||||
"modelExists": "Modelo existente",
|
||||
"selectAndAdd": "Selecciona de la lista un modelo para añadir",
|
||||
"noModelsFound": "No se encontró ningún modelo",
|
||||
"delete": "Eliminar",
|
||||
"deleteModel": "Eliminar Modelo",
|
||||
"deleteConfig": "Eliminar Configuración",
|
||||
"deleteMsg1": "¿Estás seguro de que deseas eliminar este modelo de InvokeAI?",
|
||||
"deleteMsg2": "Esto eliminará el modelo del disco si está en la carpeta raíz de InvokeAI. Si está utilizando una ubicación personalizada, el modelo NO se eliminará del disco.",
|
||||
"safetensorModels": "SafeTensors",
|
||||
"addDiffuserModel": "Añadir difusores",
|
||||
"inpainting": "v1 Repintado",
|
||||
"repoIDValidationMsg": "Repositorio en línea de tu modelo",
|
||||
"checkpointModels": "Puntos de control",
|
||||
"convertToDiffusersHelpText4": "Este proceso se realiza una sola vez. Puede tardar entre 30 y 60 segundos dependiendo de las especificaciones de tu ordenador.",
|
||||
"diffusersModels": "Difusores",
|
||||
"addCheckpointModel": "Agregar modelo de punto de control/Modelo Safetensor",
|
||||
"vaeRepoID": "Identificador del repositorio de VAE",
|
||||
"vaeRepoIDValidationMsg": "Repositorio en línea de tú VAE",
|
||||
"formMessageDiffusersModelLocation": "Difusores Modelo Ubicación",
|
||||
"formMessageDiffusersModelLocationDesc": "Por favor, introduzca al menos uno.",
|
||||
"formMessageDiffusersVAELocation": "Ubicación VAE",
|
||||
"formMessageDiffusersVAELocationDesc": "Si no se proporciona, InvokeAI buscará el archivo VAE dentro de la ubicación del modelo indicada anteriormente.",
|
||||
"convert": "Convertir",
|
||||
"convertToDiffusers": "Convertir en difusores",
|
||||
"convertToDiffusersHelpText1": "Este modelo se convertirá al formato 🧨 Difusores.",
|
||||
"convertToDiffusersHelpText2": "Este proceso sustituirá su entrada del Gestor de Modelos por la versión de Difusores del mismo modelo.",
|
||||
"convertToDiffusersHelpText3": "Tu archivo del punto de control en el disco se eliminará si está en la carpeta raíz de InvokeAI. Si está en una ubicación personalizada, NO se eliminará.",
|
||||
"convertToDiffusersHelpText5": "Por favor, asegúrate de tener suficiente espacio en el disco. Los modelos generalmente varían entre 2 GB y 7 GB de tamaño.",
|
||||
"convertToDiffusersHelpText6": "¿Desea transformar este modelo?",
|
||||
"convertToDiffusersSaveLocation": "Guardar ubicación",
|
||||
"v1": "v1",
|
||||
"statusConverting": "Adaptar",
|
||||
"modelConverted": "Modelo adaptado",
|
||||
"sameFolder": "La misma carpeta",
|
||||
"invokeRoot": "Carpeta InvokeAI",
|
||||
"custom": "Personalizado",
|
||||
"customSaveLocation": "Ubicación personalizada para guardar",
|
||||
"merge": "Fusión",
|
||||
"modelsMerged": "Modelos fusionados",
|
||||
"mergeModels": "Combinar modelos",
|
||||
"modelOne": "Modelo 1",
|
||||
"modelTwo": "Modelo 2",
|
||||
"modelThree": "Modelo 3",
|
||||
"mergedModelName": "Nombre del modelo combinado",
|
||||
"alpha": "Alfa",
|
||||
"interpolationType": "Tipo de interpolación",
|
||||
"mergedModelSaveLocation": "Guardar ubicación",
|
||||
"mergedModelCustomSaveLocation": "Ruta personalizada",
|
||||
"invokeAIFolder": "Invocar carpeta de la inteligencia artificial",
|
||||
"modelMergeHeaderHelp2": "Sólo se pueden fusionar difusores. Si desea fusionar un modelo de punto de control, conviértalo primero en difusores.",
|
||||
"modelMergeAlphaHelp": "Alfa controla la fuerza de mezcla de los modelos. Los valores alfa más bajos reducen la influencia del segundo modelo.",
|
||||
"modelMergeInterpAddDifferenceHelp": "En este modo, el Modelo 3 se sustrae primero del Modelo 2. La versión resultante se mezcla con el Modelo 1 con la tasa alfa establecida anteriormente. La versión resultante se mezcla con el Modelo 1 con la tasa alfa establecida anteriormente.",
|
||||
"ignoreMismatch": "Ignorar discrepancias entre modelos seleccionados",
|
||||
"modelMergeHeaderHelp1": "Puede unir hasta tres modelos diferentes para crear una combinación que se adapte a sus necesidades.",
|
||||
"inverseSigmoid": "Sigmoideo inverso",
|
||||
"weightedSum": "Modelo de suma ponderada",
|
||||
"sigmoid": "Función sigmoide",
|
||||
"allModels": "Todos los modelos",
|
||||
"repo_id": "Identificador del repositorio",
|
||||
"pathToCustomConfig": "Ruta a la configuración personalizada",
|
||||
"customConfig": "Configuración personalizada",
|
||||
"v2_base": "v2 (512px)",
|
||||
"none": "ninguno",
|
||||
"pickModelType": "Elige el tipo de modelo",
|
||||
"v2_768": "v2 (768px)",
|
||||
"addDifference": "Añadir una diferencia",
|
||||
"scanForModels": "Buscar modelos",
|
||||
"vae": "VAE",
|
||||
"variant": "Variante",
|
||||
"baseModel": "Modelo básico",
|
||||
"modelConversionFailed": "Conversión al modelo fallida",
|
||||
"selectModel": "Seleccionar un modelo",
|
||||
"modelUpdateFailed": "Error al actualizar el modelo",
|
||||
"modelsMergeFailed": "Fusión del modelo fallida",
|
||||
"convertingModelBegin": "Convirtiendo el modelo. Por favor, espere.",
|
||||
"modelDeleted": "Modelo eliminado",
|
||||
"modelDeleteFailed": "Error al borrar el modelo",
|
||||
"noCustomLocationProvided": "‐No se proporcionó una ubicación personalizada",
|
||||
"importModels": "Importar los modelos",
|
||||
"settings": "Ajustes",
|
||||
"syncModels": "Sincronizar las plantillas",
|
||||
"syncModelsDesc": "Si tus plantillas no están sincronizados con el backend, puedes actualizarlas usando esta opción. Esto suele ser útil en los casos en los que actualizas manualmente tu archivo models.yaml o añades plantillas a la carpeta raíz de InvokeAI después de que la aplicación haya arrancado.",
|
||||
"modelsSynced": "Plantillas sincronizadas",
|
||||
"modelSyncFailed": "La sincronización de la plantilla falló",
|
||||
"loraModels": "LoRA",
|
||||
"onnxModels": "Onnx",
|
||||
"oliveModels": "Olives"
|
||||
},
|
||||
"parameters": {
|
||||
"images": "Imágenes",
|
||||
"steps": "Pasos",
|
||||
"cfgScale": "Escala CFG",
|
||||
"width": "Ancho",
|
||||
"height": "Alto",
|
||||
"seed": "Semilla",
|
||||
"randomizeSeed": "Semilla aleatoria",
|
||||
"shuffle": "Semilla aleatoria",
|
||||
"noiseThreshold": "Umbral de Ruido",
|
||||
"perlinNoise": "Ruido Perlin",
|
||||
"variations": "Variaciones",
|
||||
"variationAmount": "Cantidad de Variación",
|
||||
"seedWeights": "Peso de las semillas",
|
||||
"faceRestoration": "Restauración de Rostros",
|
||||
"restoreFaces": "Restaurar rostros",
|
||||
"type": "Tipo",
|
||||
"strength": "Fuerza",
|
||||
"upscaling": "Aumento de resolución",
|
||||
"upscale": "Aumentar resolución",
|
||||
"upscaleImage": "Aumentar la resolución de la imagen",
|
||||
"scale": "Escala",
|
||||
"otherOptions": "Otras opciones",
|
||||
"seamlessTiling": "Mosaicos sin parches",
|
||||
"hiresOptim": "Optimización de Alta Resolución",
|
||||
"imageFit": "Ajuste tamaño de imagen inicial al tamaño objetivo",
|
||||
"codeformerFidelity": "Fidelidad",
|
||||
"scaleBeforeProcessing": "Redimensionar antes de procesar",
|
||||
"scaledWidth": "Ancho escalado",
|
||||
"scaledHeight": "Alto escalado",
|
||||
"infillMethod": "Método de relleno",
|
||||
"tileSize": "Tamaño del mosaico",
|
||||
"boundingBoxHeader": "Caja contenedora",
|
||||
"seamCorrectionHeader": "Corrección de parches",
|
||||
"infillScalingHeader": "Remplazo y escalado",
|
||||
"img2imgStrength": "Peso de Imagen a Imagen",
|
||||
"toggleLoopback": "Alternar Retroalimentación",
|
||||
"sendTo": "Enviar a",
|
||||
"sendToImg2Img": "Enviar a Imagen a Imagen",
|
||||
"sendToUnifiedCanvas": "Enviar a Lienzo Unificado",
|
||||
"copyImageToLink": "Copiar imagen a enlace",
|
||||
"downloadImage": "Descargar imagen",
|
||||
"openInViewer": "Abrir en Visor",
|
||||
"closeViewer": "Cerrar Visor",
|
||||
"usePrompt": "Usar Entrada",
|
||||
"useSeed": "Usar Semilla",
|
||||
"useAll": "Usar Todo",
|
||||
"useInitImg": "Usar Imagen Inicial",
|
||||
"info": "Información",
|
||||
"initialImage": "Imagen Inicial",
|
||||
"showOptionsPanel": "Mostrar panel de opciones",
|
||||
"symmetry": "Simetría",
|
||||
"vSymmetryStep": "Paso de simetría V",
|
||||
"hSymmetryStep": "Paso de simetría H",
|
||||
"cancel": {
|
||||
"immediate": "Cancelar inmediatamente",
|
||||
"schedule": "Cancelar tras la iteración actual",
|
||||
"isScheduled": "Cancelando",
|
||||
"setType": "Tipo de cancelación"
|
||||
},
|
||||
"copyImage": "Copiar la imagen",
|
||||
"general": "General",
|
||||
"imageToImage": "Imagen a imagen",
|
||||
"denoisingStrength": "Intensidad de la eliminación del ruido",
|
||||
"hiresStrength": "Alta resistencia",
|
||||
"showPreview": "Mostrar la vista previa",
|
||||
"hidePreview": "Ocultar la vista previa",
|
||||
"noiseSettings": "Ruido",
|
||||
"seamlessXAxis": "Eje x",
|
||||
"seamlessYAxis": "Eje y",
|
||||
"scheduler": "Programador",
|
||||
"boundingBoxWidth": "Anchura del recuadro",
|
||||
"boundingBoxHeight": "Altura del recuadro",
|
||||
"positivePromptPlaceholder": "Prompt Positivo",
|
||||
"negativePromptPlaceholder": "Prompt Negativo",
|
||||
"controlNetControlMode": "Modo de control",
|
||||
"clipSkip": "Omitir el CLIP",
|
||||
"aspectRatio": "Relación",
|
||||
"maskAdjustmentsHeader": "Ajustes de la máscara",
|
||||
"maskBlur": "Difuminar",
|
||||
"maskBlurMethod": "Método del desenfoque",
|
||||
"seamHighThreshold": "Alto",
|
||||
"seamLowThreshold": "Bajo",
|
||||
"coherencePassHeader": "Parámetros de la coherencia",
|
||||
"compositingSettingsHeader": "Ajustes de la composición",
|
||||
"coherenceSteps": "Pasos",
|
||||
"coherenceStrength": "Fuerza",
|
||||
"patchmatchDownScaleSize": "Reducir a escala",
|
||||
"coherenceMode": "Modo"
|
||||
},
|
||||
"settings": {
|
||||
"models": "Modelos",
|
||||
"displayInProgress": "Mostrar las imágenes del progreso",
|
||||
"saveSteps": "Guardar imágenes cada n pasos",
|
||||
"confirmOnDelete": "Confirmar antes de eliminar",
|
||||
"displayHelpIcons": "Mostrar iconos de ayuda",
|
||||
"enableImageDebugging": "Habilitar depuración de imágenes",
|
||||
"resetWebUI": "Restablecer interfaz web",
|
||||
"resetWebUIDesc1": "Al restablecer la interfaz web, solo se restablece la caché local del navegador de sus imágenes y la configuración guardada. No se elimina ninguna imagen de su disco duro.",
|
||||
"resetWebUIDesc2": "Si las imágenes no se muestran en la galería o algo más no funciona, intente restablecer antes de reportar un incidente en GitHub.",
|
||||
"resetComplete": "Se ha restablecido la interfaz web.",
|
||||
"useSlidersForAll": "Utilice controles deslizantes para todas las opciones",
|
||||
"general": "General",
|
||||
"consoleLogLevel": "Nivel del registro",
|
||||
"shouldLogToConsole": "Registro de la consola",
|
||||
"developer": "Desarrollador",
|
||||
"antialiasProgressImages": "Imágenes del progreso de Antialias",
|
||||
"showProgressInViewer": "Mostrar las imágenes del progreso en el visor",
|
||||
"ui": "Interfaz del usuario",
|
||||
"generation": "Generación",
|
||||
"favoriteSchedulers": "Programadores favoritos",
|
||||
"favoriteSchedulersPlaceholder": "No hay programadores favoritos",
|
||||
"showAdvancedOptions": "Mostrar las opciones avanzadas",
|
||||
"alternateCanvasLayout": "Diseño alternativo del lienzo",
|
||||
"beta": "Beta",
|
||||
"enableNodesEditor": "Activar el editor de nodos",
|
||||
"experimental": "Experimental",
|
||||
"autoChangeDimensions": "Actualiza W/H a los valores predeterminados del modelo cuando se modifica"
|
||||
},
|
||||
"toast": {
|
||||
"tempFoldersEmptied": "Directorio temporal vaciado",
|
||||
"uploadFailed": "Error al subir archivo",
|
||||
"uploadFailedUnableToLoadDesc": "No se pudo cargar la imágen",
|
||||
"downloadImageStarted": "Descargando imágen",
|
||||
"imageCopied": "Imágen copiada",
|
||||
"imageLinkCopied": "Enlace de imágen copiado",
|
||||
"imageNotLoaded": "No se cargó la imágen",
|
||||
"imageNotLoadedDesc": "No se pudo encontrar la imagen",
|
||||
"imageSavedToGallery": "Imágen guardada en la galería",
|
||||
"canvasMerged": "Lienzo consolidado",
|
||||
"sentToImageToImage": "Enviar hacia Imagen a Imagen",
|
||||
"sentToUnifiedCanvas": "Enviar hacia Lienzo Consolidado",
|
||||
"parametersSet": "Parámetros establecidos",
|
||||
"parametersNotSet": "Parámetros no establecidos",
|
||||
"parametersNotSetDesc": "No se encontraron metadatos para esta imágen.",
|
||||
"parametersFailed": "Error cargando parámetros",
|
||||
"parametersFailedDesc": "No fue posible cargar la imagen inicial.",
|
||||
"seedSet": "Semilla establecida",
|
||||
"seedNotSet": "Semilla no establecida",
|
||||
"seedNotSetDesc": "No se encontró una semilla para esta imágen.",
|
||||
"promptSet": "Entrada establecida",
|
||||
"promptNotSet": "Entrada no establecida",
|
||||
"promptNotSetDesc": "No se encontró una entrada para esta imágen.",
|
||||
"upscalingFailed": "Error al aumentar tamaño de imagn",
|
||||
"faceRestoreFailed": "Restauración de rostro fallida",
|
||||
"metadataLoadFailed": "Error al cargar metadatos",
|
||||
"initialImageSet": "Imágen inicial establecida",
|
||||
"initialImageNotSet": "Imagen inicial no establecida",
|
||||
"initialImageNotSetDesc": "Error al establecer la imágen inicial",
|
||||
"serverError": "Error en el servidor",
|
||||
"disconnected": "Desconectado del servidor",
|
||||
"canceled": "Procesando la cancelación",
|
||||
"connected": "Conectado al servidor",
|
||||
"problemCopyingImageLink": "No se puede copiar el enlace de la imagen",
|
||||
"uploadFailedInvalidUploadDesc": "Debe ser una sola imagen PNG o JPEG",
|
||||
"parameterSet": "Conjunto de parámetros",
|
||||
"parameterNotSet": "Parámetro no configurado",
|
||||
"nodesSaved": "Nodos guardados",
|
||||
"nodesLoadedFailed": "Error al cargar los nodos",
|
||||
"nodesLoaded": "Nodos cargados",
|
||||
"nodesCleared": "Nodos borrados",
|
||||
"problemCopyingImage": "No se puede copiar la imagen",
|
||||
"nodesNotValidJSON": "JSON no válido",
|
||||
"nodesCorruptedGraph": "No se puede cargar. El gráfico parece estar dañado.",
|
||||
"nodesUnrecognizedTypes": "No se puede cargar. El gráfico tiene tipos no reconocidos",
|
||||
"nodesNotValidGraph": "Gráfico del nodo InvokeAI no válido",
|
||||
"nodesBrokenConnections": "No se puede cargar. Algunas conexiones están rotas."
|
||||
},
|
||||
"tooltip": {
|
||||
"feature": {
|
||||
"prompt": "Este campo tomará todo el texto de entrada, incluidos tanto los términos de contenido como los estilísticos. Si bien se pueden incluir pesos en la solicitud, los comandos/parámetros estándar de línea de comandos no funcionarán.",
|
||||
"gallery": "Conforme se generan nuevas invocaciones, los archivos del directorio de salida se mostrarán aquí. Las generaciones tienen opciones adicionales para configurar nuevas generaciones.",
|
||||
"other": "Estas opciones habilitarán modos de procesamiento alternativos para Invoke. 'Seamless mosaico' creará patrones repetitivos en la salida. 'Alta resolución' es la generación en dos pasos con img2img: use esta configuración cuando desee una imagen más grande y más coherente sin artefactos. tomar más tiempo de lo habitual txt2img.",
|
||||
"seed": "Los valores de semilla proporcionan un conjunto inicial de ruido que guían el proceso de eliminación de ruido y se pueden aleatorizar o rellenar con una semilla de una invocación anterior. La función Umbral se puede usar para mitigar resultados indeseables a valores CFG más altos (intente entre 0-10), y Perlin se puede usar para agregar ruido Perlin al proceso de eliminación de ruido. Ambos sirven para agregar variación a sus salidas.",
|
||||
"variations": "Pruebe una variación con una cantidad entre 0 y 1 para cambiar la imagen de salida para la semilla establecida. Se encuentran variaciones interesantes en la semilla entre 0.1 y 0.3.",
|
||||
"upscale": "Usando ESRGAN, puede aumentar la resolución de salida sin requerir un ancho/alto más alto en la generación inicial.",
|
||||
"faceCorrection": "Usando GFPGAN o Codeformer, la corrección de rostros intentará identificar rostros en las salidas y corregir cualquier defecto/anormalidad. Los valores de fuerza más altos aplicarán una presión correctiva más fuerte en las salidas, lo que resultará en rostros más atractivos. Con Codeformer, una mayor fidelidad intentará preservar la imagen original, a expensas de la fuerza de corrección de rostros.",
|
||||
"imageToImage": "Imagen a Imagen permite cargar una imagen inicial, que InvokeAI usará para guiar el proceso de generación, junto con una solicitud. Un valor más bajo para esta configuración se parecerá más a la imagen original. Se aceptan valores entre 0-1, y se recomienda un rango de .25-.75",
|
||||
"boundingBox": "La caja delimitadora es análoga a las configuraciones de Ancho y Alto para Texto a Imagen o Imagen a Imagen. Solo se procesará el área en la caja.",
|
||||
"seamCorrection": "Controla el manejo de parches visibles que pueden ocurrir cuando se pega una imagen generada de nuevo en el lienzo.",
|
||||
"infillAndScaling": "Administra los métodos de relleno (utilizados en áreas enmascaradas o borradas del lienzo) y la escala (útil para tamaños de caja delimitadora pequeños)."
|
||||
}
|
||||
},
|
||||
"unifiedCanvas": {
|
||||
"layer": "Capa",
|
||||
"base": "Base",
|
||||
"mask": "Máscara",
|
||||
"maskingOptions": "Opciones de máscara",
|
||||
"enableMask": "Habilitar Máscara",
|
||||
"preserveMaskedArea": "Preservar área enmascarada",
|
||||
"clearMask": "Limpiar máscara",
|
||||
"brush": "Pincel",
|
||||
"eraser": "Borrador",
|
||||
"fillBoundingBox": "Rellenar Caja Contenedora",
|
||||
"eraseBoundingBox": "Eliminar Caja Contenedora",
|
||||
"colorPicker": "Selector de color",
|
||||
"brushOptions": "Opciones de pincel",
|
||||
"brushSize": "Tamaño",
|
||||
"move": "Mover",
|
||||
"resetView": "Restablecer vista",
|
||||
"mergeVisible": "Consolidar vista",
|
||||
"saveToGallery": "Guardar en galería",
|
||||
"copyToClipboard": "Copiar al portapapeles",
|
||||
"downloadAsImage": "Descargar como imagen",
|
||||
"undo": "Deshacer",
|
||||
"redo": "Rehacer",
|
||||
"clearCanvas": "Limpiar lienzo",
|
||||
"canvasSettings": "Ajustes de lienzo",
|
||||
"showIntermediates": "Mostrar intermedios",
|
||||
"showGrid": "Mostrar cuadrícula",
|
||||
"snapToGrid": "Ajustar a cuadrícula",
|
||||
"darkenOutsideSelection": "Oscurecer fuera de la selección",
|
||||
"autoSaveToGallery": "Guardar automáticamente en galería",
|
||||
"saveBoxRegionOnly": "Guardar solo región dentro de la caja",
|
||||
"limitStrokesToBox": "Limitar trazos a la caja",
|
||||
"showCanvasDebugInfo": "Mostrar la información adicional del lienzo",
|
||||
"clearCanvasHistory": "Limpiar historial de lienzo",
|
||||
"clearHistory": "Limpiar historial",
|
||||
"clearCanvasHistoryMessage": "Limpiar el historial de lienzo también restablece completamente el lienzo unificado. Esto incluye todo el historial de deshacer/rehacer, las imágenes en el área de preparación y la capa base del lienzo.",
|
||||
"clearCanvasHistoryConfirm": "¿Está seguro de que desea limpiar el historial del lienzo?",
|
||||
"emptyTempImageFolder": "Vaciar directorio de imágenes temporales",
|
||||
"emptyFolder": "Vaciar directorio",
|
||||
"emptyTempImagesFolderMessage": "Vaciar el directorio de imágenes temporales también restablece completamente el lienzo unificado. Esto incluye todo el historial de deshacer/rehacer, las imágenes en el área de preparación y la capa base del lienzo.",
|
||||
"emptyTempImagesFolderConfirm": "¿Está seguro de que desea vaciar el directorio temporal?",
|
||||
"activeLayer": "Capa activa",
|
||||
"canvasScale": "Escala de lienzo",
|
||||
"boundingBox": "Caja contenedora",
|
||||
"scaledBoundingBox": "Caja contenedora escalada",
|
||||
"boundingBoxPosition": "Posición de caja contenedora",
|
||||
"canvasDimensions": "Dimensiones de lienzo",
|
||||
"canvasPosition": "Posición de lienzo",
|
||||
"cursorPosition": "Posición del cursor",
|
||||
"previous": "Anterior",
|
||||
"next": "Siguiente",
|
||||
"accept": "Aceptar",
|
||||
"showHide": "Mostrar/Ocultar",
|
||||
"discardAll": "Descartar todo",
|
||||
"betaClear": "Limpiar",
|
||||
"betaDarkenOutside": "Oscurecer fuera",
|
||||
"betaLimitToBox": "Limitar a caja",
|
||||
"betaPreserveMasked": "Preservar área enmascarada",
|
||||
"antialiasing": "Suavizado"
|
||||
},
|
||||
"accessibility": {
|
||||
"invokeProgressBar": "Activar la barra de progreso",
|
||||
"modelSelect": "Seleccionar modelo",
|
||||
"reset": "Reiniciar",
|
||||
"uploadImage": "Cargar imagen",
|
||||
"previousImage": "Imagen anterior",
|
||||
"nextImage": "Siguiente imagen",
|
||||
"useThisParameter": "Utiliza este parámetro",
|
||||
"copyMetadataJson": "Copiar los metadatos JSON",
|
||||
"exitViewer": "Salir del visor",
|
||||
"zoomIn": "Acercar",
|
||||
"zoomOut": "Alejar",
|
||||
"rotateCounterClockwise": "Girar en sentido antihorario",
|
||||
"rotateClockwise": "Girar en sentido horario",
|
||||
"flipHorizontally": "Voltear horizontalmente",
|
||||
"flipVertically": "Voltear verticalmente",
|
||||
"modifyConfig": "Modificar la configuración",
|
||||
"toggleAutoscroll": "Activar el autodesplazamiento",
|
||||
"toggleLogViewer": "Alternar el visor de registros",
|
||||
"showOptionsPanel": "Mostrar el panel lateral",
|
||||
"menu": "Menú"
|
||||
},
|
||||
"ui": {
|
||||
"hideProgressImages": "Ocultar el progreso de la imagen",
|
||||
"showProgressImages": "Mostrar el progreso de la imagen",
|
||||
"swapSizes": "Cambiar los tamaños",
|
||||
"lockRatio": "Proporción del bloqueo"
|
||||
},
|
||||
"nodes": {
|
||||
"showGraphNodes": "Mostrar la superposición de los gráficos",
|
||||
"zoomInNodes": "Acercar",
|
||||
"hideMinimapnodes": "Ocultar el minimapa",
|
||||
"fitViewportNodes": "Ajustar la vista",
|
||||
"zoomOutNodes": "Alejar",
|
||||
"hideGraphNodes": "Ocultar la superposición de los gráficos",
|
||||
"hideLegendNodes": "Ocultar la leyenda del tipo de campo",
|
||||
"showLegendNodes": "Mostrar la leyenda del tipo de campo",
|
||||
"showMinimapnodes": "Mostrar el minimapa",
|
||||
"reloadNodeTemplates": "Recargar las plantillas de nodos",
|
||||
"loadWorkflow": "Cargar el flujo de trabajo",
|
||||
"resetWorkflow": "Reiniciar e flujo de trabajo",
|
||||
"resetWorkflowDesc": "¿Está seguro de que deseas restablecer este flujo de trabajo?",
|
||||
"resetWorkflowDesc2": "Al reiniciar el flujo de trabajo se borrarán todos los nodos, aristas y detalles del flujo de trabajo.",
|
||||
"downloadWorkflow": "Descargar el flujo de trabajo en un archivo JSON"
|
||||
}
|
||||
}
|
||||
114
invokeai/frontend/web/dist/locales/fi.json
vendored
Normal file
114
invokeai/frontend/web/dist/locales/fi.json
vendored
Normal file
@@ -0,0 +1,114 @@
|
||||
{
|
||||
"accessibility": {
|
||||
"reset": "Resetoi",
|
||||
"useThisParameter": "Käytä tätä parametria",
|
||||
"modelSelect": "Mallin Valinta",
|
||||
"exitViewer": "Poistu katselimesta",
|
||||
"uploadImage": "Lataa kuva",
|
||||
"copyMetadataJson": "Kopioi metadata JSON:iin",
|
||||
"invokeProgressBar": "Invoken edistymispalkki",
|
||||
"nextImage": "Seuraava kuva",
|
||||
"previousImage": "Edellinen kuva",
|
||||
"zoomIn": "Lähennä",
|
||||
"flipHorizontally": "Käännä vaakasuoraan",
|
||||
"zoomOut": "Loitonna",
|
||||
"rotateCounterClockwise": "Kierrä vastapäivään",
|
||||
"rotateClockwise": "Kierrä myötäpäivään",
|
||||
"flipVertically": "Käännä pystysuoraan",
|
||||
"modifyConfig": "Muokkaa konfiguraatiota",
|
||||
"toggleAutoscroll": "Kytke automaattinen vieritys",
|
||||
"toggleLogViewer": "Kytke lokin katselutila",
|
||||
"showOptionsPanel": "Näytä asetukset"
|
||||
},
|
||||
"common": {
|
||||
"postProcessDesc2": "Erillinen käyttöliittymä tullaan julkaisemaan helpottaaksemme työnkulkua jälkikäsittelyssä.",
|
||||
"training": "Kouluta",
|
||||
"statusLoadingModel": "Ladataan mallia",
|
||||
"statusModelChanged": "Malli vaihdettu",
|
||||
"statusConvertingModel": "Muunnetaan mallia",
|
||||
"statusModelConverted": "Malli muunnettu",
|
||||
"langFrench": "Ranska",
|
||||
"langItalian": "Italia",
|
||||
"languagePickerLabel": "Kielen valinta",
|
||||
"hotkeysLabel": "Pikanäppäimet",
|
||||
"reportBugLabel": "Raportoi Bugista",
|
||||
"langPolish": "Puola",
|
||||
"langDutch": "Hollanti",
|
||||
"settingsLabel": "Asetukset",
|
||||
"githubLabel": "Github",
|
||||
"langGerman": "Saksa",
|
||||
"langPortuguese": "Portugali",
|
||||
"discordLabel": "Discord",
|
||||
"langEnglish": "Englanti",
|
||||
"langRussian": "Venäjä",
|
||||
"langUkranian": "Ukraina",
|
||||
"langSpanish": "Espanja",
|
||||
"upload": "Lataa",
|
||||
"statusMergedModels": "Mallit yhdistelty",
|
||||
"img2img": "Kuva kuvaksi",
|
||||
"nodes": "Solmut",
|
||||
"nodesDesc": "Solmupohjainen järjestelmä kuvien generoimiseen on parhaillaan kehitteillä. Pysy kuulolla päivityksistä tähän uskomattomaan ominaisuuteen liittyen.",
|
||||
"postProcessDesc1": "Invoke AI tarjoaa monenlaisia jälkikäsittelyominaisuukisa. Kuvan laadun skaalaus sekä kasvojen korjaus ovat jo saatavilla WebUI:ssä. Voit ottaa ne käyttöön lisäasetusten valikosta teksti kuvaksi sekä kuva kuvaksi -välilehdiltä. Voit myös suoraan prosessoida kuvia käyttämällä kuvan toimintapainikkeita nykyisen kuvan yläpuolella tai tarkastelussa.",
|
||||
"postprocessing": "Jälkikäsitellään",
|
||||
"postProcessing": "Jälkikäsitellään",
|
||||
"cancel": "Peruuta",
|
||||
"close": "Sulje",
|
||||
"accept": "Hyväksy",
|
||||
"statusConnected": "Yhdistetty",
|
||||
"statusError": "Virhe",
|
||||
"statusProcessingComplete": "Prosessointi valmis",
|
||||
"load": "Lataa",
|
||||
"back": "Takaisin",
|
||||
"statusGeneratingTextToImage": "Generoidaan tekstiä kuvaksi",
|
||||
"trainingDesc2": "InvokeAI tukee jo mukautettujen upotusten kouluttamista tekstin inversiolla käyttäen pääskriptiä.",
|
||||
"statusDisconnected": "Yhteys katkaistu",
|
||||
"statusPreparing": "Valmistellaan",
|
||||
"statusIterationComplete": "Iteraatio valmis",
|
||||
"statusMergingModels": "Yhdistellään malleja",
|
||||
"statusProcessingCanceled": "Valmistelu peruutettu",
|
||||
"statusSavingImage": "Tallennetaan kuvaa",
|
||||
"statusGeneratingImageToImage": "Generoidaan kuvaa kuvaksi",
|
||||
"statusRestoringFacesGFPGAN": "Korjataan kasvoja (GFPGAN)",
|
||||
"statusRestoringFacesCodeFormer": "Korjataan kasvoja (CodeFormer)",
|
||||
"statusGeneratingInpainting": "Generoidaan sisällemaalausta",
|
||||
"statusGeneratingOutpainting": "Generoidaan ulosmaalausta",
|
||||
"statusRestoringFaces": "Korjataan kasvoja",
|
||||
"loadingInvokeAI": "Ladataan Invoke AI:ta",
|
||||
"loading": "Ladataan",
|
||||
"statusGenerating": "Generoidaan",
|
||||
"txt2img": "Teksti kuvaksi",
|
||||
"trainingDesc1": "Erillinen työnkulku omien upotusten ja tarkastuspisteiden kouluttamiseksi käyttäen tekstin inversiota ja dreamboothia selaimen käyttöliittymässä.",
|
||||
"postProcessDesc3": "Invoke AI:n komentorivi tarjoaa paljon muita ominaisuuksia, kuten esimerkiksi Embiggenin.",
|
||||
"unifiedCanvas": "Yhdistetty kanvas",
|
||||
"statusGenerationComplete": "Generointi valmis"
|
||||
},
|
||||
"gallery": {
|
||||
"uploads": "Lataukset",
|
||||
"showUploads": "Näytä lataukset",
|
||||
"galleryImageResetSize": "Resetoi koko",
|
||||
"maintainAspectRatio": "Säilytä kuvasuhde",
|
||||
"galleryImageSize": "Kuvan koko",
|
||||
"showGenerations": "Näytä generaatiot",
|
||||
"singleColumnLayout": "Yhden sarakkeen asettelu",
|
||||
"generations": "Generoinnit",
|
||||
"gallerySettings": "Gallerian asetukset",
|
||||
"autoSwitchNewImages": "Vaihda uusiin kuviin automaattisesti",
|
||||
"allImagesLoaded": "Kaikki kuvat ladattu",
|
||||
"noImagesInGallery": "Ei kuvia galleriassa",
|
||||
"loadMore": "Lataa lisää"
|
||||
},
|
||||
"hotkeys": {
|
||||
"keyboardShortcuts": "näppäimistön pikavalinnat",
|
||||
"appHotkeys": "Sovelluksen pikanäppäimet",
|
||||
"generalHotkeys": "Yleiset pikanäppäimet",
|
||||
"galleryHotkeys": "Gallerian pikanäppäimet",
|
||||
"unifiedCanvasHotkeys": "Yhdistetyn kanvaan pikanäppäimet",
|
||||
"cancel": {
|
||||
"desc": "Peruuta kuvan luominen",
|
||||
"title": "Peruuta"
|
||||
},
|
||||
"invoke": {
|
||||
"desc": "Luo kuva"
|
||||
}
|
||||
}
|
||||
}
|
||||
531
invokeai/frontend/web/dist/locales/fr.json
vendored
Normal file
531
invokeai/frontend/web/dist/locales/fr.json
vendored
Normal file
@@ -0,0 +1,531 @@
|
||||
{
|
||||
"common": {
|
||||
"hotkeysLabel": "Raccourcis clavier",
|
||||
"languagePickerLabel": "Sélecteur de langue",
|
||||
"reportBugLabel": "Signaler un bug",
|
||||
"settingsLabel": "Paramètres",
|
||||
"img2img": "Image en image",
|
||||
"unifiedCanvas": "Canvas unifié",
|
||||
"nodes": "Nœuds",
|
||||
"langFrench": "Français",
|
||||
"nodesDesc": "Un système basé sur les nœuds pour la génération d'images est actuellement en développement. Restez à l'écoute pour des mises à jour à ce sujet.",
|
||||
"postProcessing": "Post-traitement",
|
||||
"postProcessDesc1": "Invoke AI offre une grande variété de fonctionnalités de post-traitement. Le redimensionnement d'images et la restauration de visages sont déjà disponibles dans la WebUI. Vous pouvez y accéder à partir du menu 'Options avancées' des onglets 'Texte vers image' et 'Image vers image'. Vous pouvez également traiter les images directement en utilisant les boutons d'action d'image au-dessus de l'affichage d'image actuel ou dans le visualiseur.",
|
||||
"postProcessDesc2": "Une interface dédiée sera bientôt disponible pour faciliter les workflows de post-traitement plus avancés.",
|
||||
"postProcessDesc3": "L'interface en ligne de commande d'Invoke AI offre diverses autres fonctionnalités, notamment Embiggen.",
|
||||
"training": "Formation",
|
||||
"trainingDesc1": "Un workflow dédié pour former vos propres embeddings et checkpoints en utilisant Textual Inversion et Dreambooth depuis l'interface web.",
|
||||
"trainingDesc2": "InvokeAI prend déjà en charge la formation d'embeddings personnalisés en utilisant Textual Inversion en utilisant le script principal.",
|
||||
"upload": "Télécharger",
|
||||
"close": "Fermer",
|
||||
"load": "Charger",
|
||||
"back": "Retour",
|
||||
"statusConnected": "En ligne",
|
||||
"statusDisconnected": "Hors ligne",
|
||||
"statusError": "Erreur",
|
||||
"statusPreparing": "Préparation",
|
||||
"statusProcessingCanceled": "Traitement annulé",
|
||||
"statusProcessingComplete": "Traitement terminé",
|
||||
"statusGenerating": "Génération",
|
||||
"statusGeneratingTextToImage": "Génération Texte vers Image",
|
||||
"statusGeneratingImageToImage": "Génération Image vers Image",
|
||||
"statusGeneratingInpainting": "Génération de réparation",
|
||||
"statusGeneratingOutpainting": "Génération de complétion",
|
||||
"statusGenerationComplete": "Génération terminée",
|
||||
"statusIterationComplete": "Itération terminée",
|
||||
"statusSavingImage": "Sauvegarde de l'image",
|
||||
"statusRestoringFaces": "Restauration des visages",
|
||||
"statusRestoringFacesGFPGAN": "Restauration des visages (GFPGAN)",
|
||||
"statusRestoringFacesCodeFormer": "Restauration des visages (CodeFormer)",
|
||||
"statusUpscaling": "Mise à échelle",
|
||||
"statusUpscalingESRGAN": "Mise à échelle (ESRGAN)",
|
||||
"statusLoadingModel": "Chargement du modèle",
|
||||
"statusModelChanged": "Modèle changé",
|
||||
"discordLabel": "Discord",
|
||||
"githubLabel": "Github",
|
||||
"accept": "Accepter",
|
||||
"statusMergingModels": "Mélange des modèles",
|
||||
"loadingInvokeAI": "Chargement de Invoke AI",
|
||||
"cancel": "Annuler",
|
||||
"langEnglish": "Anglais",
|
||||
"statusConvertingModel": "Conversion du modèle",
|
||||
"statusModelConverted": "Modèle converti",
|
||||
"loading": "Chargement",
|
||||
"statusMergedModels": "Modèles mélangés",
|
||||
"txt2img": "Texte vers image",
|
||||
"postprocessing": "Post-Traitement"
|
||||
},
|
||||
"gallery": {
|
||||
"generations": "Générations",
|
||||
"showGenerations": "Afficher les générations",
|
||||
"uploads": "Téléchargements",
|
||||
"showUploads": "Afficher les téléchargements",
|
||||
"galleryImageSize": "Taille de l'image",
|
||||
"galleryImageResetSize": "Réinitialiser la taille",
|
||||
"gallerySettings": "Paramètres de la galerie",
|
||||
"maintainAspectRatio": "Maintenir le rapport d'aspect",
|
||||
"autoSwitchNewImages": "Basculer automatiquement vers de nouvelles images",
|
||||
"singleColumnLayout": "Mise en page en colonne unique",
|
||||
"allImagesLoaded": "Toutes les images chargées",
|
||||
"loadMore": "Charger plus",
|
||||
"noImagesInGallery": "Aucune image dans la galerie"
|
||||
},
|
||||
"hotkeys": {
|
||||
"keyboardShortcuts": "Raccourcis clavier",
|
||||
"appHotkeys": "Raccourcis de l'application",
|
||||
"generalHotkeys": "Raccourcis généraux",
|
||||
"galleryHotkeys": "Raccourcis de la galerie",
|
||||
"unifiedCanvasHotkeys": "Raccourcis du canvas unifié",
|
||||
"invoke": {
|
||||
"title": "Invoquer",
|
||||
"desc": "Générer une image"
|
||||
},
|
||||
"cancel": {
|
||||
"title": "Annuler",
|
||||
"desc": "Annuler la génération d'image"
|
||||
},
|
||||
"focusPrompt": {
|
||||
"title": "Prompt de focus",
|
||||
"desc": "Mettre en focus la zone de saisie de la commande"
|
||||
},
|
||||
"toggleOptions": {
|
||||
"title": "Affichage des options",
|
||||
"desc": "Afficher et masquer le panneau d'options"
|
||||
},
|
||||
"pinOptions": {
|
||||
"title": "Epinglage des options",
|
||||
"desc": "Epingler le panneau d'options"
|
||||
},
|
||||
"toggleViewer": {
|
||||
"title": "Affichage de la visionneuse",
|
||||
"desc": "Afficher et masquer la visionneuse d'image"
|
||||
},
|
||||
"toggleGallery": {
|
||||
"title": "Affichage de la galerie",
|
||||
"desc": "Afficher et masquer la galerie"
|
||||
},
|
||||
"maximizeWorkSpace": {
|
||||
"title": "Maximiser la zone de travail",
|
||||
"desc": "Fermer les panneaux et maximiser la zone de travail"
|
||||
},
|
||||
"changeTabs": {
|
||||
"title": "Changer d'onglet",
|
||||
"desc": "Passer à un autre espace de travail"
|
||||
},
|
||||
"consoleToggle": {
|
||||
"title": "Affichage de la console",
|
||||
"desc": "Afficher et masquer la console"
|
||||
},
|
||||
"setPrompt": {
|
||||
"title": "Définir le prompt",
|
||||
"desc": "Utiliser le prompt de l'image actuelle"
|
||||
},
|
||||
"setSeed": {
|
||||
"title": "Définir la graine",
|
||||
"desc": "Utiliser la graine de l'image actuelle"
|
||||
},
|
||||
"setParameters": {
|
||||
"title": "Définir les paramètres",
|
||||
"desc": "Utiliser tous les paramètres de l'image actuelle"
|
||||
},
|
||||
"restoreFaces": {
|
||||
"title": "Restaurer les visages",
|
||||
"desc": "Restaurer l'image actuelle"
|
||||
},
|
||||
"upscale": {
|
||||
"title": "Agrandir",
|
||||
"desc": "Agrandir l'image actuelle"
|
||||
},
|
||||
"showInfo": {
|
||||
"title": "Afficher les informations",
|
||||
"desc": "Afficher les informations de métadonnées de l'image actuelle"
|
||||
},
|
||||
"sendToImageToImage": {
|
||||
"title": "Envoyer à l'image à l'image",
|
||||
"desc": "Envoyer l'image actuelle à l'image à l'image"
|
||||
},
|
||||
"deleteImage": {
|
||||
"title": "Supprimer l'image",
|
||||
"desc": "Supprimer l'image actuelle"
|
||||
},
|
||||
"closePanels": {
|
||||
"title": "Fermer les panneaux",
|
||||
"desc": "Fermer les panneaux ouverts"
|
||||
},
|
||||
"previousImage": {
|
||||
"title": "Image précédente",
|
||||
"desc": "Afficher l'image précédente dans la galerie"
|
||||
},
|
||||
"nextImage": {
|
||||
"title": "Image suivante",
|
||||
"desc": "Afficher l'image suivante dans la galerie"
|
||||
},
|
||||
"toggleGalleryPin": {
|
||||
"title": "Activer/désactiver l'épinglage de la galerie",
|
||||
"desc": "Épingle ou dépingle la galerie à l'interface"
|
||||
},
|
||||
"increaseGalleryThumbSize": {
|
||||
"title": "Augmenter la taille des miniatures de la galerie",
|
||||
"desc": "Augmente la taille des miniatures de la galerie"
|
||||
},
|
||||
"decreaseGalleryThumbSize": {
|
||||
"title": "Diminuer la taille des miniatures de la galerie",
|
||||
"desc": "Diminue la taille des miniatures de la galerie"
|
||||
},
|
||||
"selectBrush": {
|
||||
"title": "Sélectionner un pinceau",
|
||||
"desc": "Sélectionne le pinceau de la toile"
|
||||
},
|
||||
"selectEraser": {
|
||||
"title": "Sélectionner un gomme",
|
||||
"desc": "Sélectionne la gomme de la toile"
|
||||
},
|
||||
"decreaseBrushSize": {
|
||||
"title": "Diminuer la taille du pinceau",
|
||||
"desc": "Diminue la taille du pinceau/gomme de la toile"
|
||||
},
|
||||
"increaseBrushSize": {
|
||||
"title": "Augmenter la taille du pinceau",
|
||||
"desc": "Augmente la taille du pinceau/gomme de la toile"
|
||||
},
|
||||
"decreaseBrushOpacity": {
|
||||
"title": "Diminuer l'opacité du pinceau",
|
||||
"desc": "Diminue l'opacité du pinceau de la toile"
|
||||
},
|
||||
"increaseBrushOpacity": {
|
||||
"title": "Augmenter l'opacité du pinceau",
|
||||
"desc": "Augmente l'opacité du pinceau de la toile"
|
||||
},
|
||||
"moveTool": {
|
||||
"title": "Outil de déplacement",
|
||||
"desc": "Permet la navigation sur la toile"
|
||||
},
|
||||
"fillBoundingBox": {
|
||||
"title": "Remplir la boîte englobante",
|
||||
"desc": "Remplit la boîte englobante avec la couleur du pinceau"
|
||||
},
|
||||
"eraseBoundingBox": {
|
||||
"title": "Effacer la boîte englobante",
|
||||
"desc": "Efface la zone de la boîte englobante"
|
||||
},
|
||||
"colorPicker": {
|
||||
"title": "Sélectionnez le sélecteur de couleur",
|
||||
"desc": "Sélectionne le sélecteur de couleur de la toile"
|
||||
},
|
||||
"toggleSnap": {
|
||||
"title": "Basculer Snap",
|
||||
"desc": "Basculer Snap à la grille"
|
||||
},
|
||||
"quickToggleMove": {
|
||||
"title": "Basculer rapidement déplacer",
|
||||
"desc": "Basculer temporairement le mode Déplacer"
|
||||
},
|
||||
"toggleLayer": {
|
||||
"title": "Basculer la couche",
|
||||
"desc": "Basculer la sélection de la couche masque/base"
|
||||
},
|
||||
"clearMask": {
|
||||
"title": "Effacer le masque",
|
||||
"desc": "Effacer entièrement le masque"
|
||||
},
|
||||
"hideMask": {
|
||||
"title": "Masquer le masque",
|
||||
"desc": "Masquer et démasquer le masque"
|
||||
},
|
||||
"showHideBoundingBox": {
|
||||
"title": "Afficher/Masquer la boîte englobante",
|
||||
"desc": "Basculer la visibilité de la boîte englobante"
|
||||
},
|
||||
"mergeVisible": {
|
||||
"title": "Fusionner visible",
|
||||
"desc": "Fusionner toutes les couches visibles de la toile"
|
||||
},
|
||||
"saveToGallery": {
|
||||
"title": "Enregistrer dans la galerie",
|
||||
"desc": "Enregistrer la toile actuelle dans la galerie"
|
||||
},
|
||||
"copyToClipboard": {
|
||||
"title": "Copier dans le presse-papiers",
|
||||
"desc": "Copier la toile actuelle dans le presse-papiers"
|
||||
},
|
||||
"downloadImage": {
|
||||
"title": "Télécharger l'image",
|
||||
"desc": "Télécharger la toile actuelle"
|
||||
},
|
||||
"undoStroke": {
|
||||
"title": "Annuler le trait",
|
||||
"desc": "Annuler un coup de pinceau"
|
||||
},
|
||||
"redoStroke": {
|
||||
"title": "Rétablir le trait",
|
||||
"desc": "Rétablir un coup de pinceau"
|
||||
},
|
||||
"resetView": {
|
||||
"title": "Réinitialiser la vue",
|
||||
"desc": "Réinitialiser la vue de la toile"
|
||||
},
|
||||
"previousStagingImage": {
|
||||
"title": "Image de mise en scène précédente",
|
||||
"desc": "Image précédente de la zone de mise en scène"
|
||||
},
|
||||
"nextStagingImage": {
|
||||
"title": "Image de mise en scène suivante",
|
||||
"desc": "Image suivante de la zone de mise en scène"
|
||||
},
|
||||
"acceptStagingImage": {
|
||||
"title": "Accepter l'image de mise en scène",
|
||||
"desc": "Accepter l'image actuelle de la zone de mise en scène"
|
||||
}
|
||||
},
|
||||
"modelManager": {
|
||||
"modelManager": "Gestionnaire de modèle",
|
||||
"model": "Modèle",
|
||||
"allModels": "Tous les modèles",
|
||||
"checkpointModels": "Points de contrôle",
|
||||
"diffusersModels": "Diffuseurs",
|
||||
"safetensorModels": "SafeTensors",
|
||||
"modelAdded": "Modèle ajouté",
|
||||
"modelUpdated": "Modèle mis à jour",
|
||||
"modelEntryDeleted": "Entrée de modèle supprimée",
|
||||
"cannotUseSpaces": "Ne peut pas utiliser d'espaces",
|
||||
"addNew": "Ajouter un nouveau",
|
||||
"addNewModel": "Ajouter un nouveau modèle",
|
||||
"addCheckpointModel": "Ajouter un modèle de point de contrôle / SafeTensor",
|
||||
"addDiffuserModel": "Ajouter des diffuseurs",
|
||||
"addManually": "Ajouter manuellement",
|
||||
"manual": "Manuel",
|
||||
"name": "Nom",
|
||||
"nameValidationMsg": "Entrez un nom pour votre modèle",
|
||||
"description": "Description",
|
||||
"descriptionValidationMsg": "Ajoutez une description pour votre modèle",
|
||||
"config": "Config",
|
||||
"configValidationMsg": "Chemin vers le fichier de configuration de votre modèle.",
|
||||
"modelLocation": "Emplacement du modèle",
|
||||
"modelLocationValidationMsg": "Chemin vers où votre modèle est situé localement.",
|
||||
"repo_id": "ID de dépôt",
|
||||
"repoIDValidationMsg": "Dépôt en ligne de votre modèle",
|
||||
"vaeLocation": "Emplacement VAE",
|
||||
"vaeLocationValidationMsg": "Chemin vers où votre VAE est situé.",
|
||||
"vaeRepoID": "ID de dépôt VAE",
|
||||
"vaeRepoIDValidationMsg": "Dépôt en ligne de votre VAE",
|
||||
"width": "Largeur",
|
||||
"widthValidationMsg": "Largeur par défaut de votre modèle.",
|
||||
"height": "Hauteur",
|
||||
"heightValidationMsg": "Hauteur par défaut de votre modèle.",
|
||||
"addModel": "Ajouter un modèle",
|
||||
"updateModel": "Mettre à jour le modèle",
|
||||
"availableModels": "Modèles disponibles",
|
||||
"search": "Rechercher",
|
||||
"load": "Charger",
|
||||
"active": "actif",
|
||||
"notLoaded": "non chargé",
|
||||
"cached": "en cache",
|
||||
"checkpointFolder": "Dossier de point de contrôle",
|
||||
"clearCheckpointFolder": "Effacer le dossier de point de contrôle",
|
||||
"findModels": "Trouver des modèles",
|
||||
"scanAgain": "Scanner à nouveau",
|
||||
"modelsFound": "Modèles trouvés",
|
||||
"selectFolder": "Sélectionner un dossier",
|
||||
"selected": "Sélectionné",
|
||||
"selectAll": "Tout sélectionner",
|
||||
"deselectAll": "Tout désélectionner",
|
||||
"showExisting": "Afficher existant",
|
||||
"addSelected": "Ajouter sélectionné",
|
||||
"modelExists": "Modèle existant",
|
||||
"selectAndAdd": "Sélectionner et ajouter les modèles listés ci-dessous",
|
||||
"noModelsFound": "Aucun modèle trouvé",
|
||||
"delete": "Supprimer",
|
||||
"deleteModel": "Supprimer le modèle",
|
||||
"deleteConfig": "Supprimer la configuration",
|
||||
"deleteMsg1": "Voulez-vous vraiment supprimer cette entrée de modèle dans InvokeAI ?",
|
||||
"deleteMsg2": "Cela n'effacera pas le fichier de point de contrôle du modèle de votre disque. Vous pouvez les réajouter si vous le souhaitez.",
|
||||
"formMessageDiffusersModelLocation": "Emplacement du modèle de diffuseurs",
|
||||
"formMessageDiffusersModelLocationDesc": "Veuillez en entrer au moins un.",
|
||||
"formMessageDiffusersVAELocation": "Emplacement VAE",
|
||||
"formMessageDiffusersVAELocationDesc": "Si non fourni, InvokeAI recherchera le fichier VAE à l'emplacement du modèle donné ci-dessus."
|
||||
},
|
||||
"parameters": {
|
||||
"images": "Images",
|
||||
"steps": "Etapes",
|
||||
"cfgScale": "CFG Echelle",
|
||||
"width": "Largeur",
|
||||
"height": "Hauteur",
|
||||
"seed": "Graine",
|
||||
"randomizeSeed": "Graine Aléatoire",
|
||||
"shuffle": "Mélanger",
|
||||
"noiseThreshold": "Seuil de Bruit",
|
||||
"perlinNoise": "Bruit de Perlin",
|
||||
"variations": "Variations",
|
||||
"variationAmount": "Montant de Variation",
|
||||
"seedWeights": "Poids des Graines",
|
||||
"faceRestoration": "Restauration de Visage",
|
||||
"restoreFaces": "Restaurer les Visages",
|
||||
"type": "Type",
|
||||
"strength": "Force",
|
||||
"upscaling": "Agrandissement",
|
||||
"upscale": "Agrandir",
|
||||
"upscaleImage": "Image en Agrandissement",
|
||||
"scale": "Echelle",
|
||||
"otherOptions": "Autres Options",
|
||||
"seamlessTiling": "Carreau Sans Joint",
|
||||
"hiresOptim": "Optimisation Haute Résolution",
|
||||
"imageFit": "Ajuster Image Initiale à la Taille de Sortie",
|
||||
"codeformerFidelity": "Fidélité",
|
||||
"scaleBeforeProcessing": "Echelle Avant Traitement",
|
||||
"scaledWidth": "Larg. Échelle",
|
||||
"scaledHeight": "Haut. Échelle",
|
||||
"infillMethod": "Méthode de Remplissage",
|
||||
"tileSize": "Taille des Tuiles",
|
||||
"boundingBoxHeader": "Boîte Englobante",
|
||||
"seamCorrectionHeader": "Correction des Joints",
|
||||
"infillScalingHeader": "Remplissage et Mise à l'Échelle",
|
||||
"img2imgStrength": "Force de l'Image à l'Image",
|
||||
"toggleLoopback": "Activer/Désactiver la Boucle",
|
||||
"sendTo": "Envoyer à",
|
||||
"sendToImg2Img": "Envoyer à Image à Image",
|
||||
"sendToUnifiedCanvas": "Envoyer au Canvas Unifié",
|
||||
"copyImage": "Copier Image",
|
||||
"copyImageToLink": "Copier l'Image en Lien",
|
||||
"downloadImage": "Télécharger Image",
|
||||
"openInViewer": "Ouvrir dans le visualiseur",
|
||||
"closeViewer": "Fermer le visualiseur",
|
||||
"usePrompt": "Utiliser la suggestion",
|
||||
"useSeed": "Utiliser la graine",
|
||||
"useAll": "Tout utiliser",
|
||||
"useInitImg": "Utiliser l'image initiale",
|
||||
"info": "Info",
|
||||
"initialImage": "Image initiale",
|
||||
"showOptionsPanel": "Afficher le panneau d'options"
|
||||
},
|
||||
"settings": {
|
||||
"models": "Modèles",
|
||||
"displayInProgress": "Afficher les images en cours",
|
||||
"saveSteps": "Enregistrer les images tous les n étapes",
|
||||
"confirmOnDelete": "Confirmer la suppression",
|
||||
"displayHelpIcons": "Afficher les icônes d'aide",
|
||||
"enableImageDebugging": "Activer le débogage d'image",
|
||||
"resetWebUI": "Réinitialiser l'interface Web",
|
||||
"resetWebUIDesc1": "Réinitialiser l'interface Web ne réinitialise que le cache local du navigateur de vos images et de vos paramètres enregistrés. Cela n'efface pas les images du disque.",
|
||||
"resetWebUIDesc2": "Si les images ne s'affichent pas dans la galerie ou si quelque chose d'autre ne fonctionne pas, veuillez essayer de réinitialiser avant de soumettre une demande sur GitHub.",
|
||||
"resetComplete": "L'interface Web a été réinitialisée. Rafraîchissez la page pour recharger."
|
||||
},
|
||||
"toast": {
|
||||
"tempFoldersEmptied": "Dossiers temporaires vidés",
|
||||
"uploadFailed": "Téléchargement échoué",
|
||||
"uploadFailedUnableToLoadDesc": "Impossible de charger le fichier",
|
||||
"downloadImageStarted": "Téléchargement de l'image démarré",
|
||||
"imageCopied": "Image copiée",
|
||||
"imageLinkCopied": "Lien d'image copié",
|
||||
"imageNotLoaded": "Aucune image chargée",
|
||||
"imageNotLoadedDesc": "Aucune image trouvée pour envoyer à module d'image",
|
||||
"imageSavedToGallery": "Image enregistrée dans la galerie",
|
||||
"canvasMerged": "Canvas fusionné",
|
||||
"sentToImageToImage": "Envoyé à Image à Image",
|
||||
"sentToUnifiedCanvas": "Envoyé à Canvas unifié",
|
||||
"parametersSet": "Paramètres définis",
|
||||
"parametersNotSet": "Paramètres non définis",
|
||||
"parametersNotSetDesc": "Aucune métadonnée trouvée pour cette image.",
|
||||
"parametersFailed": "Problème de chargement des paramètres",
|
||||
"parametersFailedDesc": "Impossible de charger l'image d'initiation.",
|
||||
"seedSet": "Graine définie",
|
||||
"seedNotSet": "Graine non définie",
|
||||
"seedNotSetDesc": "Impossible de trouver la graine pour cette image.",
|
||||
"promptSet": "Invite définie",
|
||||
"promptNotSet": "Invite non définie",
|
||||
"promptNotSetDesc": "Impossible de trouver l'invite pour cette image.",
|
||||
"upscalingFailed": "Échec de la mise à l'échelle",
|
||||
"faceRestoreFailed": "Échec de la restauration du visage",
|
||||
"metadataLoadFailed": "Échec du chargement des métadonnées",
|
||||
"initialImageSet": "Image initiale définie",
|
||||
"initialImageNotSet": "Image initiale non définie",
|
||||
"initialImageNotSetDesc": "Impossible de charger l'image initiale"
|
||||
},
|
||||
"tooltip": {
|
||||
"feature": {
|
||||
"prompt": "Ceci est le champ prompt. Le prompt inclut des objets de génération et des termes stylistiques. Vous pouvez également ajouter un poids (importance du jeton) dans le prompt, mais les commandes CLI et les paramètres ne fonctionneront pas.",
|
||||
"gallery": "La galerie affiche les générations à partir du dossier de sortie à mesure qu'elles sont créées. Les paramètres sont stockés dans des fichiers et accessibles via le menu contextuel.",
|
||||
"other": "Ces options activent des modes de traitement alternatifs pour Invoke. 'Tuilage seamless' créera des motifs répétitifs dans la sortie. 'Haute résolution' est la génération en deux étapes avec img2img : utilisez ce paramètre lorsque vous souhaitez une image plus grande et plus cohérente sans artefacts. Cela prendra plus de temps que d'habitude txt2img.",
|
||||
"seed": "La valeur de grain affecte le bruit initial à partir duquel l'image est formée. Vous pouvez utiliser les graines déjà existantes provenant d'images précédentes. 'Seuil de bruit' est utilisé pour atténuer les artefacts à des valeurs CFG élevées (essayez la plage de 0 à 10), et Perlin pour ajouter du bruit Perlin pendant la génération : les deux servent à ajouter de la variété à vos sorties.",
|
||||
"variations": "Essayez une variation avec une valeur comprise entre 0,1 et 1,0 pour changer le résultat pour une graine donnée. Des variations intéressantes de la graine sont entre 0,1 et 0,3.",
|
||||
"upscale": "Utilisez ESRGAN pour agrandir l'image immédiatement après la génération.",
|
||||
"faceCorrection": "Correction de visage avec GFPGAN ou Codeformer : l'algorithme détecte les visages dans l'image et corrige tout défaut. La valeur élevée changera plus l'image, ce qui donnera des visages plus attirants. Codeformer avec une fidélité plus élevée préserve l'image originale au prix d'une correction de visage plus forte.",
|
||||
"imageToImage": "Image to Image charge n'importe quelle image en tant qu'initiale, qui est ensuite utilisée pour générer une nouvelle avec le prompt. Plus la valeur est élevée, plus l'image de résultat changera. Des valeurs de 0,0 à 1,0 sont possibles, la plage recommandée est de 0,25 à 0,75",
|
||||
"boundingBox": "La boîte englobante est la même que les paramètres Largeur et Hauteur pour Texte à Image ou Image à Image. Seulement la zone dans la boîte sera traitée.",
|
||||
"seamCorrection": "Contrôle la gestion des coutures visibles qui se produisent entre les images générées sur la toile.",
|
||||
"infillAndScaling": "Gérer les méthodes de remplissage (utilisées sur les zones masquées ou effacées de la toile) et le redimensionnement (utile pour les petites tailles de boîte englobante)."
|
||||
}
|
||||
},
|
||||
"unifiedCanvas": {
|
||||
"layer": "Couche",
|
||||
"base": "Base",
|
||||
"mask": "Masque",
|
||||
"maskingOptions": "Options de masquage",
|
||||
"enableMask": "Activer le masque",
|
||||
"preserveMaskedArea": "Préserver la zone masquée",
|
||||
"clearMask": "Effacer le masque",
|
||||
"brush": "Pinceau",
|
||||
"eraser": "Gomme",
|
||||
"fillBoundingBox": "Remplir la boîte englobante",
|
||||
"eraseBoundingBox": "Effacer la boîte englobante",
|
||||
"colorPicker": "Sélecteur de couleur",
|
||||
"brushOptions": "Options de pinceau",
|
||||
"brushSize": "Taille",
|
||||
"move": "Déplacer",
|
||||
"resetView": "Réinitialiser la vue",
|
||||
"mergeVisible": "Fusionner les visibles",
|
||||
"saveToGallery": "Enregistrer dans la galerie",
|
||||
"copyToClipboard": "Copier dans le presse-papiers",
|
||||
"downloadAsImage": "Télécharger en tant qu'image",
|
||||
"undo": "Annuler",
|
||||
"redo": "Refaire",
|
||||
"clearCanvas": "Effacer le canvas",
|
||||
"canvasSettings": "Paramètres du canvas",
|
||||
"showIntermediates": "Afficher les intermédiaires",
|
||||
"showGrid": "Afficher la grille",
|
||||
"snapToGrid": "Aligner sur la grille",
|
||||
"darkenOutsideSelection": "Assombrir à l'extérieur de la sélection",
|
||||
"autoSaveToGallery": "Enregistrement automatique dans la galerie",
|
||||
"saveBoxRegionOnly": "Enregistrer uniquement la région de la boîte",
|
||||
"limitStrokesToBox": "Limiter les traits à la boîte",
|
||||
"showCanvasDebugInfo": "Afficher les informations de débogage du canvas",
|
||||
"clearCanvasHistory": "Effacer l'historique du canvas",
|
||||
"clearHistory": "Effacer l'historique",
|
||||
"clearCanvasHistoryMessage": "Effacer l'historique du canvas laisse votre canvas actuel intact, mais efface de manière irréversible l'historique annuler et refaire.",
|
||||
"clearCanvasHistoryConfirm": "Voulez-vous vraiment effacer l'historique du canvas ?",
|
||||
"emptyTempImageFolder": "Vider le dossier d'images temporaires",
|
||||
"emptyFolder": "Vider le dossier",
|
||||
"emptyTempImagesFolderMessage": "Vider le dossier d'images temporaires réinitialise également complètement le canvas unifié. Cela inclut tout l'historique annuler/refaire, les images dans la zone de mise en attente et la couche de base du canvas.",
|
||||
"emptyTempImagesFolderConfirm": "Voulez-vous vraiment vider le dossier temporaire ?",
|
||||
"activeLayer": "Calque actif",
|
||||
"canvasScale": "Échelle du canevas",
|
||||
"boundingBox": "Boîte englobante",
|
||||
"scaledBoundingBox": "Boîte englobante mise à l'échelle",
|
||||
"boundingBoxPosition": "Position de la boîte englobante",
|
||||
"canvasDimensions": "Dimensions du canevas",
|
||||
"canvasPosition": "Position du canevas",
|
||||
"cursorPosition": "Position du curseur",
|
||||
"previous": "Précédent",
|
||||
"next": "Suivant",
|
||||
"accept": "Accepter",
|
||||
"showHide": "Afficher/Masquer",
|
||||
"discardAll": "Tout abandonner",
|
||||
"betaClear": "Effacer",
|
||||
"betaDarkenOutside": "Assombrir à l'extérieur",
|
||||
"betaLimitToBox": "Limiter à la boîte",
|
||||
"betaPreserveMasked": "Conserver masqué"
|
||||
},
|
||||
"accessibility": {
|
||||
"uploadImage": "Charger une image",
|
||||
"reset": "Réinitialiser",
|
||||
"nextImage": "Image suivante",
|
||||
"previousImage": "Image précédente",
|
||||
"useThisParameter": "Utiliser ce paramètre",
|
||||
"zoomIn": "Zoom avant",
|
||||
"zoomOut": "Zoom arrière",
|
||||
"showOptionsPanel": "Montrer la page d'options",
|
||||
"modelSelect": "Choix du modèle",
|
||||
"invokeProgressBar": "Barre de Progression Invoke",
|
||||
"copyMetadataJson": "Copie des métadonnées JSON",
|
||||
"menu": "Menu"
|
||||
}
|
||||
}
|
||||
575
invokeai/frontend/web/dist/locales/he.json
vendored
Normal file
575
invokeai/frontend/web/dist/locales/he.json
vendored
Normal file
@@ -0,0 +1,575 @@
|
||||
{
|
||||
"modelManager": {
|
||||
"cannotUseSpaces": "לא ניתן להשתמש ברווחים",
|
||||
"addNew": "הוסף חדש",
|
||||
"vaeLocationValidationMsg": "נתיב למקום שבו ממוקם ה- VAE שלך.",
|
||||
"height": "גובה",
|
||||
"load": "טען",
|
||||
"search": "חיפוש",
|
||||
"heightValidationMsg": "גובה ברירת המחדל של המודל שלך.",
|
||||
"addNewModel": "הוסף מודל חדש",
|
||||
"allModels": "כל המודלים",
|
||||
"checkpointModels": "נקודות ביקורת",
|
||||
"diffusersModels": "מפזרים",
|
||||
"safetensorModels": "טנסורים בטוחים",
|
||||
"modelAdded": "מודל התווסף",
|
||||
"modelUpdated": "מודל עודכן",
|
||||
"modelEntryDeleted": "רשומת המודל נמחקה",
|
||||
"addCheckpointModel": "הוסף נקודת ביקורת / מודל טנסור בטוח",
|
||||
"addDiffuserModel": "הוסף מפזרים",
|
||||
"addManually": "הוספה ידנית",
|
||||
"manual": "ידני",
|
||||
"name": "שם",
|
||||
"description": "תיאור",
|
||||
"descriptionValidationMsg": "הוסף תיאור למודל שלך",
|
||||
"config": "תצורה",
|
||||
"configValidationMsg": "נתיב לקובץ התצורה של המודל שלך.",
|
||||
"modelLocation": "מיקום המודל",
|
||||
"modelLocationValidationMsg": "נתיב למקום שבו המודל שלך ממוקם באופן מקומי.",
|
||||
"repo_id": "מזהה מאגר",
|
||||
"repoIDValidationMsg": "מאגר מקוון של המודל שלך",
|
||||
"vaeLocation": "מיקום VAE",
|
||||
"vaeRepoIDValidationMsg": "המאגר המקוון של VAE שלך",
|
||||
"width": "רוחב",
|
||||
"widthValidationMsg": "רוחב ברירת המחדל של המודל שלך.",
|
||||
"addModel": "הוסף מודל",
|
||||
"updateModel": "עדכן מודל",
|
||||
"active": "פעיל",
|
||||
"modelsFound": "מודלים נמצאו",
|
||||
"cached": "נשמר במטמון",
|
||||
"checkpointFolder": "תיקיית נקודות ביקורת",
|
||||
"findModels": "מצא מודלים",
|
||||
"scanAgain": "סרוק מחדש",
|
||||
"selectFolder": "בחירת תיקייה",
|
||||
"selected": "נבחר",
|
||||
"selectAll": "בחר הכל",
|
||||
"deselectAll": "ביטול בחירת הכל",
|
||||
"showExisting": "הצג קיים",
|
||||
"addSelected": "הוסף פריטים שנבחרו",
|
||||
"modelExists": "המודל קיים",
|
||||
"selectAndAdd": "בחר והוסך מודלים המפורטים להלן",
|
||||
"deleteModel": "מחיקת מודל",
|
||||
"deleteConfig": "מחיקת תצורה",
|
||||
"formMessageDiffusersModelLocation": "מיקום מפזרי המודל",
|
||||
"formMessageDiffusersModelLocationDesc": "נא להזין לפחות אחד.",
|
||||
"convertToDiffusersHelpText5": "אנא ודא/י שיש לך מספיק מקום בדיסק. גדלי מודלים בדרך כלל הינם בין 4GB-7GB.",
|
||||
"convertToDiffusersHelpText1": "מודל זה יומר לפורמט 🧨 המפזרים.",
|
||||
"convertToDiffusersHelpText2": "תהליך זה יחליף את הרשומה של מנהל המודלים שלך בגרסת המפזרים של אותו המודל.",
|
||||
"convertToDiffusersHelpText6": "האם ברצונך להמיר מודל זה?",
|
||||
"convertToDiffusersSaveLocation": "שמירת מיקום",
|
||||
"inpainting": "v1 צביעת תוך",
|
||||
"statusConverting": "ממיר",
|
||||
"modelConverted": "מודל הומר",
|
||||
"sameFolder": "אותה תיקיה",
|
||||
"custom": "התאמה אישית",
|
||||
"merge": "מזג",
|
||||
"modelsMerged": "מודלים מוזגו",
|
||||
"mergeModels": "מזג מודלים",
|
||||
"modelOne": "מודל 1",
|
||||
"customSaveLocation": "מיקום שמירה מותאם אישית",
|
||||
"alpha": "אלפא",
|
||||
"mergedModelSaveLocation": "שמירת מיקום",
|
||||
"mergedModelCustomSaveLocation": "נתיב מותאם אישית",
|
||||
"ignoreMismatch": "התעלמות מאי-התאמות בין מודלים שנבחרו",
|
||||
"modelMergeHeaderHelp1": "ניתן למזג עד שלושה מודלים שונים כדי ליצור שילוב שמתאים לצרכים שלכם.",
|
||||
"modelMergeAlphaHelp": "אלפא שולט בחוזק מיזוג עבור המודלים. ערכי אלפא נמוכים יותר מובילים להשפעה נמוכה יותר של המודל השני.",
|
||||
"nameValidationMsg": "הכנס שם למודל שלך",
|
||||
"vaeRepoID": "מזהה מאגר ה VAE",
|
||||
"modelManager": "מנהל המודלים",
|
||||
"model": "מודל",
|
||||
"availableModels": "מודלים זמינים",
|
||||
"notLoaded": "לא נטען",
|
||||
"clearCheckpointFolder": "נקה את תיקיית נקודות הביקורת",
|
||||
"noModelsFound": "לא נמצאו מודלים",
|
||||
"delete": "מחיקה",
|
||||
"deleteMsg1": "האם אתה בטוח שברצונך למחוק רשומת מודל זו מ- InvokeAI?",
|
||||
"deleteMsg2": "פעולה זו לא תמחק את קובץ נקודת הביקורת מהדיסק שלך. ניתן לקרוא אותם מחדש במידת הצורך.",
|
||||
"formMessageDiffusersVAELocation": "מיקום VAE",
|
||||
"formMessageDiffusersVAELocationDesc": "במידה ולא מסופק, InvokeAI תחפש את קובץ ה-VAE במיקום המודל המופיע לעיל.",
|
||||
"convertToDiffusers": "המרה למפזרים",
|
||||
"convert": "המרה",
|
||||
"modelTwo": "מודל 2",
|
||||
"modelThree": "מודל 3",
|
||||
"mergedModelName": "שם מודל ממוזג",
|
||||
"v1": "v1",
|
||||
"invokeRoot": "תיקיית InvokeAI",
|
||||
"customConfig": "תצורה מותאמת אישית",
|
||||
"pathToCustomConfig": "נתיב לתצורה מותאמת אישית",
|
||||
"interpolationType": "סוג אינטרפולציה",
|
||||
"invokeAIFolder": "תיקיית InvokeAI",
|
||||
"sigmoid": "סיגמואיד",
|
||||
"weightedSum": "סכום משוקלל",
|
||||
"modelMergeHeaderHelp2": "רק מפזרים זמינים למיזוג. אם ברצונך למזג מודל של נקודת ביקורת, המר אותו תחילה למפזרים.",
|
||||
"inverseSigmoid": "הפוך סיגמואיד",
|
||||
"convertToDiffusersHelpText3": "קובץ נקודת הביקורת שלך בדיסק לא יימחק או ישונה בכל מקרה. אתה יכול להוסיף את נקודת הביקורת שלך למנהל המודלים שוב אם תרצה בכך.",
|
||||
"convertToDiffusersHelpText4": "זהו תהליך חד פעמי בלבד. התהליך עשוי לקחת בסביבות 30-60 שניות, תלוי במפרט המחשב שלך.",
|
||||
"modelMergeInterpAddDifferenceHelp": "במצב זה, מודל 3 מופחת תחילה ממודל 2. הגרסה המתקבלת משולבת עם מודל 1 עם קצב האלפא שנקבע לעיל."
|
||||
},
|
||||
"common": {
|
||||
"nodesDesc": "מערכת מבוססת צמתים עבור יצירת תמונות עדיין תחת פיתוח. השארו קשובים לעדכונים עבור הפיצ׳ר המדהים הזה.",
|
||||
"languagePickerLabel": "בחירת שפה",
|
||||
"githubLabel": "גיטהאב",
|
||||
"discordLabel": "דיסקורד",
|
||||
"settingsLabel": "הגדרות",
|
||||
"langEnglish": "אנגלית",
|
||||
"langDutch": "הולנדית",
|
||||
"langArabic": "ערבית",
|
||||
"langFrench": "צרפתית",
|
||||
"langGerman": "גרמנית",
|
||||
"langJapanese": "יפנית",
|
||||
"langBrPortuguese": "פורטוגזית",
|
||||
"langRussian": "רוסית",
|
||||
"langSimplifiedChinese": "סינית",
|
||||
"langUkranian": "אוקראינית",
|
||||
"langSpanish": "ספרדית",
|
||||
"img2img": "תמונה לתמונה",
|
||||
"unifiedCanvas": "קנבס מאוחד",
|
||||
"nodes": "צמתים",
|
||||
"postProcessing": "לאחר עיבוד",
|
||||
"postProcessDesc2": "תצוגה ייעודית תשוחרר בקרוב על מנת לתמוך בתהליכים ועיבודים מורכבים.",
|
||||
"postProcessDesc3": "ממשק שורת הפקודה של Invoke AI מציע תכונות שונות אחרות כולל Embiggen.",
|
||||
"close": "סגירה",
|
||||
"statusConnected": "מחובר",
|
||||
"statusDisconnected": "מנותק",
|
||||
"statusError": "שגיאה",
|
||||
"statusPreparing": "בהכנה",
|
||||
"statusProcessingCanceled": "עיבוד בוטל",
|
||||
"statusProcessingComplete": "עיבוד הסתיים",
|
||||
"statusGenerating": "מייצר",
|
||||
"statusGeneratingTextToImage": "מייצר טקסט לתמונה",
|
||||
"statusGeneratingImageToImage": "מייצר תמונה לתמונה",
|
||||
"statusGeneratingInpainting": "מייצר ציור לתוך",
|
||||
"statusGeneratingOutpainting": "מייצר ציור החוצה",
|
||||
"statusIterationComplete": "איטרציה הסתיימה",
|
||||
"statusRestoringFaces": "משחזר פרצופים",
|
||||
"statusRestoringFacesCodeFormer": "משחזר פרצופים (CodeFormer)",
|
||||
"statusUpscaling": "העלאת קנה מידה",
|
||||
"statusUpscalingESRGAN": "העלאת קנה מידה (ESRGAN)",
|
||||
"statusModelChanged": "מודל השתנה",
|
||||
"statusConvertingModel": "ממיר מודל",
|
||||
"statusModelConverted": "מודל הומר",
|
||||
"statusMergingModels": "מיזוג מודלים",
|
||||
"statusMergedModels": "מודלים מוזגו",
|
||||
"hotkeysLabel": "מקשים חמים",
|
||||
"reportBugLabel": "דווח באג",
|
||||
"langItalian": "איטלקית",
|
||||
"upload": "העלאה",
|
||||
"langPolish": "פולנית",
|
||||
"training": "אימון",
|
||||
"load": "טעינה",
|
||||
"back": "אחורה",
|
||||
"statusSavingImage": "שומר תמונה",
|
||||
"statusGenerationComplete": "ייצור הסתיים",
|
||||
"statusRestoringFacesGFPGAN": "משחזר פרצופים (GFPGAN)",
|
||||
"statusLoadingModel": "טוען מודל",
|
||||
"trainingDesc2": "InvokeAI כבר תומך באימון הטמעות מותאמות אישית באמצעות היפוך טקסט באמצעות הסקריפט הראשי.",
|
||||
"postProcessDesc1": "InvokeAI מציעה מגוון רחב של תכונות עיבוד שלאחר. העלאת קנה מידה של תמונה ושחזור פנים כבר זמינים בממשק המשתמש. ניתן לגשת אליהם מתפריט 'אפשרויות מתקדמות' בכרטיסיות 'טקסט לתמונה' ו'תמונה לתמונה'. ניתן גם לעבד תמונות ישירות, באמצעות לחצני הפעולה של התמונה מעל תצוגת התמונה הנוכחית או בתוך המציג.",
|
||||
"trainingDesc1": "תהליך עבודה ייעודי לאימון ההטמעות ונקודות הביקורת שלך באמצעות היפוך טקסט ו-Dreambooth מממשק המשתמש."
|
||||
},
|
||||
"hotkeys": {
|
||||
"toggleGallery": {
|
||||
"desc": "פתח וסגור את מגירת הגלריה",
|
||||
"title": "הצג את הגלריה"
|
||||
},
|
||||
"keyboardShortcuts": "קיצורי מקלדת",
|
||||
"appHotkeys": "קיצורי אפליקציה",
|
||||
"generalHotkeys": "קיצורי דרך כלליים",
|
||||
"galleryHotkeys": "קיצורי דרך של הגלריה",
|
||||
"unifiedCanvasHotkeys": "קיצורי דרך לקנבס המאוחד",
|
||||
"invoke": {
|
||||
"title": "הפעל",
|
||||
"desc": "צור תמונה"
|
||||
},
|
||||
"focusPrompt": {
|
||||
"title": "התמקדות על הבקשה",
|
||||
"desc": "התמקדות על איזור הקלדת הבקשה"
|
||||
},
|
||||
"toggleOptions": {
|
||||
"desc": "פתח וסגור את פאנל ההגדרות",
|
||||
"title": "הצג הגדרות"
|
||||
},
|
||||
"pinOptions": {
|
||||
"title": "הצמד הגדרות",
|
||||
"desc": "הצמד את פאנל ההגדרות"
|
||||
},
|
||||
"toggleViewer": {
|
||||
"title": "הצג את חלון ההצגה",
|
||||
"desc": "פתח וסגור את מציג התמונות"
|
||||
},
|
||||
"changeTabs": {
|
||||
"title": "החלף לשוניות",
|
||||
"desc": "החלף לאיזור עבודה אחר"
|
||||
},
|
||||
"consoleToggle": {
|
||||
"desc": "פתח וסגור את הקונסול",
|
||||
"title": "הצג קונסול"
|
||||
},
|
||||
"setPrompt": {
|
||||
"title": "הגדרת בקשה",
|
||||
"desc": "שימוש בבקשה של התמונה הנוכחית"
|
||||
},
|
||||
"restoreFaces": {
|
||||
"desc": "שחזור התמונה הנוכחית",
|
||||
"title": "שחזור פרצופים"
|
||||
},
|
||||
"upscale": {
|
||||
"title": "הגדלת קנה מידה",
|
||||
"desc": "הגדל את התמונה הנוכחית"
|
||||
},
|
||||
"showInfo": {
|
||||
"title": "הצג מידע",
|
||||
"desc": "הצגת פרטי מטא-נתונים של התמונה הנוכחית"
|
||||
},
|
||||
"sendToImageToImage": {
|
||||
"title": "שלח לתמונה לתמונה",
|
||||
"desc": "שלח תמונה נוכחית לתמונה לתמונה"
|
||||
},
|
||||
"deleteImage": {
|
||||
"title": "מחק תמונה",
|
||||
"desc": "מחק את התמונה הנוכחית"
|
||||
},
|
||||
"closePanels": {
|
||||
"title": "סגור לוחות",
|
||||
"desc": "סוגר לוחות פתוחים"
|
||||
},
|
||||
"previousImage": {
|
||||
"title": "תמונה קודמת",
|
||||
"desc": "הצג את התמונה הקודמת בגלריה"
|
||||
},
|
||||
"toggleGalleryPin": {
|
||||
"title": "הצג את מצמיד הגלריה",
|
||||
"desc": "הצמדה וביטול הצמדה של הגלריה לממשק המשתמש"
|
||||
},
|
||||
"decreaseGalleryThumbSize": {
|
||||
"title": "הקטנת גודל תמונת גלריה",
|
||||
"desc": "מקטין את גודל התמונות הממוזערות של הגלריה"
|
||||
},
|
||||
"selectBrush": {
|
||||
"desc": "בוחר את מברשת הקנבס",
|
||||
"title": "בחר מברשת"
|
||||
},
|
||||
"selectEraser": {
|
||||
"title": "בחר מחק",
|
||||
"desc": "בוחר את מחק הקנבס"
|
||||
},
|
||||
"decreaseBrushSize": {
|
||||
"title": "הקטנת גודל המברשת",
|
||||
"desc": "מקטין את גודל מברשת הקנבס/מחק"
|
||||
},
|
||||
"increaseBrushSize": {
|
||||
"desc": "מגדיל את גודל מברשת הקנבס/מחק",
|
||||
"title": "הגדלת גודל המברשת"
|
||||
},
|
||||
"decreaseBrushOpacity": {
|
||||
"title": "הפחת את אטימות המברשת",
|
||||
"desc": "מקטין את האטימות של מברשת הקנבס"
|
||||
},
|
||||
"increaseBrushOpacity": {
|
||||
"title": "הגדל את אטימות המברשת",
|
||||
"desc": "מגביר את האטימות של מברשת הקנבס"
|
||||
},
|
||||
"moveTool": {
|
||||
"title": "כלי הזזה",
|
||||
"desc": "מאפשר ניווט על קנבס"
|
||||
},
|
||||
"fillBoundingBox": {
|
||||
"desc": "ממלא את התיבה התוחמת בצבע מברשת",
|
||||
"title": "מילוי תיבה תוחמת"
|
||||
},
|
||||
"eraseBoundingBox": {
|
||||
"desc": "מוחק את אזור התיבה התוחמת",
|
||||
"title": "מחק תיבה תוחמת"
|
||||
},
|
||||
"colorPicker": {
|
||||
"title": "בחר בבורר צבעים",
|
||||
"desc": "בוחר את בורר צבעי הקנבס"
|
||||
},
|
||||
"toggleSnap": {
|
||||
"title": "הפעל הצמדה",
|
||||
"desc": "מפעיל הצמדה לרשת"
|
||||
},
|
||||
"quickToggleMove": {
|
||||
"title": "הפעלה מהירה להזזה",
|
||||
"desc": "מפעיל זמנית את מצב ההזזה"
|
||||
},
|
||||
"toggleLayer": {
|
||||
"title": "הפעל שכבה",
|
||||
"desc": "הפעל בחירת שכבת בסיס/מסיכה"
|
||||
},
|
||||
"clearMask": {
|
||||
"title": "נקה מסיכה",
|
||||
"desc": "נקה את כל המסכה"
|
||||
},
|
||||
"hideMask": {
|
||||
"desc": "הסתרה והצגה של מסיכה",
|
||||
"title": "הסתר מסיכה"
|
||||
},
|
||||
"showHideBoundingBox": {
|
||||
"title": "הצגה/הסתרה של תיבה תוחמת",
|
||||
"desc": "הפעל תצוגה של התיבה התוחמת"
|
||||
},
|
||||
"mergeVisible": {
|
||||
"title": "מיזוג תוכן גלוי",
|
||||
"desc": "מיזוג כל השכבות הגלויות של הקנבס"
|
||||
},
|
||||
"saveToGallery": {
|
||||
"title": "שמור לגלריה",
|
||||
"desc": "שמור את הקנבס הנוכחי בגלריה"
|
||||
},
|
||||
"copyToClipboard": {
|
||||
"title": "העתק ללוח ההדבקה",
|
||||
"desc": "העתק את הקנבס הנוכחי ללוח ההדבקה"
|
||||
},
|
||||
"downloadImage": {
|
||||
"title": "הורד תמונה",
|
||||
"desc": "הורד את הקנבס הנוכחי"
|
||||
},
|
||||
"undoStroke": {
|
||||
"title": "בטל משיכה",
|
||||
"desc": "בטל משיכת מברשת"
|
||||
},
|
||||
"redoStroke": {
|
||||
"title": "בצע שוב משיכה",
|
||||
"desc": "ביצוע מחדש של משיכת מברשת"
|
||||
},
|
||||
"resetView": {
|
||||
"title": "איפוס תצוגה",
|
||||
"desc": "אפס תצוגת קנבס"
|
||||
},
|
||||
"previousStagingImage": {
|
||||
"desc": "תמונת אזור ההערכות הקודמת",
|
||||
"title": "תמונת הערכות קודמת"
|
||||
},
|
||||
"nextStagingImage": {
|
||||
"title": "תמנות הערכות הבאה",
|
||||
"desc": "תמונת אזור ההערכות הבאה"
|
||||
},
|
||||
"acceptStagingImage": {
|
||||
"desc": "אשר את תמונת איזור ההערכות הנוכחית",
|
||||
"title": "אשר תמונת הערכות"
|
||||
},
|
||||
"cancel": {
|
||||
"desc": "ביטול יצירת תמונה",
|
||||
"title": "ביטול"
|
||||
},
|
||||
"maximizeWorkSpace": {
|
||||
"title": "מקסם את איזור העבודה",
|
||||
"desc": "סגור פאנלים ומקסם את איזור העבודה"
|
||||
},
|
||||
"setSeed": {
|
||||
"title": "הגדר זרע",
|
||||
"desc": "השתמש בזרע התמונה הנוכחית"
|
||||
},
|
||||
"setParameters": {
|
||||
"title": "הגדרת פרמטרים",
|
||||
"desc": "שימוש בכל הפרמטרים של התמונה הנוכחית"
|
||||
},
|
||||
"increaseGalleryThumbSize": {
|
||||
"title": "הגדל את גודל תמונת הגלריה",
|
||||
"desc": "מגדיל את התמונות הממוזערות של הגלריה"
|
||||
},
|
||||
"nextImage": {
|
||||
"title": "תמונה הבאה",
|
||||
"desc": "הצג את התמונה הבאה בגלריה"
|
||||
}
|
||||
},
|
||||
"gallery": {
|
||||
"uploads": "העלאות",
|
||||
"galleryImageSize": "גודל תמונה",
|
||||
"gallerySettings": "הגדרות גלריה",
|
||||
"maintainAspectRatio": "שמור על יחס רוחב-גובה",
|
||||
"autoSwitchNewImages": "החלף אוטומטית לתמונות חדשות",
|
||||
"singleColumnLayout": "תצוגת עמודה אחת",
|
||||
"allImagesLoaded": "כל התמונות נטענו",
|
||||
"loadMore": "טען עוד",
|
||||
"noImagesInGallery": "אין תמונות בגלריה",
|
||||
"galleryImageResetSize": "איפוס גודל",
|
||||
"generations": "דורות",
|
||||
"showGenerations": "הצג דורות",
|
||||
"showUploads": "הצג העלאות"
|
||||
},
|
||||
"parameters": {
|
||||
"images": "תמונות",
|
||||
"steps": "צעדים",
|
||||
"cfgScale": "סולם CFG",
|
||||
"width": "רוחב",
|
||||
"height": "גובה",
|
||||
"seed": "זרע",
|
||||
"imageToImage": "תמונה לתמונה",
|
||||
"randomizeSeed": "זרע אקראי",
|
||||
"variationAmount": "כמות וריאציה",
|
||||
"seedWeights": "משקלי זרע",
|
||||
"faceRestoration": "שחזור פנים",
|
||||
"restoreFaces": "שחזר פנים",
|
||||
"type": "סוג",
|
||||
"strength": "חוזק",
|
||||
"upscale": "הגדלת קנה מידה",
|
||||
"upscaleImage": "הגדלת קנה מידת התמונה",
|
||||
"denoisingStrength": "חוזק מנטרל הרעש",
|
||||
"otherOptions": "אפשרויות אחרות",
|
||||
"hiresOptim": "אופטימיזצית רזולוציה גבוהה",
|
||||
"hiresStrength": "חוזק רזולוציה גבוהה",
|
||||
"codeformerFidelity": "דבקות",
|
||||
"scaleBeforeProcessing": "שנה קנה מידה לפני עיבוד",
|
||||
"scaledWidth": "קנה מידה לאחר שינוי W",
|
||||
"scaledHeight": "קנה מידה לאחר שינוי H",
|
||||
"infillMethod": "שיטת מילוי",
|
||||
"tileSize": "גודל אריח",
|
||||
"boundingBoxHeader": "תיבה תוחמת",
|
||||
"seamCorrectionHeader": "תיקון תפר",
|
||||
"infillScalingHeader": "מילוי וקנה מידה",
|
||||
"toggleLoopback": "הפעל לולאה חוזרת",
|
||||
"symmetry": "סימטריה",
|
||||
"vSymmetryStep": "צעד סימטריה V",
|
||||
"hSymmetryStep": "צעד סימטריה H",
|
||||
"cancel": {
|
||||
"schedule": "ביטול לאחר האיטרציה הנוכחית",
|
||||
"isScheduled": "מבטל",
|
||||
"immediate": "ביטול מיידי",
|
||||
"setType": "הגדר סוג ביטול"
|
||||
},
|
||||
"sendTo": "שליחה אל",
|
||||
"copyImage": "העתקת תמונה",
|
||||
"downloadImage": "הורדת תמונה",
|
||||
"sendToImg2Img": "שליחה לתמונה לתמונה",
|
||||
"sendToUnifiedCanvas": "שליחה אל קנבס מאוחד",
|
||||
"openInViewer": "פתח במציג",
|
||||
"closeViewer": "סגור מציג",
|
||||
"usePrompt": "שימוש בבקשה",
|
||||
"useSeed": "שימוש בזרע",
|
||||
"useAll": "שימוש בהכל",
|
||||
"useInitImg": "שימוש בתמונה ראשונית",
|
||||
"info": "פרטים",
|
||||
"showOptionsPanel": "הצג חלונית אפשרויות",
|
||||
"shuffle": "ערבוב",
|
||||
"noiseThreshold": "סף רעש",
|
||||
"perlinNoise": "רעש פרלין",
|
||||
"variations": "וריאציות",
|
||||
"imageFit": "התאמת תמונה ראשונית לגודל הפלט",
|
||||
"general": "כללי",
|
||||
"upscaling": "מגדיל את קנה מידה",
|
||||
"scale": "סולם",
|
||||
"seamlessTiling": "ריצוף חלק",
|
||||
"img2imgStrength": "חוזק תמונה לתמונה",
|
||||
"initialImage": "תמונה ראשונית",
|
||||
"copyImageToLink": "העתקת תמונה לקישור"
|
||||
},
|
||||
"settings": {
|
||||
"models": "מודלים",
|
||||
"displayInProgress": "הצגת תמונות בתהליך",
|
||||
"confirmOnDelete": "אישור בעת המחיקה",
|
||||
"useSlidersForAll": "שימוש במחוונים לכל האפשרויות",
|
||||
"resetWebUI": "איפוס ממשק משתמש",
|
||||
"resetWebUIDesc1": "איפוס ממשק המשתמש האינטרנטי מאפס רק את המטמון המקומי של הדפדפן של התמונות וההגדרות שנשמרו. זה לא מוחק תמונות מהדיסק.",
|
||||
"resetComplete": "ממשק המשתמש אופס. יש לבצע רענון דף בכדי לטעון אותו מחדש.",
|
||||
"enableImageDebugging": "הפעלת איתור באגים בתמונה",
|
||||
"displayHelpIcons": "הצג סמלי עזרה",
|
||||
"saveSteps": "שמירת תמונות כל n צעדים",
|
||||
"resetWebUIDesc2": "אם תמונות לא מופיעות בגלריה או שמשהו אחר לא עובד, נא לנסות איפוס /או אתחול לפני שליחת תקלה ב-GitHub."
|
||||
},
|
||||
"toast": {
|
||||
"uploadFailed": "העלאה נכשלה",
|
||||
"imageCopied": "התמונה הועתקה",
|
||||
"imageLinkCopied": "קישור תמונה הועתק",
|
||||
"imageNotLoadedDesc": "לא נמצאה תמונה לשליחה למודול תמונה לתמונה",
|
||||
"imageSavedToGallery": "התמונה נשמרה בגלריה",
|
||||
"canvasMerged": "קנבס מוזג",
|
||||
"sentToImageToImage": "נשלח לתמונה לתמונה",
|
||||
"sentToUnifiedCanvas": "נשלח אל קנבס מאוחד",
|
||||
"parametersSet": "הגדרת פרמטרים",
|
||||
"parametersNotSet": "פרמטרים לא הוגדרו",
|
||||
"parametersNotSetDesc": "לא נמצאו מטא-נתונים עבור תמונה זו.",
|
||||
"parametersFailedDesc": "לא ניתן לטעון תמונת התחלה.",
|
||||
"seedSet": "זרע הוגדר",
|
||||
"seedNotSetDesc": "לא ניתן היה למצוא זרע לתמונה זו.",
|
||||
"promptNotSetDesc": "לא היתה אפשרות למצוא בקשה עבור תמונה זו.",
|
||||
"metadataLoadFailed": "טעינת מטא-נתונים נכשלה",
|
||||
"initialImageSet": "סט תמונה ראשוני",
|
||||
"initialImageNotSet": "התמונה הראשונית לא הוגדרה",
|
||||
"initialImageNotSetDesc": "לא ניתן היה לטעון את התמונה הראשונית",
|
||||
"uploadFailedUnableToLoadDesc": "לא ניתן לטעון את הקובץ",
|
||||
"tempFoldersEmptied": "התיקייה הזמנית רוקנה",
|
||||
"downloadImageStarted": "הורדת התמונה החלה",
|
||||
"imageNotLoaded": "לא נטענה תמונה",
|
||||
"parametersFailed": "בעיה בטעינת פרמטרים",
|
||||
"promptNotSet": "בקשה לא הוגדרה",
|
||||
"upscalingFailed": "העלאת קנה המידה נכשלה",
|
||||
"faceRestoreFailed": "שחזור הפנים נכשל",
|
||||
"seedNotSet": "זרע לא הוגדר",
|
||||
"promptSet": "בקשה הוגדרה"
|
||||
},
|
||||
"tooltip": {
|
||||
"feature": {
|
||||
"gallery": "הגלריה מציגה יצירות מתיקיית הפלטים בעת יצירתם. ההגדרות מאוחסנות בתוך קבצים ונגישות באמצעות תפריט הקשר.",
|
||||
"upscale": "השתמש ב-ESRGAN כדי להגדיל את התמונה מיד לאחר היצירה.",
|
||||
"imageToImage": "תמונה לתמונה טוענת כל תמונה כראשונית, המשמשת לאחר מכן ליצירת תמונה חדשה יחד עם הבקשה. ככל שהערך גבוה יותר, כך תמונת התוצאה תשתנה יותר. ערכים מ- 0.0 עד 1.0 אפשריים, הטווח המומלץ הוא .25-.75",
|
||||
"seamCorrection": "שליטה בטיפול בתפרים גלויים המתרחשים בין תמונות שנוצרו על בד הציור.",
|
||||
"prompt": "זהו שדה הבקשה. הבקשה כוללת אובייקטי יצירה ומונחים סגנוניים. באפשרותך להוסיף משקל (חשיבות אסימון) גם בשורת הפקודה, אך פקודות ופרמטרים של CLI לא יפעלו.",
|
||||
"variations": "נסה וריאציה עם ערך בין 0.1 ל- 1.0 כדי לשנות את התוצאה עבור זרע נתון. וריאציות מעניינות של הזרע הן בין 0.1 ל -0.3.",
|
||||
"other": "אפשרויות אלה יאפשרו מצבי עיבוד חלופיים עבור ההרצה. 'ריצוף חלק' ייצור תבניות חוזרות בפלט. 'רזולוציה גבוהה' נוצר בשני שלבים עם img2img: השתמש בהגדרה זו כאשר אתה רוצה תמונה גדולה וקוהרנטית יותר ללא חפצים. פעולה זאת תקח יותר זמן מפעולת טקסט לתמונה רגילה.",
|
||||
"faceCorrection": "תיקון פנים עם GFPGAN או Codeformer: האלגוריתם מזהה פרצופים בתמונה ומתקן כל פגם. ערך גבוה ישנה את התמונה יותר, וכתוצאה מכך הפרצופים יהיו אטרקטיביים יותר. Codeformer עם נאמנות גבוהה יותר משמר את התמונה המקורית על חשבון תיקון פנים חזק יותר.",
|
||||
"seed": "ערך הזרע משפיע על הרעש הראשוני שממנו נוצרת התמונה. אתה יכול להשתמש בזרעים שכבר קיימים מתמונות קודמות. 'סף רעש' משמש להפחתת חפצים בערכי CFG גבוהים (נסה את טווח 0-10), ופרלין כדי להוסיף רעשי פרלין במהלך היצירה: שניהם משמשים להוספת וריאציה לתפוקות שלך.",
|
||||
"infillAndScaling": "נהל שיטות מילוי (המשמשות באזורים עם מסיכה או אזורים שנמחקו בבד הציור) ושינוי קנה מידה (שימושי לגדלים קטנים של תיבות תוחמות).",
|
||||
"boundingBox": "התיבה התוחמת זהה להגדרות 'רוחב' ו'גובה' עבור 'טקסט לתמונה' או 'תמונה לתמונה'. רק האזור בתיבה יעובד."
|
||||
}
|
||||
},
|
||||
"unifiedCanvas": {
|
||||
"layer": "שכבה",
|
||||
"base": "בסיס",
|
||||
"maskingOptions": "אפשרויות מסכות",
|
||||
"enableMask": "הפעלת מסיכה",
|
||||
"colorPicker": "בוחר הצבעים",
|
||||
"preserveMaskedArea": "שימור איזור ממוסך",
|
||||
"clearMask": "ניקוי מסיכה",
|
||||
"brush": "מברשת",
|
||||
"eraser": "מחק",
|
||||
"fillBoundingBox": "מילוי תיבה תוחמת",
|
||||
"eraseBoundingBox": "מחק תיבה תוחמת",
|
||||
"copyToClipboard": "העתק ללוח ההדבקה",
|
||||
"downloadAsImage": "הורדה כתמונה",
|
||||
"undo": "ביטול",
|
||||
"redo": "ביצוע מחדש",
|
||||
"clearCanvas": "ניקוי קנבס",
|
||||
"showGrid": "הצגת רשת",
|
||||
"snapToGrid": "הצמדה לרשת",
|
||||
"darkenOutsideSelection": "הכהיית בחירה חיצונית",
|
||||
"saveBoxRegionOnly": "שמירת איזור תיבה בלבד",
|
||||
"limitStrokesToBox": "הגבלת משיכות לקופסא",
|
||||
"showCanvasDebugInfo": "הצגת מידע איתור באגים בקנבס",
|
||||
"clearCanvasHistory": "ניקוי הסטוריית קנבס",
|
||||
"clearHistory": "ניקוי היסטוריה",
|
||||
"clearCanvasHistoryConfirm": "האם את/ה בטוח/ה שברצונך לנקות את היסטוריית הקנבס?",
|
||||
"emptyFolder": "ריקון תיקייה",
|
||||
"emptyTempImagesFolderConfirm": "האם את/ה בטוח/ה שברצונך לרוקן את התיקיה הזמנית?",
|
||||
"activeLayer": "שכבה פעילה",
|
||||
"canvasScale": "קנה מידה של קנבס",
|
||||
"betaLimitToBox": "הגבל לקופסא",
|
||||
"betaDarkenOutside": "הכההת הבחוץ",
|
||||
"canvasDimensions": "מידות קנבס",
|
||||
"previous": "הקודם",
|
||||
"next": "הבא",
|
||||
"accept": "אישור",
|
||||
"showHide": "הצג/הסתר",
|
||||
"discardAll": "בטל הכל",
|
||||
"betaClear": "איפוס",
|
||||
"boundingBox": "תיבה תוחמת",
|
||||
"scaledBoundingBox": "תיבה תוחמת לאחר שינוי קנה מידה",
|
||||
"betaPreserveMasked": "שמר מסיכה",
|
||||
"brushOptions": "אפשרויות מברשת",
|
||||
"brushSize": "גודל",
|
||||
"mergeVisible": "מיזוג תוכן גלוי",
|
||||
"move": "הזזה",
|
||||
"resetView": "איפוס תצוגה",
|
||||
"saveToGallery": "שמור לגלריה",
|
||||
"canvasSettings": "הגדרות קנבס",
|
||||
"showIntermediates": "הצגת מתווכים",
|
||||
"autoSaveToGallery": "שמירה אוטומטית בגלריה",
|
||||
"emptyTempImageFolder": "ריקון תיקיית תמונות זמניות",
|
||||
"clearCanvasHistoryMessage": "ניקוי היסטוריית הקנבס משאיר את הקנבס הנוכחי ללא שינוי, אך מנקה באופן בלתי הפיך את היסטוריית הביטול והביצוע מחדש.",
|
||||
"emptyTempImagesFolderMessage": "ריקון תיקיית התמונה הזמנית גם מאפס באופן מלא את הקנבס המאוחד. זה כולל את כל היסטוריית הביטול/ביצוע מחדש, תמונות באזור ההערכות ושכבת הבסיס של בד הציור.",
|
||||
"boundingBoxPosition": "מיקום תיבה תוחמת",
|
||||
"canvasPosition": "מיקום קנבס",
|
||||
"cursorPosition": "מיקום הסמן",
|
||||
"mask": "מסכה"
|
||||
}
|
||||
}
|
||||
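The file above is the Hebrew (he) locale bundle for the InvokeAI web UI. As a quick orientation to how such a nested bundle is read, here is a minimal sketch assuming the keys are resolved through i18next (the localization library the web frontend uses); the import path and init options below are illustrative only, not the project's actual bootstrap code.

```ts
// Minimal sketch (assumed setup): resolving nested keys from a locale bundle
// like the one above with i18next.
import i18next from 'i18next';
// Illustrative path; the real bundle ships with the web frontend's locale files.
import he from './he.json';

await i18next.init({
  lng: 'he',
  fallbackLng: 'en',
  resources: {
    he: { translation: he },
  },
});

// Nested sections are addressed with dot-separated keys (i18next's default keySeparator is '.').
console.log(i18next.t('common.statusConnected'));      // "מחובר"
console.log(i18next.t('hotkeys.toggleGallery.title')); // "הצג את הגלריה"
```

With an English bundle registered as the fallback language, any key missing from this file resolves from the English strings, so a partially translated bundle remains usable.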